/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "radeon.h"
#include "cikd.h"
#include "r600_dpm.h"
#include "ci_dpm.h"
#include "atom.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0		0x0a
#define MC_CG_ARB_FREQ_F1		0x0b
#define MC_CG_ARB_FREQ_F2		0x0c
#define MC_CG_ARB_FREQ_F3		0x0d

#define SMC_RAM_END			0x40000

#define VOLTAGE_SCALE			4
#define VOLTAGE_VID_OFFSET_SCALE1	625
#define VOLTAGE_VID_OFFSET_SCALE2	100

static const struct ci_pt_defaults defaults_hawaii_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x84, 0x0, 0x0, 0x7F, 0x0, 0x0, 0x5A, 0x60, 0x51, 0x8E, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x93, 0x0, 0x0, 0x97, 0x0, 0x0, 0x6B, 0x60, 0x51, 0x95, 0x79, 0x6B, 0x5F, 0x90, 0x79 },
	{ 0x1EA, 0x1EA, 0x1EA, 0x224, 0x224, 0x224, 0x24F, 0x24F, 0x24F, 0x28E, 0x28E, 0x28E, 0x2BC, 0x2BC, 0x2BC }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro =
{
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};

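/*
 * DIDT (di/dt throttling) configuration table. Each entry is
 * { offset, mask, shift, value, type }: ci_program_pt_config_registers()
 * rewrites the 'mask' field of the register at 'offset' with
 * 'value' << 'shift'. An offset of 0xFFFFFFFF terminates the table. The
 * four groups of offsets (0x0x, 0x2x, 0x4x, 0x6x) appear to match the SQ,
 * DB, TD and TCP DIDT blocks toggled via DIDT_*_CTRL0 in
 * ci_do_enable_didt().
 */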
static const struct ci_pt_config_reg didt_config_ci[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

extern u8 rv770_get_memory_module_index(struct radeon_device *rdev);
extern void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
							    u32 *max_clock);
extern int ni_copy_and_switch_arb_sets(struct radeon_device *rdev,
				       u32 arb_freq_src, u32 arb_freq_dest);
extern u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock);
extern u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode);
extern void si_trim_voltage_table_to_fit_state_table(struct radeon_device *rdev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table);
extern void cik_enter_rlc_safe_mode(struct radeon_device *rdev);
extern void cik_exit_rlc_safe_mode(struct radeon_device *rdev);
extern int ci_mc_load_microcode(struct radeon_device *rdev);

static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct radeon_device *rdev, u32 n);
static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate);

static struct ci_power_info *ci_get_pi(struct radeon_device *rdev)
{
	struct ci_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct radeon_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}

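/*
 * Select the PowerTune defaults for this chip by PCI device ID: Bonaire
 * XT/PRO, Saturn XT/PRO and Hawaii XT/PRO each get their own coefficient
 * set. Unrecognized IDs fall back to the Bonaire XT values (the 'default'
 * label shares the first case).
 */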
static void ci_initialize_powertune_defaults(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	switch (rdev->pdev->device) {
	case 0x6650:
	case 0x6658:
	case 0x665C:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6651:
	case 0x665D:
		pi->powertune_defaults = &defaults_bonaire_pro;
		break;
	case 0x6640:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x6641:
		pi->powertune_defaults = &defaults_saturn_pro;
		break;
	case 0x67B8:
	case 0x67B0:
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}

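/*
 * Convert a VDDC value (in mV) to a VID code: with VOLTAGE_SCALE = 4 this
 * is VID = (6200 - vddc * 4) / 25, i.e. VID 0 = 1.55 V with 6.25 mV steps
 * (matching the SVI2 voltage encoding). For example, 1000 mV gives
 * (6200 - 4000) / 25 = 88.
 */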
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (rdev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < rdev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}
	return 0;
}

static int ci_populate_vddc_vid(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}

static int ci_populate_tdc_limit(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = rdev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, PmFuseTable) +
				     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
				     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
				     pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	struct radeon_ppm_table *ppm = rdev->pm.dpm.dyn_state.ppm_table;
	int i, j, k;
	const u16 *def1;
	const u16 *def2;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = ci_read_smc_sram_dword(rdev,
					     SMU7_FIRMWARE_HEADER_LOCATION +
					     offsetof(SMU7_Firmware_Header, PmFuseTable),
					     &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(rdev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(rdev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(rdev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(rdev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(rdev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(rdev);
		if (ret)
			return ret;
		ret = ci_copy_bytes_to_smc(rdev, pm_fuse_table_offset,
					   (u8 *)&pi->smc_powertune_table,
					   sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}

static void ci_do_enable_didt(struct radeon_device *rdev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(DIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(DIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(DIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(DIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_CTRL_EN;
		else
			data &= ~DIDT_CTRL_EN;
		WREG32_DIDT(DIDT_TCP_CTRL0, data);
	}
}

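/*
 * Apply a ci_pt_config_reg table: CACHE-type entries only accumulate
 * their shifted value into 'cache'; the next non-CACHE entry does a
 * read-modify-write of the addressed register (SMC-indirect, DIDT-indirect
 * or plain MMIO, where plain offsets are dword indices shifted left by 2),
 * ORs the cached bits in, then clears the cache.
 */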
static int ci_program_pt_config_registers(struct radeon_device *rdev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset << 2);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset << 2, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}

static int ci_enable_didt(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		cik_enter_rlc_safe_mode(rdev);

		if (enable) {
			ret = ci_program_pt_config_registers(rdev, didt_config_ci);
			if (ret) {
				cik_exit_rlc_safe_mode(rdev);
				return ret;
			}
		}

		ci_do_enable_didt(rdev, enable);

		cik_exit_rlc_safe_mode(rdev);
	}

	return 0;
}

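/*
 * Enable/disable the SMC power-containment features (BAPM/DTE, TDC limit,
 * package power limit). On enable, each feature is requested individually
 * and recorded in pi->power_containment_features so that only the features
 * the SMC actually accepted are torn down on disable. The consistent
 * "* 256" scaling on power values suggests the SMC takes them in 8.8
 * fixed-point units (an assumption from the code, not SMC documentation).
 */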
static int ci_enable_power_containment(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct radeon_cac_tdp_table *cac_tdp_table =
						rdev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(rdev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				ci_send_msg_to_smc(rdev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_power_control_set_level(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment &&
	    (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)) {
		adjust_percent = adjust_polarity ?
			rdev->pm.dpm.tdp_adjustment : (-1 * rdev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;
		target_tdp *= 256;

		ret = ci_set_overdrive_target_tdp(rdev, (u32)target_tdp);
	}

	return ret;
}

void ci_dpm_powergate_uvd(struct radeon_device *rdev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(rdev, gate);
}

bool ci_dpm_vblank_too_short(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);
	u32 switch_limit = pi->mem_gddr5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}

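/*
 * Clamp a requested power state to what the platform can deliver: drop
 * clocks to the AC/DC limits, cap sclk/mclk against the voltage dependency
 * tables, force the high mclk (and keep mclk switching off) when more than
 * one CRTC is active or the vblank period is too short to hide a memory
 * clock switch, and raise clocks to the active VCE state's minimums.
 */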
static void ci_apply_state_adjust_rules(struct radeon_device *rdev,
					struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	int i;

	if (rps->vce_active) {
		rps->evclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].evclk;
		rps->ecclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(rdev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (rdev->pm.dpm.ac_power)
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (rdev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							&max_sclk_vddc);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							&max_mclk_vddci);
	btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							&max_mclk_vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;
		}
		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;
		}
		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk)
			sclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].sclk;
		if (mclk < rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk)
			mclk = rdev->pm.dpm.vce_states[rdev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}

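/*
 * Program the thermal interrupt thresholds. min_temp/max_temp are in
 * millidegrees Celsius; the CI_DIG_THERM_INTH/INTL fields take whole
 * degrees, hence the / 1000. The effective range is clamped to 0..255 C.
 */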
static int ci_set_thermal_temperature_range(struct radeon_device *rdev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(CG_THERMAL_INT);
	tmp &= ~(CI_DIG_THERM_INTH_MASK | CI_DIG_THERM_INTL_MASK);
	tmp |= CI_DIG_THERM_INTH(high_temp / 1000) |
		CI_DIG_THERM_INTL(low_temp / 1000);
	WREG32_SMC(CG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(CG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

	return 0;
}

#if 0
static int ci_read_smc_soft_register(struct radeon_device *rdev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_read_smc_sram_dword(rdev,
				      pi->soft_regs_start + reg_offset,
				      value, pi->sram_end);
}
#endif

static int ci_write_smc_soft_register(struct radeon_device *rdev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	return ci_write_smc_sram_dword(rdev,
				       pi->soft_regs_start + reg_offset,
				       value, pi->sram_end);
}

static void ci_init_fps_limits(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = ci_copy_bytes_to_smc(rdev,
					   pi->dpm_table_start +
					   offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
					   (u8 *)&low_sclk_interrupt_t,
					   sizeof(u32), pi->sram_end);
	}

	return ret;
}

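/*
 * Build the leakage-voltage lookup tables from the VBIOS: for each virtual
 * (leakage) voltage ID, ask ATOM for the corresponding real VDDC/VDDCI
 * value and record the pair so later code can substitute real voltages
 * for leakage IDs.
 */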
static void ci_get_leakage_voltages(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (radeon_atom_get_leakage_id_from_vbios(rdev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (radeon_atom_get_leakage_vddc_based_on_leakage_params(rdev, &vddc, &vddci,
										 virtual_voltage_id,
										 leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}

static void ci_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	bool want_thermal_protection;
	enum radeon_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(CG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(CG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~THERMAL_PROTECTION_DIS;
		else
			tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp |= THERMAL_PROTECTION_DIS;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct radeon_device *rdev,
					   enum radeon_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct radeon_device *rdev)
{
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(MC_SEQ_CNTL_3, CAC_EN, ~CAC_EN);

			WREG32_SMC(LCAC_MC0_CNTL, 0x05);
			WREG32_SMC(LCAC_MC1_CNTL, 0x05);
			WREG32_SMC(LCAC_CPL_CNTL, 0x100005);

			udelay(10);

			WREG32_SMC(LCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(LCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(LCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static int ci_start_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp |= GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(BIF_LNCNT_RESET, 0, ~RESET_LNCNT_EN);

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(rdev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(GENERAL_PWRMGT);
	tmp &= ~GLOBAL_PWRMGT_EN;
	WREG32_SMC(GENERAL_PWRMGT, tmp);

	tmp = RREG32(SCLK_PWRMGT_CNTL);
	tmp &= ~DYNAMIC_PM_EN;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(rdev, false);
	if (ret)
		return ret;

	smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	u32 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_OFF;
	else
		tmp |= SCLK_PWRMGT_OFF;
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
}

#if 0
static int ci_notify_hw_of_power_source(struct radeon_device *rdev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct radeon_cac_tdp_table *cac_tdp_table =
		rdev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(rdev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			ci_send_msg_to_smc(rdev, PPSMC_MSG_RunningOnAC);
		else
			ci_send_msg_to_smc(rdev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
#endif

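/*
 * SMC mailbox helpers: an outgoing parameter is written to SMC_MSG_ARG_0
 * before the message is posted, and a returned parameter is read back from
 * the same register once the SMC acknowledges the message with
 * PPSMC_Result_OK.
 */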
static PPSMC_Result ci_send_msg_to_smc_with_parameter(struct radeon_device *rdev,
						      PPSMC_Msg msg, u32 parameter)
{
	WREG32(SMC_MSG_ARG_0, parameter);
	return ci_send_msg_to_smc(rdev, msg);
}

static PPSMC_Result ci_send_msg_to_smc_return_parameter(struct radeon_device *rdev,
							 PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = ci_send_msg_to_smc(rdev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(SMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_DPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_MCLKDPM_ForceState, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct radeon_device *rdev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct radeon_device *rdev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct radeon_device *rdev)
{
	return ci_enable_sclk_mclk_dpm(rdev, false);
}

static u32 ci_get_average_sclk_freq(struct radeon_device *rdev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetSclkFrequency,
						    &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct radeon_device *rdev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		ci_send_msg_to_smc_return_parameter(rdev,
						    PPSMC_MSG_API_GetMclkFrequency,
						    &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct radeon_device *rdev)
{
	int i;

	ci_program_jump_on_start(rdev);
	ci_start_smc_clock(rdev);
	ci_start_smc(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(FIRMWARE_FLAGS) & INTERRUPTS_ENABLED)
			break;
	}
}

static void ci_dpm_stop_smc(struct radeon_device *rdev)
{
	ci_reset_smc(rdev);
	ci_stop_smc_clock(rdev);
}

static int ci_process_firmware_header(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;
	int ret;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, DpmTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, SoftRegisters),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcRegisterTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, FanTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = ci_read_smc_sram_dword(rdev,
				     SMU7_FIRMWARE_HEADER_LOCATION +
				     offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
				     &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}

static void ci_read_clock_registers(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(CG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(CG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
}

static void ci_init_sclk_t(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct radeon_device *rdev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	if (enable)
		tmp &= ~THERMAL_PROTECTION_DIS;
	else
		tmp |= THERMAL_PROTECTION_DIS;
	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(GENERAL_PWRMGT);

	tmp |= STATIC_PM_EN;

	WREG32_SMC(GENERAL_PWRMGT, tmp);
}

#if 0
static int ci_enter_ulp_state(struct radeon_device *rdev)
{

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct radeon_device *rdev)
{
	int i;

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
#endif

static int ci_notify_smc_display_change(struct radeon_device *rdev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (ci_send_msg_to_smc(rdev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct radeon_device *rdev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (ci_send_msg_to_smc(rdev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

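/*
 * Program the display-gap timing used by the SMC to schedule work around
 * vblank. pre_vbi_time_in_us is the active portion of the frame (frame
 * time minus a 200 us margin and the vblank time); it is converted to a
 * count of reference-clock cycles for CG_DISPLAY_GAP_CNTL2 (ref_clock / 100
 * appears to be cycles per microsecond, assuming reference_freq is in
 * 10 kHz units).
 */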
static void ci_program_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = rdev->clock.spll.reference_freq;
	u32 refresh_rate = r600_dpm_get_vrefresh(rdev);
	u32 vblank_time = r600_dpm_get_vblank_time(rdev);

	tmp &= ~DISP_GAP_MASK;
	if (rdev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
	else
		tmp |= DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE);
	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(CG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(rdev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(rdev, (rdev->pm.dpm.new_active_crtc_count == 1));
}

static void ci_enable_spread_spectrum(struct radeon_device *rdev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(GENERAL_PWRMGT);
			tmp |= DYN_SPREAD_SPECTRUM_EN;
			WREG32_SMC(GENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(CG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~SSEN;
		WREG32_SMC(CG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(GENERAL_PWRMGT);
		tmp &= ~DYN_SPREAD_SPECTRUM_EN;
		WREG32_SMC(GENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct radeon_device *rdev)
{
	WREG32_SMC(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
}

static void ci_enable_display_gap(struct radeon_device *rdev)
{
	u32 tmp = RREG32_SMC(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP_GAP_MASK | DISP_GAP_MCHG_MASK);
	tmp |= (DISP_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK));

	WREG32_SMC(CG_DISPLAY_GAP_CNTL, tmp);
}

static void ci_program_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp &= ~(RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(CG_FTV_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(CG_FTV_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(CG_FTV_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(CG_FTV_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(CG_FTV_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(CG_FTV_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(CG_FTV_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
	tmp |= (RESET_SCLK_CNT | RESET_BUSY_CNT);
	WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(CG_FTV_0, 0);
	WREG32_SMC(CG_FTV_1, 0);
	WREG32_SMC(CG_FTV_2, 0);
	WREG32_SMC(CG_FTV_3, 0);
	WREG32_SMC(CG_FTV_4, 0);
	WREG32_SMC(CG_FTV_5, 0);
	WREG32_SMC(CG_FTV_6, 0);
	WREG32_SMC(CG_FTV_7, 0);
}

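/*
 * Upload the SMC microcode: wait for the boot ROM sequence to finish
 * (BOOT_SEQ_DONE in RCU_UC_EVENTS), halt the SMC by stopping its clock and
 * holding it in reset, then copy the firmware image into SMC SRAM.
 */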
static int ci_upload_firmware(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int i, ret;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32_SMC(RCU_UC_EVENTS) & BOOT_SEQ_DONE)
			break;
	}
	WREG32_SMC(SMC_SYSCON_MISC_CNTL, 1);

	ci_stop_smc_clock(rdev);
	ci_reset_smc(rdev);

	ret = ci_load_smc_ucode(rdev, pi->sram_end);

	return ret;
}

static int ci_get_svi2_voltage_table(struct radeon_device *rdev,
				     struct radeon_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}

static int ci_construct_voltage_tables(struct radeon_device *rdev)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_VDDCI,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = radeon_atom_get_voltage_table(rdev, VOLTAGE_TYPE_MVDDC,
						    VOLTAGE_OBJ_GPIO_LUT,
						    &pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(rdev,
						&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		si_trim_voltage_table_to_fit_state_table(rdev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}

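/*
 * Fill one SMC voltage level from an ATOM voltage table entry. Voltages
 * are handed to the SMC scaled by VOLTAGE_SCALE (0.25 mV units) in
 * big-endian byte order; if no standardized Hi/Lo SIDD values can be
 * derived from the leakage tables, the nominal voltage is used for both.
 */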
1752 static void ci_populate_smc_voltage_table(struct radeon_device *rdev,
1753 struct atom_voltage_table_entry *voltage_table,
1754 SMU7_Discrete_VoltageLevel *smc_voltage_table)
1755 {
1756 int ret;
1757
1758 ret = ci_get_std_voltage_value_sidd(rdev, voltage_table,
1759 &smc_voltage_table->StdVoltageHiSidd,
1760 &smc_voltage_table->StdVoltageLoSidd);
1761
1762 if (ret) {
1763 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
1764 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
1765 }
1766
1767 smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
1768 smc_voltage_table->StdVoltageHiSidd =
1769 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
1770 smc_voltage_table->StdVoltageLoSidd =
1771 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
1772 }
1773
1774 static int ci_populate_smc_vddc_table(struct radeon_device *rdev,
1775 SMU7_Discrete_DpmTable *table)
1776 {
1777 struct ci_power_info *pi = ci_get_pi(rdev);
1778 unsigned int count;
1779
1780 table->VddcLevelCount = pi->vddc_voltage_table.count;
1781 for (count = 0; count < table->VddcLevelCount; count++) {
1782 ci_populate_smc_voltage_table(rdev,
1783 &pi->vddc_voltage_table.entries[count],
1784 &table->VddcLevel[count]);
1785
1786 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1787 table->VddcLevel[count].Smio |=
1788 pi->vddc_voltage_table.entries[count].smio_low;
1789 else
1790 table->VddcLevel[count].Smio = 0;
1791 }
1792 table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
1793
1794 return 0;
1795 }
1796
1797 static int ci_populate_smc_vddci_table(struct radeon_device *rdev,
1798 SMU7_Discrete_DpmTable *table)
1799 {
1800 unsigned int count;
1801 struct ci_power_info *pi = ci_get_pi(rdev);
1802
1803 table->VddciLevelCount = pi->vddci_voltage_table.count;
1804 for (count = 0; count < table->VddciLevelCount; count++) {
1805 ci_populate_smc_voltage_table(rdev,
1806 &pi->vddci_voltage_table.entries[count],
1807 &table->VddciLevel[count]);
1808
1809 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1810 table->VddciLevel[count].Smio |=
1811 pi->vddci_voltage_table.entries[count].smio_low;
1812 else
1813 table->VddciLevel[count].Smio = 0;
1814 }
1815 table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
1816
1817 return 0;
1818 }
1819
1820 static int ci_populate_smc_mvdd_table(struct radeon_device *rdev,
1821 SMU7_Discrete_DpmTable *table)
1822 {
1823 struct ci_power_info *pi = ci_get_pi(rdev);
1824 unsigned int count;
1825
1826 table->MvddLevelCount = pi->mvdd_voltage_table.count;
1827 for (count = 0; count < table->MvddLevelCount; count++) {
1828 ci_populate_smc_voltage_table(rdev,
1829 &pi->mvdd_voltage_table.entries[count],
1830 &table->MvddLevel[count]);
1831
1832 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
1833 table->MvddLevel[count].Smio |=
1834 pi->mvdd_voltage_table.entries[count].smio_low;
1835 else
1836 table->MvddLevel[count].Smio = 0;
1837 }
1838 table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
1839
1840 return 0;
1841 }
1842
1843 static int ci_populate_smc_voltage_tables(struct radeon_device *rdev,
1844 SMU7_Discrete_DpmTable *table)
1845 {
1846 int ret;
1847
1848 ret = ci_populate_smc_vddc_table(rdev, table);
1849 if (ret)
1850 return ret;
1851
1852 ret = ci_populate_smc_vddci_table(rdev, table);
1853 if (ret)
1854 return ret;
1855
1856 ret = ci_populate_smc_mvdd_table(rdev, table);
1857 if (ret)
1858 return ret;
1859
1860 return 0;
1861 }
1862
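/*
 * Look up the MVDD level for a given memory clock.  Returns 0 and
 * fills @voltage when MVDD is under driver control and a dependency
 * entry covers @mclk; returns -EINVAL otherwise, so the ACPI-level
 * caller can fall back to a zero MinMvdd.
 */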
1863 static int ci_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
1864 SMU7_Discrete_VoltageLevel *voltage)
1865 {
1866 struct ci_power_info *pi = ci_get_pi(rdev);
1867 u32 i = 0;
1868
1869 if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
1870 for (i = 0; i < rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
1871 if (mclk <= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
1872 voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
1873 break;
1874 }
1875 }
1876
1877 if (i >= rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
1878 			return -EINVAL;

		/* a matching entry was found in the loop above; report success */
		return 0;
1879 	}
1880 
1881 	return -EINVAL;
1882 }
1883
1884 static int ci_get_std_voltage_value_sidd(struct radeon_device *rdev,
1885 struct atom_voltage_table_entry *voltage_table,
1886 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
1887 {
1888 u16 v_index, idx;
1889 bool voltage_found = false;
1890 *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
1891 *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
1892
1893 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
1894 return -EINVAL;
1895
1896 if (rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
1897 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1898 if (voltage_table->value ==
1899 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1900 voltage_found = true;
1901 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1902 idx = v_index;
1903 else
1904 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1905 *std_voltage_lo_sidd =
1906 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1907 *std_voltage_hi_sidd =
1908 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1909 break;
1910 }
1911 }
1912
1913 if (!voltage_found) {
1914 for (v_index = 0; (u32)v_index < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
1915 if (voltage_table->value <=
1916 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
1917 voltage_found = true;
1918 if ((u32)v_index < rdev->pm.dpm.dyn_state.cac_leakage_table.count)
1919 idx = v_index;
1920 else
1921 idx = rdev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
1922 *std_voltage_lo_sidd =
1923 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
1924 *std_voltage_hi_sidd =
1925 rdev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
1926 break;
1927 }
1928 }
1929 }
1930 }
1931
1932 return 0;
1933 }
1934
1935 static void ci_populate_phase_value_based_on_sclk(struct radeon_device *rdev,
1936 const struct radeon_phase_shedding_limits_table *limits,
1937 u32 sclk,
1938 u32 *phase_shedding)
1939 {
1940 unsigned int i;
1941
1942 *phase_shedding = 1;
1943
1944 for (i = 0; i < limits->count; i++) {
1945 if (sclk < limits->entries[i].sclk) {
1946 *phase_shedding = i;
1947 break;
1948 }
1949 }
1950 }
1951
1952 static void ci_populate_phase_value_based_on_mclk(struct radeon_device *rdev,
1953 const struct radeon_phase_shedding_limits_table *limits,
1954 u32 mclk,
1955 u32 *phase_shedding)
1956 {
1957 unsigned int i;
1958
1959 *phase_shedding = 1;
1960
1961 for (i = 0; i < limits->count; i++) {
1962 if (mclk < limits->entries[i].mclk) {
1963 *phase_shedding = i;
1964 break;
1965 }
1966 }
1967 }
1968
1969 static int ci_init_arb_table_index(struct radeon_device *rdev)
1970 {
1971 struct ci_power_info *pi = ci_get_pi(rdev);
1972 u32 tmp;
1973 int ret;
1974
1975 ret = ci_read_smc_sram_dword(rdev, pi->arb_table_start,
1976 &tmp, pi->sram_end);
1977 if (ret)
1978 return ret;
1979
1980 tmp &= 0x00FFFFFF;
1981 tmp |= MC_CG_ARB_FREQ_F1 << 24;
1982
1983 return ci_write_smc_sram_dword(rdev, pi->arb_table_start,
1984 tmp, pi->sram_end);
1985 }
1986
1987 static int ci_get_dependency_volt_by_clk(struct radeon_device *rdev,
1988 struct radeon_clock_voltage_dependency_table *allowed_clock_voltage_table,
1989 u32 clock, u32 *voltage)
1990 {
1991 u32 i = 0;
1992
1993 if (allowed_clock_voltage_table->count == 0)
1994 return -EINVAL;
1995
1996 for (i = 0; i < allowed_clock_voltage_table->count; i++) {
1997 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
1998 *voltage = allowed_clock_voltage_table->entries[i].v;
1999 return 0;
2000 }
2001 }
2002
2003 *voltage = allowed_clock_voltage_table->entries[i-1].v;
2004
2005 return 0;
2006 }
2007
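/*
 * Pick the largest deep-sleep divider id i (a power-of-two divide)
 * that keeps sclk >> i at or above both @min_sclk_in_sr and the
 * CISLAND_MINIMUM_ENGINE_CLOCK floor.  For example, with
 * sclk == 8 * min the loop settles on divider id 3 (sclk >> 3 == min);
 * a clock already below the minimum gets divider id 0.
 */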
2008 static u8 ci_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
2009 u32 sclk, u32 min_sclk_in_sr)
2010 {
2011 u32 i;
2012 u32 tmp;
2013 u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
2014 min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;
2015
2016 if (sclk < min)
2017 return 0;
2018
2019 for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID; ; i--) {
2020 tmp = sclk / (1 << i);
2021 if (tmp >= min || i == 0)
2022 break;
2023 }
2024
2025 return (u8)i;
2026 }
2027
2028 static int ci_initial_switch_from_arb_f0_to_f1(struct radeon_device *rdev)
2029 {
2030 return ni_copy_and_switch_arb_sets(rdev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2031 }
2032
2033 static int ci_reset_to_default(struct radeon_device *rdev)
2034 {
2035 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2036 0 : -EINVAL;
2037 }
2038
2039 static int ci_force_switch_to_arb_f0(struct radeon_device *rdev)
2040 {
2041 u32 tmp;
2042
2043 tmp = (RREG32_SMC(SMC_SCRATCH9) & 0x0000ff00) >> 8;
2044
2045 if (tmp == MC_CG_ARB_FREQ_F0)
2046 return 0;
2047
2048 return ni_copy_and_switch_arb_sets(rdev, tmp, MC_CG_ARB_FREQ_F0);
2049 }
2050
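/*
 * For one sclk/mclk pair, have ATOM program the matching engine/memory
 * arbitration timings, then snapshot MC_ARB_DRAM_TIMING/TIMING2
 * (stored big-endian) and the state-0 burst time into the SMC arb
 * table entry.
 */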
2051 static int ci_populate_memory_timing_parameters(struct radeon_device *rdev,
2052 u32 sclk,
2053 u32 mclk,
2054 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2055 {
2056 u32 dram_timing;
2057 u32 dram_timing2;
2058 u32 burst_time;
2059
2060 radeon_atom_set_engine_dram_timings(rdev, sclk, mclk);
2061
2062 dram_timing = RREG32(MC_ARB_DRAM_TIMING);
2063 dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
2064 burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
2065
2066 arb_regs->McArbDramTiming = cpu_to_be32(dram_timing);
2067 arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2068 arb_regs->McArbBurstTime = (u8)burst_time;
2069
2070 return 0;
2071 }
2072
2073 static int ci_do_program_memory_timing_parameters(struct radeon_device *rdev)
2074 {
2075 struct ci_power_info *pi = ci_get_pi(rdev);
2076 SMU7_Discrete_MCArbDramTimingTable arb_regs;
2077 u32 i, j;
2078 int ret = 0;
2079
2080 memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2081
2082 for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2083 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2084 ret = ci_populate_memory_timing_parameters(rdev,
2085 pi->dpm_table.sclk_table.dpm_levels[i].value,
2086 pi->dpm_table.mclk_table.dpm_levels[j].value,
2087 &arb_regs.entries[i][j]);
2088 if (ret)
2089 break;
2090 }
2091 }
2092
2093 if (ret == 0)
2094 ret = ci_copy_bytes_to_smc(rdev,
2095 pi->arb_table_start,
2096 (u8 *)&arb_regs,
2097 sizeof(SMU7_Discrete_MCArbDramTimingTable),
2098 pi->sram_end);
2099
2100 return ret;
2101 }
2102
2103 static int ci_program_memory_timing_parameters(struct radeon_device *rdev)
2104 {
2105 struct ci_power_info *pi = ci_get_pi(rdev);
2106
2107 if (pi->need_update_smu7_dpm_table == 0)
2108 return 0;
2109
2110 return ci_do_program_memory_timing_parameters(rdev);
2111 }
2112
2113 static void ci_populate_smc_initial_state(struct radeon_device *rdev,
2114 struct radeon_ps *radeon_boot_state)
2115 {
2116 struct ci_ps *boot_state = ci_get_ps(radeon_boot_state);
2117 struct ci_power_info *pi = ci_get_pi(rdev);
2118 u32 level = 0;
2119
2120 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
2121 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
2122 boot_state->performance_levels[0].sclk) {
2123 pi->smc_state_table.GraphicsBootLevel = level;
2124 break;
2125 }
2126 }
2127
2128 for (level = 0; level < rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
2129 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
2130 boot_state->performance_levels[0].mclk) {
2131 pi->smc_state_table.MemoryBootLevel = level;
2132 break;
2133 }
2134 }
2135 }
2136
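/*
 * Collapse a DPM table into a bitmask with bit n set when level n is
 * enabled.  Walking from the top level down and shifting keeps the bit
 * order aligned with the level index: levels {on, off, on} produce
 * mask 0b101.  (The &= 0xFFFFFFFE in the disabled branch is a no-op,
 * since the freshly shifted-in bit is already zero.)
 */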
2137 static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
2138 {
2139 u32 i;
2140 u32 mask_value = 0;
2141
2142 for (i = dpm_table->count; i > 0; i--) {
2143 mask_value = mask_value << 1;
2144 if (dpm_table->dpm_levels[i-1].enabled)
2145 mask_value |= 0x1;
2146 else
2147 mask_value &= 0xFFFFFFFE;
2148 }
2149
2150 return mask_value;
2151 }
2152
2153 static void ci_populate_smc_link_level(struct radeon_device *rdev,
2154 SMU7_Discrete_DpmTable *table)
2155 {
2156 struct ci_power_info *pi = ci_get_pi(rdev);
2157 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2158 u32 i;
2159
2160 for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
2161 table->LinkLevel[i].PcieGenSpeed =
2162 (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
2163 table->LinkLevel[i].PcieLaneCount =
2164 r600_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
2165 table->LinkLevel[i].EnabledForActivity = 1;
2166 table->LinkLevel[i].DownT = cpu_to_be32(5);
2167 table->LinkLevel[i].UpT = cpu_to_be32(30);
2168 }
2169
2170 pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
2171 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
2172 ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
2173 }
2174
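/*
 * The UVD, VCE, ACP and SAMU helpers below share one pattern: copy
 * each clock/voltage pair from the corresponding dependency table,
 * ask ATOM for the post divider at that frequency, then byte-swap the
 * fields for the big-endian SMC.  Note that ret starts out as -EINVAL,
 * so an empty dependency table is reported as an error.
 */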
2175 static int ci_populate_smc_uvd_level(struct radeon_device *rdev,
2176 SMU7_Discrete_DpmTable *table)
2177 {
2178 u32 count;
2179 struct atom_clock_dividers dividers;
2180 int ret = -EINVAL;
2181
2182 table->UvdLevelCount =
2183 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;
2184
2185 for (count = 0; count < table->UvdLevelCount; count++) {
2186 table->UvdLevel[count].VclkFrequency =
2187 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
2188 table->UvdLevel[count].DclkFrequency =
2189 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
2190 table->UvdLevel[count].MinVddc =
2191 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2192 table->UvdLevel[count].MinVddcPhases = 1;
2193
2194 ret = radeon_atom_get_clock_dividers(rdev,
2195 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2196 table->UvdLevel[count].VclkFrequency, false, &dividers);
2197 if (ret)
2198 return ret;
2199
2200 table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;
2201
2202 ret = radeon_atom_get_clock_dividers(rdev,
2203 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2204 table->UvdLevel[count].DclkFrequency, false, &dividers);
2205 if (ret)
2206 return ret;
2207
2208 table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;
2209
2210 table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
2211 table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
2212 table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
2213 }
2214
2215 return ret;
2216 }
2217
2218 static int ci_populate_smc_vce_level(struct radeon_device *rdev,
2219 SMU7_Discrete_DpmTable *table)
2220 {
2221 u32 count;
2222 struct atom_clock_dividers dividers;
2223 int ret = -EINVAL;
2224
2225 table->VceLevelCount =
2226 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;
2227
2228 for (count = 0; count < table->VceLevelCount; count++) {
2229 table->VceLevel[count].Frequency =
2230 rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
2231 table->VceLevel[count].MinVoltage =
2232 (u16)rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2233 table->VceLevel[count].MinPhases = 1;
2234
2235 ret = radeon_atom_get_clock_dividers(rdev,
2236 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2237 table->VceLevel[count].Frequency, false, &dividers);
2238 if (ret)
2239 return ret;
2240
2241 table->VceLevel[count].Divider = (u8)dividers.post_divider;
2242
2243 table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
2244 table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
2245 }
2246
2247 return ret;
2249 }
2250
2251 static int ci_populate_smc_acp_level(struct radeon_device *rdev,
2252 SMU7_Discrete_DpmTable *table)
2253 {
2254 u32 count;
2255 struct atom_clock_dividers dividers;
2256 int ret = -EINVAL;
2257
2258 table->AcpLevelCount = (u8)
2259 (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);
2260
2261 for (count = 0; count < table->AcpLevelCount; count++) {
2262 table->AcpLevel[count].Frequency =
2263 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
2264 table->AcpLevel[count].MinVoltage =
2265 rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
2266 table->AcpLevel[count].MinPhases = 1;
2267
2268 ret = radeon_atom_get_clock_dividers(rdev,
2269 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2270 table->AcpLevel[count].Frequency, false, &dividers);
2271 if (ret)
2272 return ret;
2273
2274 table->AcpLevel[count].Divider = (u8)dividers.post_divider;
2275
2276 table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
2277 table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
2278 }
2279
2280 return ret;
2281 }
2282
2283 static int ci_populate_smc_samu_level(struct radeon_device *rdev,
2284 SMU7_Discrete_DpmTable *table)
2285 {
2286 u32 count;
2287 struct atom_clock_dividers dividers;
2288 int ret = -EINVAL;
2289
2290 table->SamuLevelCount =
2291 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;
2292
2293 for (count = 0; count < table->SamuLevelCount; count++) {
2294 table->SamuLevel[count].Frequency =
2295 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
2296 table->SamuLevel[count].MinVoltage =
2297 rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
2298 table->SamuLevel[count].MinPhases = 1;
2299
2300 ret = radeon_atom_get_clock_dividers(rdev,
2301 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2302 table->SamuLevel[count].Frequency, false, &dividers);
2303 if (ret)
2304 return ret;
2305
2306 table->SamuLevel[count].Divider = (u8)dividers.post_divider;
2307
2308 table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
2309 table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
2310 }
2311
2312 return ret;
2313 }
2314
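/*
 * Compute the MPLL register set for @memory_clock.  The dividers come
 * from ATOM; when memory spread spectrum is enabled, CLKS/CLKV are
 * derived from the ASIC SS info against the nominal frequency (4x the
 * memory clock for GDDR5, 2x for DDR3).  The MRDCK DLL power-down
 * bits are then set according to @dll_state_on.
 */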
2315 static int ci_calculate_mclk_params(struct radeon_device *rdev,
2316 u32 memory_clock,
2317 SMU7_Discrete_MemoryLevel *mclk,
2318 bool strobe_mode,
2319 bool dll_state_on)
2320 {
2321 struct ci_power_info *pi = ci_get_pi(rdev);
2322 u32 dll_cntl = pi->clock_registers.dll_cntl;
2323 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2324 u32 mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
2325 u32 mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
2326 u32 mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
2327 u32 mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
2328 u32 mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
2329 u32 mpll_ss1 = pi->clock_registers.mpll_ss1;
2330 u32 mpll_ss2 = pi->clock_registers.mpll_ss2;
2331 struct atom_mpll_param mpll_param;
2332 int ret;
2333
2334 ret = radeon_atom_get_memory_pll_dividers(rdev, memory_clock, strobe_mode, &mpll_param);
2335 if (ret)
2336 return ret;
2337
2338 mpll_func_cntl &= ~BWCTRL_MASK;
2339 mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
2340
2341 mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
2342 mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
2343 CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
2344
2345 mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
2346 mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
2347
2348 if (pi->mem_gddr5) {
2349 mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
2350 mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
2351 YCLK_POST_DIV(mpll_param.post_div);
2352 }
2353
2354 if (pi->caps_mclk_ss_support) {
2355 struct radeon_atom_ss ss;
2356 u32 freq_nom;
2357 u32 tmp;
2358 u32 reference_clock = rdev->clock.mpll.reference_freq;
2359
2360 if (pi->mem_gddr5)
2361 freq_nom = memory_clock * 4;
2362 else
2363 freq_nom = memory_clock * 2;
2364
2365 tmp = (freq_nom / reference_clock);
2366 tmp = tmp * tmp;
2367 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2368 ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
2369 u32 clks = reference_clock * 5 / ss.rate;
2370 u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
2371
2372 mpll_ss1 &= ~CLKV_MASK;
2373 mpll_ss1 |= CLKV(clkv);
2374
2375 mpll_ss2 &= ~CLKS_MASK;
2376 mpll_ss2 |= CLKS(clks);
2377 }
2378 }
2379
2380 mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
2381 mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
2382
2383 if (dll_state_on)
2384 mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
2385 else
2386 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2387
2388 mclk->MclkFrequency = memory_clock;
2389 mclk->MpllFuncCntl = mpll_func_cntl;
2390 mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
2391 mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
2392 mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
2393 mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
2394 mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
2395 mclk->DllCntl = dll_cntl;
2396 mclk->MpllSs1 = mpll_ss1;
2397 mclk->MpllSs2 = mpll_ss2;
2398
2399 return 0;
2400 }
2401
2402 static int ci_populate_single_memory_level(struct radeon_device *rdev,
2403 u32 memory_clock,
2404 SMU7_Discrete_MemoryLevel *memory_level)
2405 {
2406 struct ci_power_info *pi = ci_get_pi(rdev);
2407 int ret;
2408 bool dll_state_on;
2409
2410 if (rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
2411 ret = ci_get_dependency_volt_by_clk(rdev,
2412 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2413 memory_clock, &memory_level->MinVddc);
2414 if (ret)
2415 return ret;
2416 }
2417
2418 if (rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
2419 ret = ci_get_dependency_volt_by_clk(rdev,
2420 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2421 memory_clock, &memory_level->MinVddci);
2422 if (ret)
2423 return ret;
2424 }
2425
2426 if (rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
2427 ret = ci_get_dependency_volt_by_clk(rdev,
2428 &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2429 memory_clock, &memory_level->MinMvdd);
2430 if (ret)
2431 return ret;
2432 }
2433
2434 memory_level->MinVddcPhases = 1;
2435
2436 if (pi->vddc_phase_shed_control)
2437 ci_populate_phase_value_based_on_mclk(rdev,
2438 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2439 memory_clock,
2440 &memory_level->MinVddcPhases);
2441
2442 memory_level->EnabledForThrottle = 1;
2443 memory_level->EnabledForActivity = 1;
2444 memory_level->UpH = 0;
2445 memory_level->DownH = 100;
2446 memory_level->VoltageDownH = 0;
2447 memory_level->ActivityLevel = (u16)pi->mclk_activity_target;
2448
2449 memory_level->StutterEnable = false;
2450 memory_level->StrobeEnable = false;
2451 memory_level->EdcReadEnable = false;
2452 memory_level->EdcWriteEnable = false;
2453 memory_level->RttEnable = false;
2454
2455 memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2456
2457 if (pi->mclk_stutter_mode_threshold &&
2458 (memory_clock <= pi->mclk_stutter_mode_threshold) &&
2459 	    !pi->uvd_enabled &&
2460 (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
2461 (rdev->pm.dpm.new_active_crtc_count <= 2))
2462 memory_level->StutterEnable = true;
2463
2464 if (pi->mclk_strobe_mode_threshold &&
2465 (memory_clock <= pi->mclk_strobe_mode_threshold))
2466 		memory_level->StrobeEnable = true;
2467
2468 if (pi->mem_gddr5) {
2469 memory_level->StrobeRatio =
2470 si_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
2471 if (pi->mclk_edc_enable_threshold &&
2472 (memory_clock > pi->mclk_edc_enable_threshold))
2473 memory_level->EdcReadEnable = true;
2474
2475 if (pi->mclk_edc_wr_enable_threshold &&
2476 (memory_clock > pi->mclk_edc_wr_enable_threshold))
2477 memory_level->EdcWriteEnable = true;
2478
2479 if (memory_level->StrobeEnable) {
2480 if (si_get_mclk_frequency_ratio(memory_clock, true) >=
2481 ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
2482 				dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2483 			else
2484 				dll_state_on = (RREG32(MC_SEQ_MISC6) >> 1) & 0x1;
2485 } else {
2486 dll_state_on = pi->dll_default_on;
2487 }
2488 } else {
2489 memory_level->StrobeRatio = si_get_ddr3_mclk_frequency_ratio(memory_clock);
2490 		dll_state_on = (RREG32(MC_SEQ_MISC5) >> 1) & 0x1;
2491 }
2492
2493 ret = ci_calculate_mclk_params(rdev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
2494 if (ret)
2495 return ret;
2496
2497 memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
2498 memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
2499 memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
2500 memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);
2501
2502 memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
2503 memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
2504 memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
2505 memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
2506 memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
2507 memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
2508 memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
2509 memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
2510 memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
2511 memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
2512 memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);
2513
2514 return 0;
2515 }
2516
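/*
 * Program the ACPI (lowest power) level: the SPLL is powered down and
 * held in reset with its output mux forced to input 4 (presumably a
 * bypass source), the memory DLLs are reset and powered down, and the
 * minimum voltages fall back to the ACPI or pp-table values.  All
 * multi-byte fields are byte-swapped for the SMC.
 */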
2517 static int ci_populate_smc_acpi_level(struct radeon_device *rdev,
2518 SMU7_Discrete_DpmTable *table)
2519 {
2520 struct ci_power_info *pi = ci_get_pi(rdev);
2521 struct atom_clock_dividers dividers;
2522 SMU7_Discrete_VoltageLevel voltage_level;
2523 u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
2524 u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
2525 u32 dll_cntl = pi->clock_registers.dll_cntl;
2526 u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
2527 int ret;
2528
2529 table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
2530
2531 if (pi->acpi_vddc)
2532 table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
2533 else
2534 table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);
2535
2536 table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;
2537
2538 table->ACPILevel.SclkFrequency = rdev->clock.spll.reference_freq;
2539
2540 ret = radeon_atom_get_clock_dividers(rdev,
2541 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2542 table->ACPILevel.SclkFrequency, false, &dividers);
2543 if (ret)
2544 return ret;
2545
2546 table->ACPILevel.SclkDid = (u8)dividers.post_divider;
2547 table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2548 table->ACPILevel.DeepSleepDivId = 0;
2549
2550 spll_func_cntl &= ~SPLL_PWRON;
2551 spll_func_cntl |= SPLL_RESET;
2552
2553 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
2554 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
2555
2556 table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
2557 table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
2558 table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
2559 table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
2560 table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
2561 table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2562 table->ACPILevel.CcPwrDynRm = 0;
2563 table->ACPILevel.CcPwrDynRm1 = 0;
2564
2565 table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
2566 table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
2567 table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
2568 table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
2569 table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
2570 table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
2571 table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
2572 table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
2573 table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
2574 table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
2575 table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);
2576
2577 table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
2578 table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;
2579
2580 if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
2581 if (pi->acpi_vddci)
2582 table->MemoryACPILevel.MinVddci =
2583 cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
2584 else
2585 table->MemoryACPILevel.MinVddci =
2586 cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
2587 }
2588
2589 if (ci_populate_mvdd_value(rdev, 0, &voltage_level))
2590 table->MemoryACPILevel.MinMvdd = 0;
2591 else
2592 table->MemoryACPILevel.MinMvdd =
2593 cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);
2594
2595 mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
2596 mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
2597
2598 dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
2599
2600 table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
2601 table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
2602 table->MemoryACPILevel.MpllAdFuncCntl =
2603 cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
2604 table->MemoryACPILevel.MpllDqFuncCntl =
2605 cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
2606 table->MemoryACPILevel.MpllFuncCntl =
2607 cpu_to_be32(pi->clock_registers.mpll_func_cntl);
2608 table->MemoryACPILevel.MpllFuncCntl_1 =
2609 cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
2610 table->MemoryACPILevel.MpllFuncCntl_2 =
2611 cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
2612 table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
2613 table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);
2614
2615 table->MemoryACPILevel.EnabledForThrottle = 0;
2616 table->MemoryACPILevel.EnabledForActivity = 0;
2617 table->MemoryACPILevel.UpH = 0;
2618 table->MemoryACPILevel.DownH = 100;
2619 table->MemoryACPILevel.VoltageDownH = 0;
2620 table->MemoryACPILevel.ActivityLevel =
2621 cpu_to_be16((u16)pi->mclk_activity_target);
2622
2623 table->MemoryACPILevel.StutterEnable = false;
2624 table->MemoryACPILevel.StrobeEnable = false;
2625 table->MemoryACPILevel.EdcReadEnable = false;
2626 table->MemoryACPILevel.EdcWriteEnable = false;
2627 table->MemoryACPILevel.RttEnable = false;
2628
2629 return 0;
2630 }
2631 
2633 static int ci_enable_ulv(struct radeon_device *rdev, bool enable)
2634 {
2635 struct ci_power_info *pi = ci_get_pi(rdev);
2636 struct ci_ulv_parm *ulv = &pi->ulv;
2637
2638 if (ulv->supported) {
2639 if (enable)
2640 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
2641 0 : -EINVAL;
2642 else
2643 return (ci_send_msg_to_smc(rdev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
2644 0 : -EINVAL;
2645 }
2646
2647 return 0;
2648 }
2649
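/*
 * Fill in the ULV (ultra low voltage) state.  The target ULV voltage
 * is taken from backbias_response_time, which appears to be repurposed
 * to carry it; a value of 0 disables ULV support.  Non-SVI2 control
 * stores the offset below the lowest sclk-dependency voltage directly;
 * SVI2 converts it to VID steps (delta * 100 / 625, i.e. one step per
 * 6.25 mV assuming the tables are in mV).
 */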
2650 static int ci_populate_ulv_level(struct radeon_device *rdev,
2651 SMU7_Discrete_Ulv *state)
2652 {
2653 struct ci_power_info *pi = ci_get_pi(rdev);
2654 u16 ulv_voltage = rdev->pm.dpm.backbias_response_time;
2655
2656 state->CcPwrDynRm = 0;
2657 state->CcPwrDynRm1 = 0;
2658
2659 if (ulv_voltage == 0) {
2660 pi->ulv.supported = false;
2661 return 0;
2662 }
2663
2664 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2665 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2666 state->VddcOffset = 0;
2667 else
2668 state->VddcOffset =
2669 rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
2670 } else {
2671 if (ulv_voltage > rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
2672 state->VddcOffsetVid = 0;
2673 else
2674 state->VddcOffsetVid = (u8)
2675 ((rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
2676 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2677 }
2678 state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;
2679
2680 state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
2681 state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
2682 state->VddcOffset = cpu_to_be16(state->VddcOffset);
2683
2684 return 0;
2685 }
2686
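/*
 * Compute the SPLL register set for @engine_clock: feedback divider
 * from ATOM (with dithering enabled), plus CLK_S/CLK_V spread-spectrum
 * programming against the VCO frequency when engine SS is supported.
 */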
2687 static int ci_calculate_sclk_params(struct radeon_device *rdev,
2688 u32 engine_clock,
2689 SMU7_Discrete_GraphicsLevel *sclk)
2690 {
2691 struct ci_power_info *pi = ci_get_pi(rdev);
2692 struct atom_clock_dividers dividers;
2693 u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
2694 u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
2695 u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
2696 u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
2697 u32 reference_clock = rdev->clock.spll.reference_freq;
2698 u32 reference_divider;
2699 u32 fbdiv;
2700 int ret;
2701
2702 ret = radeon_atom_get_clock_dividers(rdev,
2703 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
2704 engine_clock, false, &dividers);
2705 if (ret)
2706 return ret;
2707
2708 reference_divider = 1 + dividers.ref_div;
2709 fbdiv = dividers.fb_div & 0x3FFFFFF;
2710
2711 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
2712 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
2713 spll_func_cntl_3 |= SPLL_DITHEN;
2714
2715 if (pi->caps_sclk_ss_support) {
2716 struct radeon_atom_ss ss;
2717 u32 vco_freq = engine_clock * dividers.post_div;
2718
2719 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
2720 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
2721 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
2722 u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
2723
2724 cg_spll_spread_spectrum &= ~CLK_S_MASK;
2725 cg_spll_spread_spectrum |= CLK_S(clk_s);
2726 cg_spll_spread_spectrum |= SSEN;
2727
2728 cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
2729 cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
2730 }
2731 }
2732
2733 sclk->SclkFrequency = engine_clock;
2734 sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
2735 sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
2736 sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
2737 sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
2738 sclk->SclkDid = (u8)dividers.post_divider;
2739
2740 return 0;
2741 }
2742
2743 static int ci_populate_single_graphic_level(struct radeon_device *rdev,
2744 u32 engine_clock,
2745 u16 sclk_activity_level_t,
2746 SMU7_Discrete_GraphicsLevel *graphic_level)
2747 {
2748 struct ci_power_info *pi = ci_get_pi(rdev);
2749 int ret;
2750
2751 ret = ci_calculate_sclk_params(rdev, engine_clock, graphic_level);
2752 if (ret)
2753 return ret;
2754
2755 ret = ci_get_dependency_volt_by_clk(rdev,
2756 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2757 engine_clock, &graphic_level->MinVddc);
2758 if (ret)
2759 return ret;
2760
2761 graphic_level->SclkFrequency = engine_clock;
2762
2763 graphic_level->Flags = 0;
2764 graphic_level->MinVddcPhases = 1;
2765
2766 if (pi->vddc_phase_shed_control)
2767 ci_populate_phase_value_based_on_sclk(rdev,
2768 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table,
2769 engine_clock,
2770 &graphic_level->MinVddcPhases);
2771
2772 graphic_level->ActivityLevel = sclk_activity_level_t;
2773
2774 graphic_level->CcPwrDynRm = 0;
2775 graphic_level->CcPwrDynRm1 = 0;
2776 graphic_level->EnabledForActivity = 1;
2777 graphic_level->EnabledForThrottle = 1;
2778 graphic_level->UpH = 0;
2779 graphic_level->DownH = 0;
2780 graphic_level->VoltageDownH = 0;
2781 graphic_level->PowerThrottle = 0;
2782
2783 if (pi->caps_sclk_ds)
2784 graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(rdev,
2785 engine_clock,
2786 CISLAND_MINIMUM_ENGINE_CLOCK);
2787
2788 graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
2789
2790 graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
2791 graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
2792 graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
2793 graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
2794 graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
2795 graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
2796 graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
2797 graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
2798 graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
2799 graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
2800 graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
2801
2802 return 0;
2803 }
2804
2805 static int ci_populate_all_graphic_levels(struct radeon_device *rdev)
2806 {
2807 struct ci_power_info *pi = ci_get_pi(rdev);
2808 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2809 u32 level_array_address = pi->dpm_table_start +
2810 offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
2811 u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
2812 SMU7_MAX_LEVELS_GRAPHICS;
2813 SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
2814 	u32 i;
	int ret;
2815
2816 memset(levels, 0, level_array_size);
2817
2818 for (i = 0; i < dpm_table->sclk_table.count; i++) {
2819 ret = ci_populate_single_graphic_level(rdev,
2820 dpm_table->sclk_table.dpm_levels[i].value,
2821 (u16)pi->activity_target[i],
2822 &pi->smc_state_table.GraphicsLevel[i]);
2823 if (ret)
2824 return ret;
2825 if (i == (dpm_table->sclk_table.count - 1))
2826 pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
2827 PPSMC_DISPLAY_WATERMARK_HIGH;
2828 }
2829
2830 pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
2831 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
2832 ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);
2833
2834 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2835 (u8 *)levels, level_array_size,
2836 pi->sram_end);
2837 if (ret)
2838 return ret;
2839
2840 return 0;
2841 }
2842
2843 static int ci_populate_ulv_state(struct radeon_device *rdev,
2844 SMU7_Discrete_Ulv *ulv_level)
2845 {
2846 return ci_populate_ulv_level(rdev, ulv_level);
2847 }
2848
2849 static int ci_populate_all_memory_levels(struct radeon_device *rdev)
2850 {
2851 struct ci_power_info *pi = ci_get_pi(rdev);
2852 struct ci_dpm_table *dpm_table = &pi->dpm_table;
2853 u32 level_array_address = pi->dpm_table_start +
2854 offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
2855 u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
2856 SMU7_MAX_LEVELS_MEMORY;
2857 SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
2858 	u32 i;
	int ret;
2859
2860 memset(levels, 0, level_array_size);
2861
2862 for (i = 0; i < dpm_table->mclk_table.count; i++) {
2863 if (dpm_table->mclk_table.dpm_levels[i].value == 0)
2864 return -EINVAL;
2865 ret = ci_populate_single_memory_level(rdev,
2866 dpm_table->mclk_table.dpm_levels[i].value,
2867 &pi->smc_state_table.MemoryLevel[i]);
2868 if (ret)
2869 return ret;
2870 }
2871
2872 pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);
2873
2874 pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
2875 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
2876 ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);
2877
2878 pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
2879 PPSMC_DISPLAY_WATERMARK_HIGH;
2880
2881 ret = ci_copy_bytes_to_smc(rdev, level_array_address,
2882 (u8 *)levels, level_array_size,
2883 pi->sram_end);
2884 if (ret)
2885 return ret;
2886
2887 return 0;
2888 }
2889
2890 static void ci_reset_single_dpm_table(struct radeon_device *rdev,
2891 				      struct ci_single_dpm_table *dpm_table,
2892 u32 count)
2893 {
2894 u32 i;
2895
2896 dpm_table->count = count;
2897 for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
2898 dpm_table->dpm_levels[i].enabled = false;
2899 }
2900
2901 static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
2902 u32 index, u32 pcie_gen, u32 pcie_lanes)
2903 {
2904 dpm_table->dpm_levels[index].value = pcie_gen;
2905 dpm_table->dpm_levels[index].param1 = pcie_lanes;
2906 dpm_table->dpm_levels[index].enabled = true;
2907 }
2908
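/*
 * Build the default 6-entry PCIe DPM table.  If only one of the
 * performance/powersaving sets is usable it is mirrored into the
 * other; -EINVAL is returned if neither is.  Entries 0-5 then step
 * from powersaving min gen/min lanes up to performance max gen/max
 * lanes.
 */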
2909 static int ci_setup_default_pcie_tables(struct radeon_device *rdev)
2910 {
2911 struct ci_power_info *pi = ci_get_pi(rdev);
2912
2913 if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
2914 return -EINVAL;
2915
2916 if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
2917 pi->pcie_gen_powersaving = pi->pcie_gen_performance;
2918 pi->pcie_lane_powersaving = pi->pcie_lane_performance;
2919 } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
2920 pi->pcie_gen_performance = pi->pcie_gen_powersaving;
2921 pi->pcie_lane_performance = pi->pcie_lane_powersaving;
2922 }
2923
2924 ci_reset_single_dpm_table(rdev,
2925 &pi->dpm_table.pcie_speed_table,
2926 SMU7_MAX_LEVELS_LINK);
2927
2928 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
2929 pi->pcie_gen_powersaving.min,
2930 pi->pcie_lane_powersaving.min);
2931 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
2932 pi->pcie_gen_performance.min,
2933 pi->pcie_lane_performance.min);
2934 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
2935 pi->pcie_gen_powersaving.min,
2936 pi->pcie_lane_powersaving.max);
2937 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
2938 pi->pcie_gen_performance.min,
2939 pi->pcie_lane_performance.max);
2940 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
2941 pi->pcie_gen_powersaving.max,
2942 pi->pcie_lane_powersaving.max);
2943 ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
2944 pi->pcie_gen_performance.max,
2945 pi->pcie_lane_performance.max);
2946
2947 pi->dpm_table.pcie_speed_table.count = 6;
2948
2949 return 0;
2950 }
2951
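/*
 * Construct the default DPM tables from the power-play dependency
 * tables: sclk/mclk levels are taken from the dependency entries with
 * consecutive duplicate clocks collapsed, and the voltage tables
 * mirror the dependency voltages (vddc additionally records the CAC
 * leakage value in param1).
 */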
2952 static int ci_setup_default_dpm_tables(struct radeon_device *rdev)
2953 {
2954 struct ci_power_info *pi = ci_get_pi(rdev);
2955 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
2956 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2957 struct radeon_clock_voltage_dependency_table *allowed_mclk_table =
2958 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
2959 struct radeon_cac_leakage_table *std_voltage_table =
2960 &rdev->pm.dpm.dyn_state.cac_leakage_table;
2961 u32 i;
2962
2963 if (allowed_sclk_vddc_table == NULL)
2964 return -EINVAL;
2965 if (allowed_sclk_vddc_table->count < 1)
2966 return -EINVAL;
2967 if (allowed_mclk_table == NULL)
2968 return -EINVAL;
2969 if (allowed_mclk_table->count < 1)
2970 return -EINVAL;
2971
2972 memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));
2973
2974 ci_reset_single_dpm_table(rdev,
2975 &pi->dpm_table.sclk_table,
2976 SMU7_MAX_LEVELS_GRAPHICS);
2977 ci_reset_single_dpm_table(rdev,
2978 &pi->dpm_table.mclk_table,
2979 SMU7_MAX_LEVELS_MEMORY);
2980 ci_reset_single_dpm_table(rdev,
2981 &pi->dpm_table.vddc_table,
2982 SMU7_MAX_LEVELS_VDDC);
2983 ci_reset_single_dpm_table(rdev,
2984 &pi->dpm_table.vddci_table,
2985 SMU7_MAX_LEVELS_VDDCI);
2986 ci_reset_single_dpm_table(rdev,
2987 &pi->dpm_table.mvdd_table,
2988 SMU7_MAX_LEVELS_MVDD);
2989
2990 pi->dpm_table.sclk_table.count = 0;
2991 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
2992 if ((i == 0) ||
2993 (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
2994 allowed_sclk_vddc_table->entries[i].clk)) {
2995 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
2996 allowed_sclk_vddc_table->entries[i].clk;
2997 pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled = true;
2998 pi->dpm_table.sclk_table.count++;
2999 }
3000 }
3001
3002 pi->dpm_table.mclk_table.count = 0;
3003 for (i = 0; i < allowed_mclk_table->count; i++) {
3004 		if ((i == 0) ||
3005 (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
3006 allowed_mclk_table->entries[i].clk)) {
3007 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
3008 allowed_mclk_table->entries[i].clk;
3009 pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled = true;
3010 pi->dpm_table.mclk_table.count++;
3011 }
3012 }
3013
3014 for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
3015 pi->dpm_table.vddc_table.dpm_levels[i].value =
3016 allowed_sclk_vddc_table->entries[i].v;
3017 pi->dpm_table.vddc_table.dpm_levels[i].param1 =
3018 std_voltage_table->entries[i].leakage;
3019 pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
3020 }
3021 pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;
3022
3023 allowed_mclk_table = &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
3024 if (allowed_mclk_table) {
3025 for (i = 0; i < allowed_mclk_table->count; i++) {
3026 pi->dpm_table.vddci_table.dpm_levels[i].value =
3027 allowed_mclk_table->entries[i].v;
3028 pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
3029 }
3030 pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
3031 }
3032
3033 allowed_mclk_table = &rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
3034 if (allowed_mclk_table) {
3035 for (i = 0; i < allowed_mclk_table->count; i++) {
3036 pi->dpm_table.mvdd_table.dpm_levels[i].value =
3037 allowed_mclk_table->entries[i].v;
3038 pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
3039 }
3040 pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
3041 }
3042
3043 ci_setup_default_pcie_tables(rdev);
3044
3045 return 0;
3046 }
3047
3048 static int ci_find_boot_level(struct ci_single_dpm_table *table,
3049 u32 value, u32 *boot_level)
3050 {
3051 u32 i;
3052 int ret = -EINVAL;
3053
3054 	for (i = 0; i < table->count; i++) {
3055 if (value == table->dpm_levels[i].value) {
3056 *boot_level = i;
3057 ret = 0;
3058 }
3059 }
3060
3061 return ret;
3062 }
3063
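/*
 * Top-level construction of the SMC DPM table: build the default DPM
 * tables, then populate the voltage, ULV, graphics, memory, link, ACPI
 * and UVD/VCE/ACP/SAMU levels, set the boot levels and the global
 * intervals, byte-swap the scalar fields, and upload the table
 * starting at SystemFlags, sized to exclude the three trailing
 * SMU7_PIDController blocks.  Note that the two ci_find_boot_level()
 * results are not checked; on a miss the boot level simply stays 0.
 */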
3064 static int ci_init_smc_table(struct radeon_device *rdev)
3065 {
3066 struct ci_power_info *pi = ci_get_pi(rdev);
3067 struct ci_ulv_parm *ulv = &pi->ulv;
3068 struct radeon_ps *radeon_boot_state = rdev->pm.dpm.boot_ps;
3069 SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
3070 int ret;
3071
3072 ret = ci_setup_default_dpm_tables(rdev);
3073 if (ret)
3074 return ret;
3075
3076 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
3077 ci_populate_smc_voltage_tables(rdev, table);
3078
3079 ci_init_fps_limits(rdev);
3080
3081 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
3082 table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
3083
3084 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
3085 table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
3086
3087 if (pi->mem_gddr5)
3088 table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
3089
3090 if (ulv->supported) {
3091 ret = ci_populate_ulv_state(rdev, &pi->smc_state_table.Ulv);
3092 if (ret)
3093 return ret;
3094 WREG32_SMC(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
3095 }
3096
3097 ret = ci_populate_all_graphic_levels(rdev);
3098 if (ret)
3099 return ret;
3100
3101 ret = ci_populate_all_memory_levels(rdev);
3102 if (ret)
3103 return ret;
3104
3105 ci_populate_smc_link_level(rdev, table);
3106
3107 ret = ci_populate_smc_acpi_level(rdev, table);
3108 if (ret)
3109 return ret;
3110
3111 ret = ci_populate_smc_vce_level(rdev, table);
3112 if (ret)
3113 return ret;
3114
3115 ret = ci_populate_smc_acp_level(rdev, table);
3116 if (ret)
3117 return ret;
3118
3119 ret = ci_populate_smc_samu_level(rdev, table);
3120 if (ret)
3121 return ret;
3122
3123 ret = ci_do_program_memory_timing_parameters(rdev);
3124 if (ret)
3125 return ret;
3126
3127 ret = ci_populate_smc_uvd_level(rdev, table);
3128 if (ret)
3129 return ret;
3130
3131 table->UvdBootLevel = 0;
3132 table->VceBootLevel = 0;
3133 table->AcpBootLevel = 0;
3134 table->SamuBootLevel = 0;
3135 table->GraphicsBootLevel = 0;
3136 table->MemoryBootLevel = 0;
3137
3138 ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
3139 pi->vbios_boot_state.sclk_bootup_value,
3140 (u32 *)&pi->smc_state_table.GraphicsBootLevel);
3141
3142 ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
3143 pi->vbios_boot_state.mclk_bootup_value,
3144 (u32 *)&pi->smc_state_table.MemoryBootLevel);
3145
3146 table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
3147 table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
3148 table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;
3149
3150 ci_populate_smc_initial_state(rdev, radeon_boot_state);
3151
3152 ret = ci_populate_bapm_parameters_in_dpm_table(rdev);
3153 if (ret)
3154 return ret;
3155
3156 table->UVDInterval = 1;
3157 table->VCEInterval = 1;
3158 table->ACPInterval = 1;
3159 table->SAMUInterval = 1;
3160 table->GraphicsVoltageChangeEnable = 1;
3161 table->GraphicsThermThrottleEnable = 1;
3162 table->GraphicsInterval = 1;
3163 table->VoltageInterval = 1;
3164 table->ThermalInterval = 1;
3165 table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
3166 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3167 table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
3168 CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
3169 table->MemoryVoltageChangeEnable = 1;
3170 table->MemoryInterval = 1;
3171 table->VoltageResponseTime = 0;
3172 table->VddcVddciDelta = 4000;
3173 table->PhaseResponseTime = 0;
3174 table->MemoryThermThrottleEnable = 1;
3175 table->PCIeBootLinkLevel = 0;
3176 table->PCIeGenInterval = 1;
3177 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
3178 table->SVI2Enable = 1;
3179 else
3180 table->SVI2Enable = 0;
3181
3182 table->ThermGpio = 17;
3183 table->SclkStepSize = 0x4000;
3184
3185 table->SystemFlags = cpu_to_be32(table->SystemFlags);
3186 table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
3187 table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
3188 table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
3189 table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
3190 table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
3191 table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
3192 table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
3193 table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
3194 table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
3195 table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
3196 table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
3197 table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
3198 table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);
3199
3200 ret = ci_copy_bytes_to_smc(rdev,
3201 pi->dpm_table_start +
3202 offsetof(SMU7_Discrete_DpmTable, SystemFlags),
3203 (u8 *)&table->SystemFlags,
3204 sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
3205 pi->sram_end);
3206 if (ret)
3207 return ret;
3208
3209 return 0;
3210 }
3211
3212 static void ci_trim_single_dpm_states(struct radeon_device *rdev,
3213 struct ci_single_dpm_table *dpm_table,
3214 u32 low_limit, u32 high_limit)
3215 {
3216 u32 i;
3217
3218 for (i = 0; i < dpm_table->count; i++) {
3219 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3220 (dpm_table->dpm_levels[i].value > high_limit))
3221 dpm_table->dpm_levels[i].enabled = false;
3222 else
3223 dpm_table->dpm_levels[i].enabled = true;
3224 }
3225 }
3226
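/*
 * Restrict the PCIe DPM table to the [speed_low/lanes_low,
 * speed_high/lanes_high] window, then make a second pass that
 * disables any level duplicating an earlier enabled gen/lane
 * combination.
 */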
3227 static void ci_trim_pcie_dpm_states(struct radeon_device *rdev,
3228 u32 speed_low, u32 lanes_low,
3229 u32 speed_high, u32 lanes_high)
3230 {
3231 struct ci_power_info *pi = ci_get_pi(rdev);
3232 struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3233 u32 i, j;
3234
3235 for (i = 0; i < pcie_table->count; i++) {
3236 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3237 (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3238 (pcie_table->dpm_levels[i].value > speed_high) ||
3239 (pcie_table->dpm_levels[i].param1 > lanes_high))
3240 pcie_table->dpm_levels[i].enabled = false;
3241 else
3242 pcie_table->dpm_levels[i].enabled = true;
3243 }
3244
3245 for (i = 0; i < pcie_table->count; i++) {
3246 if (pcie_table->dpm_levels[i].enabled) {
3247 for (j = i + 1; j < pcie_table->count; j++) {
3248 if (pcie_table->dpm_levels[j].enabled) {
3249 if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3250 (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3251 pcie_table->dpm_levels[j].enabled = false;
3252 }
3253 }
3254 }
3255 }
3256 }
3257
3258 static int ci_trim_dpm_states(struct radeon_device *rdev,
3259 struct radeon_ps *radeon_state)
3260 {
3261 struct ci_ps *state = ci_get_ps(radeon_state);
3262 struct ci_power_info *pi = ci_get_pi(rdev);
3263 u32 high_limit_count;
3264
3265 if (state->performance_level_count < 1)
3266 return -EINVAL;
3267
3268 if (state->performance_level_count == 1)
3269 high_limit_count = 0;
3270 else
3271 high_limit_count = 1;
3272
3273 ci_trim_single_dpm_states(rdev,
3274 &pi->dpm_table.sclk_table,
3275 state->performance_levels[0].sclk,
3276 state->performance_levels[high_limit_count].sclk);
3277
3278 ci_trim_single_dpm_states(rdev,
3279 &pi->dpm_table.mclk_table,
3280 state->performance_levels[0].mclk,
3281 state->performance_levels[high_limit_count].mclk);
3282
3283 ci_trim_pcie_dpm_states(rdev,
3284 state->performance_levels[0].pcie_gen,
3285 state->performance_levels[0].pcie_lane,
3286 state->performance_levels[high_limit_count].pcie_gen,
3287 state->performance_levels[high_limit_count].pcie_lane);
3288
3289 return 0;
3290 }
3291
3292 static int ci_apply_disp_minimum_voltage_request(struct radeon_device *rdev)
3293 {
3294 struct radeon_clock_voltage_dependency_table *disp_voltage_table =
3295 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3296 struct radeon_clock_voltage_dependency_table *vddc_table =
3297 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3298 u32 requested_voltage = 0;
3299 u32 i;
3300
3301 if (disp_voltage_table == NULL)
3302 return -EINVAL;
3303 if (!disp_voltage_table->count)
3304 return -EINVAL;
3305
3306 for (i = 0; i < disp_voltage_table->count; i++) {
3307 if (rdev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3308 requested_voltage = disp_voltage_table->entries[i].v;
3309 }
3310
3311 for (i = 0; i < vddc_table->count; i++) {
3312 if (requested_voltage <= vddc_table->entries[i].v) {
3313 requested_voltage = vddc_table->entries[i].v;
3314 return (ci_send_msg_to_smc_with_parameter(rdev,
3315 PPSMC_MSG_VddC_Request,
3316 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3317 0 : -EINVAL;
3318 }
3319 }
3320
3321 return -EINVAL;
3322 }
3323
3324 static int ci_upload_dpm_level_enable_mask(struct radeon_device *rdev)
3325 {
3326 struct ci_power_info *pi = ci_get_pi(rdev);
3327 PPSMC_Result result;
3328
3329 if (!pi->sclk_dpm_key_disabled) {
3330 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3331 result = ci_send_msg_to_smc_with_parameter(rdev,
3332 PPSMC_MSG_SCLKDPM_SetEnabledMask,
3333 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3334 if (result != PPSMC_Result_OK)
3335 return -EINVAL;
3336 }
3337 }
3338
3339 if (!pi->mclk_dpm_key_disabled) {
3340 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3341 result = ci_send_msg_to_smc_with_parameter(rdev,
3342 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3343 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3344 if (result != PPSMC_Result_OK)
3345 return -EINVAL;
3346 }
3347 }
3348
3349 if (!pi->pcie_dpm_key_disabled) {
3350 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3351 result = ci_send_msg_to_smc_with_parameter(rdev,
3352 PPSMC_MSG_PCIeDPM_SetEnabledMask,
3353 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3354 if (result != PPSMC_Result_OK)
3355 return -EINVAL;
3356 }
3357 }
3358
3359 ci_apply_disp_minimum_voltage_request(rdev);
3360
3361 return 0;
3362 }
3363
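/*
 * Compare the requested state's top sclk/mclk against the current DPM
 * tables and record what needs rebuilding: DPMTABLE_OD_UPDATE_* when
 * the clock is not in the table at all (overdrive), UPDATE_MCLK when
 * the active crtc count changed, and UPDATE_SCLK, which is effectively
 * always set while the display minimum-clock check remains a stub
 * (see the XXX below).
 */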
3364 static void ci_find_dpm_states_clocks_in_dpm_table(struct radeon_device *rdev,
3365 struct radeon_ps *radeon_state)
3366 {
3367 struct ci_power_info *pi = ci_get_pi(rdev);
3368 struct ci_ps *state = ci_get_ps(radeon_state);
3369 struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3370 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3371 struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3372 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3373 u32 i;
3374
3375 pi->need_update_smu7_dpm_table = 0;
3376
3377 for (i = 0; i < sclk_table->count; i++) {
3378 if (sclk == sclk_table->dpm_levels[i].value)
3379 break;
3380 }
3381
3382 if (i >= sclk_table->count) {
3383 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3384 } else {
3385 /* XXX check display min clock requirements */
3386 if (0 != CISLAND_MINIMUM_ENGINE_CLOCK)
3387 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
3388 }
3389
3390 for (i = 0; i < mclk_table->count; i++) {
3391 if (mclk == mclk_table->dpm_levels[i].value)
3392 break;
3393 }
3394
3395 if (i >= mclk_table->count)
3396 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3397
3398 if (rdev->pm.dpm.current_active_crtc_count !=
3399 rdev->pm.dpm.new_active_crtc_count)
3400 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
3401 }
3402
3403 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct radeon_device *rdev,
3404 struct radeon_ps *radeon_state)
3405 {
3406 struct ci_power_info *pi = ci_get_pi(rdev);
3407 struct ci_ps *state = ci_get_ps(radeon_state);
3408 u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3409 u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3410 struct ci_dpm_table *dpm_table = &pi->dpm_table;
3411 int ret;
3412
3413 if (!pi->need_update_smu7_dpm_table)
3414 return 0;
3415
3416 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
3417 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
3418
3419 if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
3420 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
3421
3422 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
3423 ret = ci_populate_all_graphic_levels(rdev);
3424 if (ret)
3425 return ret;
3426 }
3427
3428 if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
3429 ret = ci_populate_all_memory_levels(rdev);
3430 if (ret)
3431 return ret;
3432 }
3433
3434 return 0;
3435 }
3436
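/*
 * Build the UVD level enable mask by walking the uvd clock/voltage
 * dependency table from the top down, keeping each level whose voltage
 * fits under the current AC/DC limit; without per-level UVD DPM support
 * only the single highest usable level is kept.  While UVD runs, memory
 * DPM level 0 is also masked off, presumably to keep mclk off its lowest
 * level during decode; it is restored on disable.
 */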
3437 static int ci_enable_uvd_dpm(struct radeon_device *rdev, bool enable)
3438 {
3439 struct ci_power_info *pi = ci_get_pi(rdev);
3440 const struct radeon_clock_and_voltage_limits *max_limits;
3441 int i;
3442
3443 if (rdev->pm.dpm.ac_power)
3444 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3445 else
3446 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3447
3448 if (enable) {
3449 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
3450
3451 for (i = rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3452 if (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3453 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
3454
3455 if (!pi->caps_uvd_dpm)
3456 break;
3457 }
3458 }
3459
3460 ci_send_msg_to_smc_with_parameter(rdev,
3461 PPSMC_MSG_UVDDPM_SetEnabledMask,
3462 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
3463
3464 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3465 pi->uvd_enabled = true;
3466 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3467 ci_send_msg_to_smc_with_parameter(rdev,
3468 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3469 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3470 }
3471 } else {
3472 if (pi->last_mclk_dpm_enable_mask & 0x1) {
3473 pi->uvd_enabled = false;
3474 pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
3475 ci_send_msg_to_smc_with_parameter(rdev,
3476 PPSMC_MSG_MCLKDPM_SetEnabledMask,
3477 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3478 }
3479 }
3480
3481 return (ci_send_msg_to_smc(rdev, enable ?
3482 PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
3483 0 : -EINVAL;
3484 }
3485
3486 static int ci_enable_vce_dpm(struct radeon_device *rdev, bool enable)
3487 {
3488 struct ci_power_info *pi = ci_get_pi(rdev);
3489 const struct radeon_clock_and_voltage_limits *max_limits;
3490 int i;
3491
3492 if (rdev->pm.dpm.ac_power)
3493 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3494 else
3495 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3496
3497 if (enable) {
3498 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
3499 for (i = rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3500 if (rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3501 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
3502
3503 if (!pi->caps_vce_dpm)
3504 break;
3505 }
3506 }
3507
3508 ci_send_msg_to_smc_with_parameter(rdev,
3509 PPSMC_MSG_VCEDPM_SetEnabledMask,
3510 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
3511 }
3512
3513 return (ci_send_msg_to_smc(rdev, enable ?
3514 PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
3515 0 : -EINVAL;
3516 }
3517
3518 #if 0
3519 static int ci_enable_samu_dpm(struct radeon_device *rdev, bool enable)
3520 {
3521 struct ci_power_info *pi = ci_get_pi(rdev);
3522 const struct radeon_clock_and_voltage_limits *max_limits;
3523 int i;
3524
3525 if (rdev->pm.dpm.ac_power)
3526 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3527 else
3528 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3529
3530 if (enable) {
3531 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
3532 for (i = rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3533 if (rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3534 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
3535
3536 if (!pi->caps_samu_dpm)
3537 break;
3538 }
3539 }
3540
3541 ci_send_msg_to_smc_with_parameter(rdev,
3542 PPSMC_MSG_SAMUDPM_SetEnabledMask,
3543 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
3544 }
3545 return (ci_send_msg_to_smc(rdev, enable ?
3546 PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
3547 0 : -EINVAL;
3548 }
3549
3550 static int ci_enable_acp_dpm(struct radeon_device *rdev, bool enable)
3551 {
3552 struct ci_power_info *pi = ci_get_pi(rdev);
3553 const struct radeon_clock_and_voltage_limits *max_limits;
3554 int i;
3555
3556 if (rdev->pm.dpm.ac_power)
3557 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3558 else
3559 max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3560
3561 if (enable) {
3562 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
3563 for (i = rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
3564 if (rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
3565 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
3566
3567 if (!pi->caps_acp_dpm)
3568 break;
3569 }
3570 }
3571
3572 ci_send_msg_to_smc_with_parameter(rdev,
3573 PPSMC_MSG_ACPDPM_SetEnabledMask,
3574 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
3575 }
3576
3577 return (ci_send_msg_to_smc(rdev, enable ?
3578 PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
3579 0 : -EINVAL;
3580 }
3581 #endif
3582
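/*
 * Select the UVD boot level (0 when per-level UVD DPM is supported or the
 * dependency table is empty, otherwise the highest table entry), latch it
 * into the SMC's DPM_TABLE_475 boot-level field, and enable or disable
 * UVD DPM to match the gating state.
 */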
3583 static int ci_update_uvd_dpm(struct radeon_device *rdev, bool gate)
3584 {
3585 struct ci_power_info *pi = ci_get_pi(rdev);
3586 u32 tmp;
3587
3588 if (!gate) {
3589 if (pi->caps_uvd_dpm ||
3590 (rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
3591 pi->smc_state_table.UvdBootLevel = 0;
3592 else
3593 pi->smc_state_table.UvdBootLevel =
3594 rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
3595
3596 tmp = RREG32_SMC(DPM_TABLE_475);
3597 tmp &= ~UvdBootLevel_MASK;
3598 tmp |= UvdBootLevel(pi->smc_state_table.UvdBootLevel);
3599 WREG32_SMC(DPM_TABLE_475, tmp);
3600 }
3601
3602 return ci_enable_uvd_dpm(rdev, !gate);
3603 }
3604
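/*
 * Return the lowest VCE level whose evclk meets the hard-coded minimum;
 * if no entry qualifies, fall back to the highest level in the table.
 */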
3605 static u8 ci_get_vce_boot_level(struct radeon_device *rdev)
3606 {
3607 u8 i;
3608 u32 min_evclk = 30000; /* ??? */
3609 struct radeon_vce_clock_voltage_dependency_table *table =
3610 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3611
3612 for (i = 0; i < table->count; i++) {
3613 if (table->entries[i].evclk >= min_evclk)
3614 return i;
3615 }
3616
3617 return table->count - 1;
3618 }
3619
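/*
 * On an evclk change, reprogram the VCE boot level and enable VCE DPM when
 * the new state needs the encoder clock, or disable it when it does not.
 */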
3620 static int ci_update_vce_dpm(struct radeon_device *rdev,
3621 struct radeon_ps *radeon_new_state,
3622 struct radeon_ps *radeon_current_state)
3623 {
3624 struct ci_power_info *pi = ci_get_pi(rdev);
3625 int ret = 0;
3626 u32 tmp;
3627
3628 if (radeon_current_state->evclk != radeon_new_state->evclk) {
3629 if (radeon_new_state->evclk) {
3630 pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(rdev);
3631
3632 tmp = RREG32_SMC(DPM_TABLE_475);
3633 tmp &= ~VceBootLevel_MASK;
3634 tmp |= VceBootLevel(pi->smc_state_table.VceBootLevel);
3635 WREG32_SMC(DPM_TABLE_475, tmp);
3636
3637 ret = ci_enable_vce_dpm(rdev, true);
3638 } else {
3639 ret = ci_enable_vce_dpm(rdev, false);
3640 }
3641 }
3642 return ret;
3643 }
3644
3645 #if 0
3646 static int ci_update_samu_dpm(struct radeon_device *rdev, bool gate)
3647 {
3648 return ci_enable_samu_dpm(rdev, gate);
3649 }
3650
3651 static int ci_update_acp_dpm(struct radeon_device *rdev, bool gate)
3652 {
3653 struct ci_power_info *pi = ci_get_pi(rdev);
3654 u32 tmp;
3655
3656 if (!gate) {
3657 pi->smc_state_table.AcpBootLevel = 0;
3658
3659 tmp = RREG32_SMC(DPM_TABLE_475);
3660 tmp &= ~AcpBootLevel_MASK;
3661 tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
3662 WREG32_SMC(DPM_TABLE_475, tmp);
3663 }
3664
3665 return ci_enable_acp_dpm(rdev, !gate);
3666 }
3667 #endif
3668
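/*
 * Trim the dpm tables to the requested state, then derive the
 * sclk/mclk/pcie enable masks from the surviving levels, re-applying the
 * UVD interlock that keeps memory level 0 disabled while UVD is active.
 */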
3669 static int ci_generate_dpm_level_enable_mask(struct radeon_device *rdev,
3670 struct radeon_ps *radeon_state)
3671 {
3672 struct ci_power_info *pi = ci_get_pi(rdev);
3673 int ret;
3674
3675 ret = ci_trim_dpm_states(rdev, radeon_state);
3676 if (ret)
3677 return ret;
3678
3679 pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
3680 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
3681 pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
3682 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
3683 pi->last_mclk_dpm_enable_mask =
3684 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3685 if (pi->uvd_enabled) {
3686 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
3687 pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
3688 }
3689 pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
3690 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
3691
3692 return 0;
3693 }
3694
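/*
 * Return the index of the lowest set bit in level_mask, e.g. 2 for a mask
 * of 0x0c.  Callers must pass a non-zero mask or this loop never
 * terminates.
 */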
3695 static u32 ci_get_lowest_enabled_level(struct radeon_device *rdev,
3696 u32 level_mask)
3697 {
3698 u32 level = 0;
3699
3700 while ((level_mask & (1 << level)) == 0)
3701 level++;
3702
3703 return level;
3704 }
3705
3706
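/*
 * Force the sclk/mclk/pcie domains to their highest enabled level, their
 * lowest enabled level, or back to automatic selection.  For the high
 * case, "while (tmp >>= 1) levels++" computes the index of the highest
 * set bit (fls(tmp) - 1), e.g. 4 for a mask of 0x1f; the code then polls
 * TARGET_AND_CURRENT_PROFILE_INDEX until the hardware reports that level
 * as current.
 */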
3707 int ci_dpm_force_performance_level(struct radeon_device *rdev,
3708 enum radeon_dpm_forced_level level)
3709 {
3710 struct ci_power_info *pi = ci_get_pi(rdev);
3711 PPSMC_Result smc_result;
3712 u32 tmp, levels, i;
3713 int ret;
3714
3715 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
3716 if ((!pi->sclk_dpm_key_disabled) &&
3717 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3718 levels = 0;
3719 tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
3720 while (tmp >>= 1)
3721 levels++;
3722 if (levels) {
3723 ret = ci_dpm_force_state_sclk(rdev, levels);
3724 if (ret)
3725 return ret;
3726 for (i = 0; i < rdev->usec_timeout; i++) {
3727 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3728 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3729 if (tmp == levels)
3730 break;
3731 udelay(1);
3732 }
3733 }
3734 }
3735 if ((!pi->mclk_dpm_key_disabled) &&
3736 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3737 levels = 0;
3738 tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
3739 while (tmp >>= 1)
3740 levels++;
3741 if (levels) {
3742 ret = ci_dpm_force_state_mclk(rdev, levels);
3743 if (ret)
3744 return ret;
3745 for (i = 0; i < rdev->usec_timeout; i++) {
3746 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3747 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3748 if (tmp == levels)
3749 break;
3750 udelay(1);
3751 }
3752 }
3753 }
3754 if ((!pi->pcie_dpm_key_disabled) &&
3755 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3756 levels = 0;
3757 tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
3758 while (tmp >>= 1)
3759 levels++;
3760 if (levels) {
				ret = ci_dpm_force_state_pcie(rdev, levels);
3762 if (ret)
3763 return ret;
3764 for (i = 0; i < rdev->usec_timeout; i++) {
3765 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3766 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3767 if (tmp == levels)
3768 break;
3769 udelay(1);
3770 }
3771 }
3772 }
3773 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
3774 if ((!pi->sclk_dpm_key_disabled) &&
3775 pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3776 levels = ci_get_lowest_enabled_level(rdev,
3777 pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3778 ret = ci_dpm_force_state_sclk(rdev, levels);
3779 if (ret)
3780 return ret;
3781 for (i = 0; i < rdev->usec_timeout; i++) {
3782 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3783 CURR_SCLK_INDEX_MASK) >> CURR_SCLK_INDEX_SHIFT;
3784 if (tmp == levels)
3785 break;
3786 udelay(1);
3787 }
3788 }
3789 if ((!pi->mclk_dpm_key_disabled) &&
3790 pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3791 levels = ci_get_lowest_enabled_level(rdev,
3792 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3793 ret = ci_dpm_force_state_mclk(rdev, levels);
3794 if (ret)
3795 return ret;
3796 for (i = 0; i < rdev->usec_timeout; i++) {
3797 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX) &
3798 CURR_MCLK_INDEX_MASK) >> CURR_MCLK_INDEX_SHIFT;
3799 if (tmp == levels)
3800 break;
3801 udelay(1);
3802 }
3803 }
3804 if ((!pi->pcie_dpm_key_disabled) &&
3805 pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3806 levels = ci_get_lowest_enabled_level(rdev,
3807 pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3808 ret = ci_dpm_force_state_pcie(rdev, levels);
3809 if (ret)
3810 return ret;
3811 for (i = 0; i < rdev->usec_timeout; i++) {
3812 tmp = (RREG32_SMC(TARGET_AND_CURRENT_PROFILE_INDEX_1) &
3813 CURR_PCIE_INDEX_MASK) >> CURR_PCIE_INDEX_SHIFT;
3814 if (tmp == levels)
3815 break;
3816 udelay(1);
3817 }
3818 }
3819 } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
3820 if (!pi->sclk_dpm_key_disabled) {
3821 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel);
3822 if (smc_result != PPSMC_Result_OK)
3823 return -EINVAL;
3824 }
3825 if (!pi->mclk_dpm_key_disabled) {
3826 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_MCLKDPM_NoForcedLevel);
3827 if (smc_result != PPSMC_Result_OK)
3828 return -EINVAL;
3829 }
3830 if (!pi->pcie_dpm_key_disabled) {
3831 smc_result = ci_send_msg_to_smc(rdev, PPSMC_MSG_PCIeDPM_UnForceLevel);
3832 if (smc_result != PPSMC_Result_OK)
3833 return -EINVAL;
3834 }
3835 }
3836
3837 rdev->pm.dpm.forced_level = level;
3838
3839 return 0;
3840 }
3841
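/*
 * Append derived entries for the "special" MC registers: every table slot
 * that shadows MC_SEQ_MISC1 or MC_SEQ_RESERVE_M spawns
 * MC_PMG_CMD_EMRS/MRS/MRS1 entries (plus MC_PMG_AUTO_CMD on non-GDDR5
 * boards) whose per-level data combines the live register's upper half
 * with halves of the source entry's data.
 */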
3842 static int ci_set_mc_special_registers(struct radeon_device *rdev,
3843 struct ci_mc_reg_table *table)
3844 {
3845 struct ci_power_info *pi = ci_get_pi(rdev);
3846 u8 i, j, k;
3847 u32 temp_reg;
3848
3849 for (i = 0, j = table->last; i < table->last; i++) {
3850 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3851 return -EINVAL;
		switch (table->mc_reg_address[i].s1 << 2) {
3853 case MC_SEQ_MISC1:
3854 temp_reg = RREG32(MC_PMG_CMD_EMRS);
3855 table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS >> 2;
3856 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3857 for (k = 0; k < table->num_entries; k++) {
3858 table->mc_reg_table_entry[k].mc_data[j] =
3859 ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
3860 }
3861 j++;
3862 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3863 return -EINVAL;
3864
3865 temp_reg = RREG32(MC_PMG_CMD_MRS);
3866 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS >> 2;
3867 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3868 for (k = 0; k < table->num_entries; k++) {
3869 table->mc_reg_table_entry[k].mc_data[j] =
3870 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3871 if (!pi->mem_gddr5)
3872 table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
3873 }
3874 j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3876 return -EINVAL;
3877
3878 if (!pi->mem_gddr5) {
3879 table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD >> 2;
3880 table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD >> 2;
3881 for (k = 0; k < table->num_entries; k++) {
3882 table->mc_reg_table_entry[k].mc_data[j] =
3883 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
3884 }
3885 j++;
				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3887 return -EINVAL;
3888 }
3889 break;
3890 case MC_SEQ_RESERVE_M:
3891 temp_reg = RREG32(MC_PMG_CMD_MRS1);
3892 table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1 >> 2;
3893 table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3894 for (k = 0; k < table->num_entries; k++) {
3895 table->mc_reg_table_entry[k].mc_data[j] =
3896 (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
3897 }
3898 j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
3900 return -EINVAL;
3901 break;
3902 default:
3903 break;
3904 }
3905
3906 }
3907
3908 table->last = j;
3909
3910 return 0;
3911 }
3912
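/*
 * Map an MC register, stored as a dword offset, to its _LP shadow; the
 * live register values are mirrored into these _LP copies in
 * ci_initialize_mc_reg_table().  Returns false when a register has no
 * shadow.
 */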
3913 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
3914 {
3915 bool result = true;
3916
	switch (in_reg) {
3918 case MC_SEQ_RAS_TIMING >> 2:
3919 *out_reg = MC_SEQ_RAS_TIMING_LP >> 2;
3920 break;
3921 case MC_SEQ_DLL_STBY >> 2:
3922 *out_reg = MC_SEQ_DLL_STBY_LP >> 2;
3923 break;
3924 case MC_SEQ_G5PDX_CMD0 >> 2:
3925 *out_reg = MC_SEQ_G5PDX_CMD0_LP >> 2;
3926 break;
3927 case MC_SEQ_G5PDX_CMD1 >> 2:
3928 *out_reg = MC_SEQ_G5PDX_CMD1_LP >> 2;
3929 break;
3930 case MC_SEQ_G5PDX_CTRL >> 2:
3931 *out_reg = MC_SEQ_G5PDX_CTRL_LP >> 2;
3932 break;
3933 case MC_SEQ_CAS_TIMING >> 2:
3934 *out_reg = MC_SEQ_CAS_TIMING_LP >> 2;
3935 break;
3936 case MC_SEQ_MISC_TIMING >> 2:
3937 *out_reg = MC_SEQ_MISC_TIMING_LP >> 2;
3938 break;
3939 case MC_SEQ_MISC_TIMING2 >> 2:
3940 *out_reg = MC_SEQ_MISC_TIMING2_LP >> 2;
3941 break;
3942 case MC_SEQ_PMG_DVS_CMD >> 2:
3943 *out_reg = MC_SEQ_PMG_DVS_CMD_LP >> 2;
3944 break;
3945 case MC_SEQ_PMG_DVS_CTL >> 2:
3946 *out_reg = MC_SEQ_PMG_DVS_CTL_LP >> 2;
3947 break;
3948 case MC_SEQ_RD_CTL_D0 >> 2:
3949 *out_reg = MC_SEQ_RD_CTL_D0_LP >> 2;
3950 break;
3951 case MC_SEQ_RD_CTL_D1 >> 2:
3952 *out_reg = MC_SEQ_RD_CTL_D1_LP >> 2;
3953 break;
3954 case MC_SEQ_WR_CTL_D0 >> 2:
3955 *out_reg = MC_SEQ_WR_CTL_D0_LP >> 2;
3956 break;
3957 case MC_SEQ_WR_CTL_D1 >> 2:
3958 *out_reg = MC_SEQ_WR_CTL_D1_LP >> 2;
3959 break;
3960 case MC_PMG_CMD_EMRS >> 2:
3961 *out_reg = MC_SEQ_PMG_CMD_EMRS_LP >> 2;
3962 break;
3963 case MC_PMG_CMD_MRS >> 2:
3964 *out_reg = MC_SEQ_PMG_CMD_MRS_LP >> 2;
3965 break;
3966 case MC_PMG_CMD_MRS1 >> 2:
3967 *out_reg = MC_SEQ_PMG_CMD_MRS1_LP >> 2;
3968 break;
3969 case MC_SEQ_PMG_TIMING >> 2:
3970 *out_reg = MC_SEQ_PMG_TIMING_LP >> 2;
3971 break;
3972 case MC_PMG_CMD_MRS2 >> 2:
3973 *out_reg = MC_SEQ_PMG_CMD_MRS2_LP >> 2;
3974 break;
3975 case MC_SEQ_WR_CTL_2 >> 2:
3976 *out_reg = MC_SEQ_WR_CTL_2_LP >> 2;
3977 break;
3978 default:
3979 result = false;
3980 break;
3981 }
3982
3983 return result;
3984 }
3985
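/*
 * Set bit i of valid_flag for every register whose value differs between
 * at least one pair of adjacent entries; registers that are constant
 * across all mclk levels are skipped when the table is sent to the SMC.
 */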
3986 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
3987 {
3988 u8 i, j;
3989
3990 for (i = 0; i < table->last; i++) {
3991 for (j = 1; j < table->num_entries; j++) {
3992 if (table->mc_reg_table_entry[j-1].mc_data[i] !=
3993 table->mc_reg_table_entry[j].mc_data[i]) {
3994 table->valid_flag |= 1 << i;
3995 break;
3996 }
3997 }
3998 }
3999 }
4000
4001 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4002 {
4003 u32 i;
4004 u16 address;
4005
4006 for (i = 0; i < table->last; i++) {
4007 table->mc_reg_address[i].s0 =
4008 ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4009 address : table->mc_reg_address[i].s1;
4010 }
4011 }
4012
4013 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4014 struct ci_mc_reg_table *ci_table)
4015 {
4016 u8 i, j;
4017
4018 if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4019 return -EINVAL;
4020 if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4021 return -EINVAL;
4022
4023 for (i = 0; i < table->last; i++)
4024 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4025
4026 ci_table->last = table->last;
4027
4028 for (i = 0; i < table->num_entries; i++) {
4029 ci_table->mc_reg_table_entry[i].mclk_max =
4030 table->mc_reg_table_entry[i].mclk_max;
4031 for (j = 0; j < table->last; j++)
4032 ci_table->mc_reg_table_entry[i].mc_data[j] =
4033 table->mc_reg_table_entry[i].mc_data[j];
4034 }
4035 ci_table->num_entries = table->num_entries;
4036
4037 return 0;
4038 }
4039
4040 static int ci_initialize_mc_reg_table(struct radeon_device *rdev)
4041 {
4042 struct ci_power_info *pi = ci_get_pi(rdev);
4043 struct atom_mc_reg_table *table;
4044 struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4045 u8 module_index = rv770_get_memory_module_index(rdev);
4046 int ret;
4047
4048 table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4049 if (!table)
4050 return -ENOMEM;
4051
4052 WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
4053 WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
4054 WREG32(MC_SEQ_DLL_STBY_LP, RREG32(MC_SEQ_DLL_STBY));
4055 WREG32(MC_SEQ_G5PDX_CMD0_LP, RREG32(MC_SEQ_G5PDX_CMD0));
4056 WREG32(MC_SEQ_G5PDX_CMD1_LP, RREG32(MC_SEQ_G5PDX_CMD1));
4057 WREG32(MC_SEQ_G5PDX_CTRL_LP, RREG32(MC_SEQ_G5PDX_CTRL));
4058 WREG32(MC_SEQ_PMG_DVS_CMD_LP, RREG32(MC_SEQ_PMG_DVS_CMD));
4059 WREG32(MC_SEQ_PMG_DVS_CTL_LP, RREG32(MC_SEQ_PMG_DVS_CTL));
4060 WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
4061 WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
4062 WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
4063 WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
4064 WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
4065 WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
4066 WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
4067 WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
4068 WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
4069 WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
4070 WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
4071 WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
4072
4073 ret = radeon_atom_init_mc_reg_table(rdev, module_index, table);
4074 if (ret)
4075 goto init_mc_done;
4076
4077 ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4078 if (ret)
4079 goto init_mc_done;
4080
4081 ci_set_s0_mc_reg_index(ci_table);
4082
4083 ret = ci_set_mc_special_registers(rdev, ci_table);
4084 if (ret)
4085 goto init_mc_done;
4086
4087 ci_set_valid_flag(ci_table);
4088
4089 init_mc_done:
4090 kfree(table);
4091
4092 return ret;
4093 }
4094
4095 static int ci_populate_mc_reg_addresses(struct radeon_device *rdev,
4096 SMU7_Discrete_MCRegisters *mc_reg_table)
4097 {
4098 struct ci_power_info *pi = ci_get_pi(rdev);
4099 u32 i, j;
4100
4101 for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4102 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4103 if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4104 return -EINVAL;
4105 mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4106 mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4107 i++;
4108 }
4109 }
4110
4111 mc_reg_table->last = (u8)i;
4112
4113 return 0;
4114 }
4115
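/*
 * Copy only the registers flagged in valid_flag, converting each value to
 * the big-endian layout the SMC expects.
 */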
4116 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4117 SMU7_Discrete_MCRegisterSet *data,
4118 u32 num_entries, u32 valid_flag)
4119 {
4120 u32 i, j;
4121
4122 for (i = 0, j = 0; j < num_entries; j++) {
4123 if (valid_flag & (1 << j)) {
4124 data->value[i] = cpu_to_be32(entry->mc_data[j]);
4125 i++;
4126 }
4127 }
4128 }
4129
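/*
 * Select the first MC register table entry whose mclk_max covers
 * memory_clock (falling back to the last entry when none does) and
 * convert it for the SMC.
 */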
4130 static void ci_convert_mc_reg_table_entry_to_smc(struct radeon_device *rdev,
4131 const u32 memory_clock,
4132 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4133 {
4134 struct ci_power_info *pi = ci_get_pi(rdev);
	u32 i;

	for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4138 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4139 break;
4140 }
4141
4142 if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4143 --i;
4144
4145 ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4146 mc_reg_table_data, pi->mc_reg_table.last,
4147 pi->mc_reg_table.valid_flag);
4148 }
4149
4150 static void ci_convert_mc_reg_table_to_smc(struct radeon_device *rdev,
4151 SMU7_Discrete_MCRegisters *mc_reg_table)
4152 {
4153 struct ci_power_info *pi = ci_get_pi(rdev);
4154 u32 i;
4155
4156 for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4157 ci_convert_mc_reg_table_entry_to_smc(rdev,
4158 pi->dpm_table.mclk_table.dpm_levels[i].value,
4159 &mc_reg_table->data[i]);
4160 }
4161
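/*
 * Build the complete SMU7_Discrete_MCRegisters image (compacted addresses
 * plus one register set per mclk level) and copy it into SMC RAM at
 * mc_reg_table_start.
 */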
4162 static int ci_populate_initial_mc_reg_table(struct radeon_device *rdev)
4163 {
4164 struct ci_power_info *pi = ci_get_pi(rdev);
4165 int ret;
4166
4167 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4168
4169 ret = ci_populate_mc_reg_addresses(rdev, &pi->smc_mc_reg_table);
4170 if (ret)
4171 return ret;
4172 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4173
4174 return ci_copy_bytes_to_smc(rdev,
4175 pi->mc_reg_table_start,
4176 (u8 *)&pi->smc_mc_reg_table,
4177 sizeof(SMU7_Discrete_MCRegisters),
4178 pi->sram_end);
4179 }
4180
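/*
 * After an overdrive mclk change, regenerate only the per-level data
 * portion of the MC register table and rewrite it in place in SMC RAM.
 */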
4181 static int ci_update_and_upload_mc_reg_table(struct radeon_device *rdev)
4182 {
4183 struct ci_power_info *pi = ci_get_pi(rdev);
4184
4185 if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4186 return 0;
4187
4188 memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4189
4190 ci_convert_mc_reg_table_to_smc(rdev, &pi->smc_mc_reg_table);
4191
4192 return ci_copy_bytes_to_smc(rdev,
4193 pi->mc_reg_table_start +
4194 offsetof(SMU7_Discrete_MCRegisters, data[0]),
4195 (u8 *)&pi->smc_mc_reg_table.data[0],
4196 sizeof(SMU7_Discrete_MCRegisterSet) *
4197 pi->dpm_table.mclk_table.count,
4198 pi->sram_end);
4199 }
4200
4201 static void ci_enable_voltage_control(struct radeon_device *rdev)
4202 {
4203 u32 tmp = RREG32_SMC(GENERAL_PWRMGT);
4204
4205 tmp |= VOLT_PWRMGT_EN;
4206 WREG32_SMC(GENERAL_PWRMGT, tmp);
4207 }
4208
4209 static enum radeon_pcie_gen ci_get_maximum_link_speed(struct radeon_device *rdev,
4210 struct radeon_ps *radeon_state)
4211 {
4212 struct ci_ps *state = ci_get_ps(radeon_state);
4213 int i;
4214 u16 pcie_speed, max_speed = 0;
4215
4216 for (i = 0; i < state->performance_level_count; i++) {
4217 pcie_speed = state->performance_levels[i].pcie_gen;
4218 if (max_speed < pcie_speed)
4219 max_speed = pcie_speed;
4220 }
4221
4222 return max_speed;
4223 }
4224
4225 static u16 ci_get_current_pcie_speed(struct radeon_device *rdev)
4226 {
4227 u32 speed_cntl = 0;
4228
4229 speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
4230 speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
4231
4232 return (u16)speed_cntl;
4233 }
4234
4235 static int ci_get_current_pcie_lane_number(struct radeon_device *rdev)
4236 {
4237 u32 link_width = 0;
4238
4239 link_width = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL) & LC_LINK_WIDTH_RD_MASK;
4240 link_width >>= LC_LINK_WIDTH_RD_SHIFT;
4241
4242 switch (link_width) {
4243 case RADEON_PCIE_LC_LINK_WIDTH_X1:
4244 return 1;
4245 case RADEON_PCIE_LC_LINK_WIDTH_X2:
4246 return 2;
4247 case RADEON_PCIE_LC_LINK_WIDTH_X4:
4248 return 4;
4249 case RADEON_PCIE_LC_LINK_WIDTH_X8:
4250 return 8;
4251 case RADEON_PCIE_LC_LINK_WIDTH_X12:
4252 /* not actually supported */
4253 return 12;
4254 case RADEON_PCIE_LC_LINK_WIDTH_X0:
4255 case RADEON_PCIE_LC_LINK_WIDTH_X16:
4256 default:
4257 return 16;
4258 }
4259 }
4260
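/*
 * If the new state needs a faster PCIe link, request it via ACPI before
 * the state switch, falling back one generation at a time on failure; a
 * slower link is only flagged here and requested after the switch in
 * ci_notify_link_speed_change_after_state_change().
 */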
4261 static void ci_request_link_speed_change_before_state_change(struct radeon_device *rdev,
4262 struct radeon_ps *radeon_new_state,
4263 struct radeon_ps *radeon_current_state)
4264 {
4265 struct ci_power_info *pi = ci_get_pi(rdev);
4266 enum radeon_pcie_gen target_link_speed =
4267 ci_get_maximum_link_speed(rdev, radeon_new_state);
4268 enum radeon_pcie_gen current_link_speed;
4269
4270 if (pi->force_pcie_gen == RADEON_PCIE_GEN_INVALID)
4271 current_link_speed = ci_get_maximum_link_speed(rdev, radeon_current_state);
4272 else
4273 current_link_speed = pi->force_pcie_gen;
4274
4275 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
4276 pi->pspp_notify_required = false;
4277 if (target_link_speed > current_link_speed) {
4278 switch (target_link_speed) {
4279 #ifdef CONFIG_ACPI
4280 case RADEON_PCIE_GEN3:
4281 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4282 break;
4283 pi->force_pcie_gen = RADEON_PCIE_GEN2;
4284 if (current_link_speed == RADEON_PCIE_GEN2)
4285 break;
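			/* fall through */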
4286 case RADEON_PCIE_GEN2:
4287 if (radeon_acpi_pcie_performance_request(rdev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4288 break;
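			/* fall through */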
4289 #endif
4290 default:
4291 pi->force_pcie_gen = ci_get_current_pcie_speed(rdev);
4292 break;
4293 }
4294 } else {
4295 if (target_link_speed < current_link_speed)
4296 pi->pspp_notify_required = true;
4297 }
4298 }
4299
4300 static void ci_notify_link_speed_change_after_state_change(struct radeon_device *rdev,
4301 struct radeon_ps *radeon_new_state,
4302 struct radeon_ps *radeon_current_state)
4303 {
4304 struct ci_power_info *pi = ci_get_pi(rdev);
4305 enum radeon_pcie_gen target_link_speed =
4306 ci_get_maximum_link_speed(rdev, radeon_new_state);
4307 u8 request;
4308
4309 if (pi->pspp_notify_required) {
4310 if (target_link_speed == RADEON_PCIE_GEN3)
4311 request = PCIE_PERF_REQ_PECI_GEN3;
4312 else if (target_link_speed == RADEON_PCIE_GEN2)
4313 request = PCIE_PERF_REQ_PECI_GEN2;
4314 else
4315 request = PCIE_PERF_REQ_PECI_GEN1;
4316
4317 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
4318 (ci_get_current_pcie_speed(rdev) > 0))
4319 return;
4320
4321 #ifdef CONFIG_ACPI
4322 radeon_acpi_pcie_performance_request(rdev, request, false);
4323 #endif
4324 }
4325 }
4326
4327 static int ci_set_private_data_variables_based_on_pptable(struct radeon_device *rdev)
4328 {
4329 struct ci_power_info *pi = ci_get_pi(rdev);
4330 struct radeon_clock_voltage_dependency_table *allowed_sclk_vddc_table =
4331 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
4332 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddc_table =
4333 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
4334 struct radeon_clock_voltage_dependency_table *allowed_mclk_vddci_table =
4335 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
4336
4337 if (allowed_sclk_vddc_table == NULL)
4338 return -EINVAL;
4339 if (allowed_sclk_vddc_table->count < 1)
4340 return -EINVAL;
4341 if (allowed_mclk_vddc_table == NULL)
4342 return -EINVAL;
4343 if (allowed_mclk_vddc_table->count < 1)
4344 return -EINVAL;
4345 if (allowed_mclk_vddci_table == NULL)
4346 return -EINVAL;
4347 if (allowed_mclk_vddci_table->count < 1)
4348 return -EINVAL;
4349
4350 pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
4351 pi->max_vddc_in_pp_table =
4352 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4353
4354 pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
4355 pi->max_vddci_in_pp_table =
4356 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4357
4358 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
4359 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
4360 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
4362 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
4363 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
4364 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
4365 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
4366
4367 return 0;
4368 }
4369
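/*
 * The vbios can encode a voltage as a leakage index instead of a real
 * value; when *vddc matches a known leakage id, substitute the measured
 * actual voltage.
 */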
4370 static void ci_patch_with_vddc_leakage(struct radeon_device *rdev, u16 *vddc)
4371 {
4372 struct ci_power_info *pi = ci_get_pi(rdev);
4373 struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
4374 u32 leakage_index;
4375
4376 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4377 if (leakage_table->leakage_id[leakage_index] == *vddc) {
4378 *vddc = leakage_table->actual_voltage[leakage_index];
4379 break;
4380 }
4381 }
4382 }
4383
4384 static void ci_patch_with_vddci_leakage(struct radeon_device *rdev, u16 *vddci)
4385 {
4386 struct ci_power_info *pi = ci_get_pi(rdev);
4387 struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
4388 u32 leakage_index;
4389
4390 for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
4391 if (leakage_table->leakage_id[leakage_index] == *vddci) {
4392 *vddci = leakage_table->actual_voltage[leakage_index];
4393 break;
4394 }
4395 }
4396 }
4397
4398 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4399 struct radeon_clock_voltage_dependency_table *table)
4400 {
4401 u32 i;
4402
4403 if (table) {
4404 for (i = 0; i < table->count; i++)
4405 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4406 }
4407 }
4408
4409 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct radeon_device *rdev,
4410 struct radeon_clock_voltage_dependency_table *table)
4411 {
4412 u32 i;
4413
4414 if (table) {
4415 for (i = 0; i < table->count; i++)
4416 ci_patch_with_vddci_leakage(rdev, &table->entries[i].v);
4417 }
4418 }
4419
4420 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4421 struct radeon_vce_clock_voltage_dependency_table *table)
4422 {
4423 u32 i;
4424
4425 if (table) {
4426 for (i = 0; i < table->count; i++)
4427 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4428 }
4429 }
4430
4431 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct radeon_device *rdev,
4432 struct radeon_uvd_clock_voltage_dependency_table *table)
4433 {
4434 u32 i;
4435
4436 if (table) {
4437 for (i = 0; i < table->count; i++)
4438 ci_patch_with_vddc_leakage(rdev, &table->entries[i].v);
4439 }
4440 }
4441
4442 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct radeon_device *rdev,
4443 struct radeon_phase_shedding_limits_table *table)
4444 {
4445 u32 i;
4446
4447 if (table) {
4448 for (i = 0; i < table->count; i++)
4449 ci_patch_with_vddc_leakage(rdev, &table->entries[i].voltage);
4450 }
4451 }
4452
4453 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct radeon_device *rdev,
4454 struct radeon_clock_and_voltage_limits *table)
4455 {
4456 if (table) {
4457 ci_patch_with_vddc_leakage(rdev, (u16 *)&table->vddc);
4458 ci_patch_with_vddci_leakage(rdev, (u16 *)&table->vddci);
4459 }
4460 }
4461
4462 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct radeon_device *rdev,
4463 struct radeon_cac_leakage_table *table)
4464 {
4465 u32 i;
4466
4467 if (table) {
4468 for (i = 0; i < table->count; i++)
4469 ci_patch_with_vddc_leakage(rdev, &table->entries[i].vddc);
4470 }
4471 }
4472
4473 static void ci_patch_dependency_tables_with_leakage(struct radeon_device *rdev)
4474 {
4476 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4477 &rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
4478 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4479 &rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
4480 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4481 &rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
4482 ci_patch_clock_voltage_dependency_table_with_vddci_leakage(rdev,
4483 &rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
4484 ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4485 &rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
4486 ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4487 &rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
4488 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4489 &rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
4490 ci_patch_clock_voltage_dependency_table_with_vddc_leakage(rdev,
4491 &rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
4492 ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(rdev,
4493 &rdev->pm.dpm.dyn_state.phase_shedding_limits_table);
4494 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4495 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
4496 ci_patch_clock_voltage_limits_with_vddc_leakage(rdev,
4497 &rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
4498 ci_patch_cac_leakage_table_with_vddc_leakage(rdev,
4499 &rdev->pm.dpm.dyn_state.cac_leakage_table);
4501 }
4502
4503 static void ci_get_memory_type(struct radeon_device *rdev)
4504 {
4505 struct ci_power_info *pi = ci_get_pi(rdev);
4506 u32 tmp;
4507
4508 tmp = RREG32(MC_SEQ_MISC0);
4509
4510 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
4511 MC_SEQ_MISC0_GDDR5_VALUE)
4512 pi->mem_gddr5 = true;
4513 else
4514 pi->mem_gddr5 = false;
4516 }
4517
4518 static void ci_update_current_ps(struct radeon_device *rdev,
4519 struct radeon_ps *rps)
4520 {
4521 struct ci_ps *new_ps = ci_get_ps(rps);
4522 struct ci_power_info *pi = ci_get_pi(rdev);
4523
4524 pi->current_rps = *rps;
4525 pi->current_ps = *new_ps;
4526 pi->current_rps.ps_priv = &pi->current_ps;
4527 }
4528
4529 static void ci_update_requested_ps(struct radeon_device *rdev,
4530 struct radeon_ps *rps)
4531 {
4532 struct ci_ps *new_ps = ci_get_ps(rps);
4533 struct ci_power_info *pi = ci_get_pi(rdev);
4534
4535 pi->requested_rps = *rps;
4536 pi->requested_ps = *new_ps;
4537 pi->requested_rps.ps_priv = &pi->requested_ps;
4538 }
4539
4540 int ci_dpm_pre_set_power_state(struct radeon_device *rdev)
4541 {
4542 struct ci_power_info *pi = ci_get_pi(rdev);
4543 struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
4544 struct radeon_ps *new_ps = &requested_ps;
4545
4546 ci_update_requested_ps(rdev, new_ps);
4547
4548 ci_apply_state_adjust_rules(rdev, &pi->requested_rps);
4549
4550 return 0;
4551 }
4552
4553 void ci_dpm_post_set_power_state(struct radeon_device *rdev)
4554 {
4555 struct ci_power_info *pi = ci_get_pi(rdev);
4556 struct radeon_ps *new_ps = &pi->requested_rps;
4557
4558 ci_update_current_ps(rdev, new_ps);
4559 }
4560
4561
4563 {
4564 int r;
4565
4566 r = ci_mc_load_microcode(rdev);
4567 if (r)
4568 DRM_ERROR("Failed to load MC firmware!\n");
4569 ci_read_clock_registers(rdev);
4570 ci_get_memory_type(rdev);
4571 ci_enable_acpi_power_management(rdev);
4572 ci_init_sclk_t(rdev);
4573 }
4574
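/*
 * Bring DPM up from cold: program voltage, AC timing and spread spectrum
 * state, upload the SMC firmware and tables, start the SMC, then enable
 * ULV, deep sleep, DIDT, CAC and power containment before settling on the
 * boot state.
 */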
4575 int ci_dpm_enable(struct radeon_device *rdev)
4576 {
4577 struct ci_power_info *pi = ci_get_pi(rdev);
4578 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4579 int ret;
4580
4581 if (ci_is_smc_running(rdev))
4582 return -EINVAL;
4583 if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
4584 ci_enable_voltage_control(rdev);
4585 ret = ci_construct_voltage_tables(rdev);
4586 if (ret) {
4587 DRM_ERROR("ci_construct_voltage_tables failed\n");
4588 return ret;
4589 }
4590 }
4591 if (pi->caps_dynamic_ac_timing) {
4592 ret = ci_initialize_mc_reg_table(rdev);
4593 if (ret)
4594 pi->caps_dynamic_ac_timing = false;
4595 }
4596 if (pi->dynamic_ss)
4597 ci_enable_spread_spectrum(rdev, true);
4598 if (pi->thermal_protection)
4599 ci_enable_thermal_protection(rdev, true);
4600 ci_program_sstp(rdev);
4601 ci_enable_display_gap(rdev);
4602 ci_program_vc(rdev);
4603 ret = ci_upload_firmware(rdev);
4604 if (ret) {
4605 DRM_ERROR("ci_upload_firmware failed\n");
4606 return ret;
4607 }
4608 ret = ci_process_firmware_header(rdev);
4609 if (ret) {
4610 DRM_ERROR("ci_process_firmware_header failed\n");
4611 return ret;
4612 }
4613 ret = ci_initial_switch_from_arb_f0_to_f1(rdev);
4614 if (ret) {
4615 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
4616 return ret;
4617 }
4618 ret = ci_init_smc_table(rdev);
4619 if (ret) {
4620 DRM_ERROR("ci_init_smc_table failed\n");
4621 return ret;
4622 }
4623 ret = ci_init_arb_table_index(rdev);
4624 if (ret) {
4625 DRM_ERROR("ci_init_arb_table_index failed\n");
4626 return ret;
4627 }
4628 if (pi->caps_dynamic_ac_timing) {
4629 ret = ci_populate_initial_mc_reg_table(rdev);
4630 if (ret) {
4631 DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
4632 return ret;
4633 }
4634 }
4635 ret = ci_populate_pm_base(rdev);
4636 if (ret) {
4637 DRM_ERROR("ci_populate_pm_base failed\n");
4638 return ret;
4639 }
4640 ci_dpm_start_smc(rdev);
4641 ci_enable_vr_hot_gpio_interrupt(rdev);
4642 ret = ci_notify_smc_display_change(rdev, false);
4643 if (ret) {
4644 DRM_ERROR("ci_notify_smc_display_change failed\n");
4645 return ret;
4646 }
4647 ci_enable_sclk_control(rdev, true);
4648 ret = ci_enable_ulv(rdev, true);
4649 if (ret) {
4650 DRM_ERROR("ci_enable_ulv failed\n");
4651 return ret;
4652 }
4653 ret = ci_enable_ds_master_switch(rdev, true);
4654 if (ret) {
4655 DRM_ERROR("ci_enable_ds_master_switch failed\n");
4656 return ret;
4657 }
4658 ret = ci_start_dpm(rdev);
4659 if (ret) {
4660 DRM_ERROR("ci_start_dpm failed\n");
4661 return ret;
4662 }
4663 ret = ci_enable_didt(rdev, true);
4664 if (ret) {
4665 DRM_ERROR("ci_enable_didt failed\n");
4666 return ret;
4667 }
4668 ret = ci_enable_smc_cac(rdev, true);
4669 if (ret) {
4670 DRM_ERROR("ci_enable_smc_cac failed\n");
4671 return ret;
4672 }
4673 ret = ci_enable_power_containment(rdev, true);
4674 if (ret) {
4675 DRM_ERROR("ci_enable_power_containment failed\n");
4676 return ret;
4677 }
4678
4679 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
4680
4681 ci_update_current_ps(rdev, boot_ps);
4682
4683 return 0;
4684 }
4685
4686 int ci_dpm_late_enable(struct radeon_device *rdev)
4687 {
4688 int ret;
4689
4690 if (rdev->irq.installed &&
4691 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
4692 #if 0
4693 PPSMC_Result result;
4694 #endif
4695 ret = ci_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
4696 if (ret) {
4697 DRM_ERROR("ci_set_thermal_temperature_range failed\n");
4698 return ret;
4699 }
4700 rdev->irq.dpm_thermal = true;
4701 radeon_irq_set(rdev);
4702 #if 0
4703 result = ci_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
4704
4705 if (result != PPSMC_Result_OK)
4706 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
4707 #endif
4708 }
4709
4710 ci_dpm_powergate_uvd(rdev, true);
4711
4712 return 0;
4713 }
4714
4715 void ci_dpm_disable(struct radeon_device *rdev)
4716 {
4717 struct ci_power_info *pi = ci_get_pi(rdev);
4718 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
4719
4720 ci_dpm_powergate_uvd(rdev, false);
4721
4722 if (!ci_is_smc_running(rdev))
4723 return;
4724
4725 if (pi->thermal_protection)
4726 ci_enable_thermal_protection(rdev, false);
4727 ci_enable_power_containment(rdev, false);
4728 ci_enable_smc_cac(rdev, false);
4729 ci_enable_didt(rdev, false);
4730 ci_enable_spread_spectrum(rdev, false);
4731 ci_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
4732 ci_stop_dpm(rdev);
	ci_enable_ds_master_switch(rdev, false);
4734 ci_enable_ulv(rdev, false);
4735 ci_clear_vc(rdev);
4736 ci_reset_to_default(rdev);
4737 ci_dpm_stop_smc(rdev);
4738 ci_force_switch_to_arb_f0(rdev);
4739
4740 ci_update_current_ps(rdev, boot_ps);
4741 }
4742
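/*
 * The full state-switch sequence: freeze sclk/mclk DPM, upload the
 * updated level tables and enable masks, update VCE DPM and memory
 * timings, then unfreeze and, if needed, renegotiate the PCIe link speed
 * around the switch.
 */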
4743 int ci_dpm_set_power_state(struct radeon_device *rdev)
4744 {
4745 struct ci_power_info *pi = ci_get_pi(rdev);
4746 struct radeon_ps *new_ps = &pi->requested_rps;
4747 struct radeon_ps *old_ps = &pi->current_rps;
4748 int ret;
4749
4750 ci_find_dpm_states_clocks_in_dpm_table(rdev, new_ps);
4751 if (pi->pcie_performance_request)
4752 ci_request_link_speed_change_before_state_change(rdev, new_ps, old_ps);
4753 ret = ci_freeze_sclk_mclk_dpm(rdev);
4754 if (ret) {
4755 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
4756 return ret;
4757 }
4758 ret = ci_populate_and_upload_sclk_mclk_dpm_levels(rdev, new_ps);
4759 if (ret) {
4760 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
4761 return ret;
4762 }
4763 ret = ci_generate_dpm_level_enable_mask(rdev, new_ps);
4764 if (ret) {
4765 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
4766 return ret;
4767 }
4768
4769 ret = ci_update_vce_dpm(rdev, new_ps, old_ps);
4770 if (ret) {
4771 DRM_ERROR("ci_update_vce_dpm failed\n");
4772 return ret;
4773 }
4774
4775 ret = ci_update_sclk_t(rdev);
4776 if (ret) {
4777 DRM_ERROR("ci_update_sclk_t failed\n");
4778 return ret;
4779 }
4780 if (pi->caps_dynamic_ac_timing) {
4781 ret = ci_update_and_upload_mc_reg_table(rdev);
4782 if (ret) {
4783 DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
4784 return ret;
4785 }
4786 }
4787 ret = ci_program_memory_timing_parameters(rdev);
4788 if (ret) {
4789 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
4790 return ret;
4791 }
4792 ret = ci_unfreeze_sclk_mclk_dpm(rdev);
4793 if (ret) {
4794 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
4795 return ret;
4796 }
4797 ret = ci_upload_dpm_level_enable_mask(rdev);
4798 if (ret) {
4799 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
4800 return ret;
4801 }
4802 if (pi->pcie_performance_request)
4803 ci_notify_link_speed_change_after_state_change(rdev, new_ps, old_ps);
4804
4805 return 0;
4806 }
4807
4808 int ci_dpm_power_control_set_level(struct radeon_device *rdev)
4809 {
4810 return ci_power_control_set_level(rdev);
4811 }
4812
4813 void ci_dpm_reset_asic(struct radeon_device *rdev)
4814 {
4815 ci_set_boot_state(rdev);
4816 }
4817
4818 void ci_dpm_display_configuration_changed(struct radeon_device *rdev)
4819 {
4820 ci_program_display_gap(rdev);
4821 }
4822
4823 union power_info {
4824 struct _ATOM_POWERPLAY_INFO info;
4825 struct _ATOM_POWERPLAY_INFO_V2 info_2;
4826 struct _ATOM_POWERPLAY_INFO_V3 info_3;
4827 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
4828 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
4829 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
4830 };
4831
4832 union pplib_clock_info {
4833 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
4834 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
4835 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
4836 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
4837 struct _ATOM_PPLIB_SI_CLOCK_INFO si;
4838 struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
4839 };
4840
4841 union pplib_power_state {
4842 struct _ATOM_PPLIB_STATE v1;
4843 struct _ATOM_PPLIB_STATE_V2 v2;
4844 };
4845
4846 static void ci_parse_pplib_non_clock_info(struct radeon_device *rdev,
4847 struct radeon_ps *rps,
4848 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
4849 u8 table_rev)
4850 {
4851 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
4852 rps->class = le16_to_cpu(non_clock_info->usClassification);
4853 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
4854
4855 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
4856 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
4857 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
4858 } else {
4859 rps->vclk = 0;
4860 rps->dclk = 0;
4861 }
4862
4863 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
4864 rdev->pm.dpm.boot_ps = rps;
4865 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
4866 rdev->pm.dpm.uvd_ps = rps;
4867 }
4868
4869 static void ci_parse_pplib_clock_info(struct radeon_device *rdev,
4870 struct radeon_ps *rps, int index,
4871 union pplib_clock_info *clock_info)
4872 {
4873 struct ci_power_info *pi = ci_get_pi(rdev);
4874 struct ci_ps *ps = ci_get_ps(rps);
4875 struct ci_pl *pl = &ps->performance_levels[index];
4876
4877 ps->performance_level_count = index + 1;
4878
4879 pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
4880 pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
4881 pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
4882 pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
4883
4884 pl->pcie_gen = r600_get_pcie_gen_support(rdev,
4885 pi->sys_pcie_mask,
4886 pi->vbios_boot_state.pcie_gen_bootup_value,
4887 clock_info->ci.ucPCIEGen);
4888 pl->pcie_lane = r600_get_pcie_lane_support(rdev,
4889 pi->vbios_boot_state.pcie_lane_bootup_value,
4890 le16_to_cpu(clock_info->ci.usPCIELane));
4891
4892 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
4893 pi->acpi_pcie_gen = pl->pcie_gen;
4894 }
4895
4896 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
4897 pi->ulv.supported = true;
4898 pi->ulv.pl = *pl;
4899 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
4900 }
4901
4902 /* patch up boot state */
4903 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
4904 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
4905 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
4906 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
4907 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
4908 }
4909
4910 switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
4911 case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
4912 pi->use_pcie_powersaving_levels = true;
4913 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
4914 pi->pcie_gen_powersaving.max = pl->pcie_gen;
4915 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
4916 pi->pcie_gen_powersaving.min = pl->pcie_gen;
4917 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
4918 pi->pcie_lane_powersaving.max = pl->pcie_lane;
4919 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
4920 pi->pcie_lane_powersaving.min = pl->pcie_lane;
4921 break;
4922 case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
4923 pi->use_pcie_performance_levels = true;
4924 if (pi->pcie_gen_performance.max < pl->pcie_gen)
4925 pi->pcie_gen_performance.max = pl->pcie_gen;
4926 if (pi->pcie_gen_performance.min > pl->pcie_gen)
4927 pi->pcie_gen_performance.min = pl->pcie_gen;
4928 if (pi->pcie_lane_performance.max < pl->pcie_lane)
4929 pi->pcie_lane_performance.max = pl->pcie_lane;
4930 if (pi->pcie_lane_performance.min > pl->pcie_lane)
4931 pi->pcie_lane_performance.min = pl->pcie_lane;
4932 break;
4933 default:
4934 break;
4935 }
4936 }
4937
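/*
 * Walk the ATOM PPLib state array, building a ci_ps for each power state
 * with up to CISLANDS_MAX_HARDWARE_POWERLEVELS clock levels, then fill in
 * the VCE state clocks from the same clock-info array.
 */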
4938 static int ci_parse_power_table(struct radeon_device *rdev)
4939 {
4940 struct radeon_mode_info *mode_info = &rdev->mode_info;
4941 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
4942 union pplib_power_state *power_state;
4943 int i, j, k, non_clock_array_index, clock_array_index;
4944 union pplib_clock_info *clock_info;
4945 struct _StateArray *state_array;
4946 struct _ClockInfoArray *clock_info_array;
4947 struct _NonClockInfoArray *non_clock_info_array;
4948 union power_info *power_info;
4949 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
4950 u16 data_offset;
4951 u8 frev, crev;
4952 u8 *power_state_offset;
4953 struct ci_ps *ps;
4954
4955 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
4956 &frev, &crev, &data_offset))
4957 return -EINVAL;
4958 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
4959
4960 state_array = (struct _StateArray *)
4961 (mode_info->atom_context->bios + data_offset +
4962 le16_to_cpu(power_info->pplib.usStateArrayOffset));
4963 clock_info_array = (struct _ClockInfoArray *)
4964 (mode_info->atom_context->bios + data_offset +
4965 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
4966 non_clock_info_array = (struct _NonClockInfoArray *)
4967 (mode_info->atom_context->bios + data_offset +
4968 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
4969
	rdev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct radeon_ps), GFP_KERNEL);
4972 if (!rdev->pm.dpm.ps)
4973 return -ENOMEM;
4974 power_state_offset = (u8 *)state_array->states;
4975 for (i = 0; i < state_array->ucNumEntries; i++) {
4976 u8 *idx;
4977 power_state = (union pplib_power_state *)power_state_offset;
4978 non_clock_array_index = power_state->v2.nonClockInfoIndex;
4979 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
4980 &non_clock_info_array->nonClockInfo[non_clock_array_index];
		if (!rdev->pm.power_state[i].clock_info) {
			while (i--)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			rdev->pm.dpm.ps = NULL;
			return -EINVAL;
		}
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			while (i--)
				kfree(rdev->pm.dpm.ps[i].ps_priv);
			kfree(rdev->pm.dpm.ps);
			rdev->pm.dpm.ps = NULL;
			return -ENOMEM;
		}
4988 rdev->pm.dpm.ps[i].ps_priv = ps;
4989 ci_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
4990 non_clock_info,
4991 non_clock_info_array->ucEntrySize);
4992 k = 0;
4993 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
4994 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
4995 clock_array_index = idx[j];
4996 if (clock_array_index >= clock_info_array->ucNumEntries)
4997 continue;
4998 if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
4999 break;
5000 clock_info = (union pplib_clock_info *)
5001 ((u8 *)&clock_info_array->clockInfo[0] +
5002 (clock_array_index * clock_info_array->ucEntrySize));
5003 ci_parse_pplib_clock_info(rdev,
5004 &rdev->pm.dpm.ps[i], k,
5005 clock_info);
5006 k++;
5007 }
5008 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5009 }
5010 rdev->pm.dpm.num_ps = state_array->ucNumEntries;
5011
5012 /* fill in the vce power states */
5013 for (i = 0; i < RADEON_MAX_VCE_LEVELS; i++) {
5014 u32 sclk, mclk;
5015 clock_array_index = rdev->pm.dpm.vce_states[i].clk_idx;
5016 clock_info = (union pplib_clock_info *)
5017 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5018 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5019 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5020 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5021 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5022 rdev->pm.dpm.vce_states[i].sclk = sclk;
5023 rdev->pm.dpm.vce_states[i].mclk = mclk;
5024 }
5025
5026 return 0;
5027 }
5028
5029 static int ci_get_vbios_boot_values(struct radeon_device *rdev,
5030 struct ci_vbios_boot_state *boot_state)
5031 {
5032 struct radeon_mode_info *mode_info = &rdev->mode_info;
5033 int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5034 ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5035 u8 frev, crev;
5036 u16 data_offset;
5037
5038 if (atom_parse_data_header(mode_info->atom_context, index, NULL,
5039 &frev, &crev, &data_offset)) {
5040 firmware_info =
5041 (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5042 data_offset);
5043 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5044 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5045 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5046 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(rdev);
5047 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(rdev);
5048 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5049 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5050
5051 return 0;
5052 }
5053 return -EINVAL;
5054 }
5055
5056 void ci_dpm_fini(struct radeon_device *rdev)
5057 {
5058 int i;
5059
5060 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
5061 kfree(rdev->pm.dpm.ps[i].ps_priv);
5062 }
5063 kfree(rdev->pm.dpm.ps);
5064 kfree(rdev->pm.dpm.priv);
5065 kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5066 r600_free_extended_power_table(rdev);
5067 }
5068
5069 int ci_dpm_init(struct radeon_device *rdev)
5070 {
5071 int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5072 u16 data_offset, size;
5073 u8 frev, crev;
5074 struct ci_power_info *pi;
5075 int ret;
5076 u32 mask;
5077
5078 pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5079 if (pi == NULL)
5080 return -ENOMEM;
5081 rdev->pm.dpm.priv = pi;
5082
5083 ret = drm_pcie_get_speed_cap_mask(rdev->ddev, &mask);
5084 if (ret)
5085 pi->sys_pcie_mask = 0;
5086 else
5087 pi->sys_pcie_mask = mask;
5088 pi->force_pcie_gen = RADEON_PCIE_GEN_INVALID;
5089
5090 pi->pcie_gen_performance.max = RADEON_PCIE_GEN1;
5091 pi->pcie_gen_performance.min = RADEON_PCIE_GEN3;
5092 pi->pcie_gen_powersaving.max = RADEON_PCIE_GEN1;
5093 pi->pcie_gen_powersaving.min = RADEON_PCIE_GEN3;
5094
5095 pi->pcie_lane_performance.max = 0;
5096 pi->pcie_lane_performance.min = 16;
5097 pi->pcie_lane_powersaving.max = 0;
5098 pi->pcie_lane_powersaving.min = 16;
5099
5100 ret = ci_get_vbios_boot_values(rdev, &pi->vbios_boot_state);
5101 if (ret) {
5102 ci_dpm_fini(rdev);
5103 return ret;
5104 }
5105
5106 ret = r600_get_platform_caps(rdev);
5107 if (ret) {
5108 ci_dpm_fini(rdev);
5109 return ret;
5110 }
5111
5112 ret = r600_parse_extended_power_table(rdev);
5113 if (ret) {
5114 ci_dpm_fini(rdev);
5115 return ret;
5116 }
5117
5118 ret = ci_parse_power_table(rdev);
5119 if (ret) {
5120 ci_dpm_fini(rdev);
5121 return ret;
5122 }
5123
5124 pi->dll_default_on = false;
5125 pi->sram_end = SMC_RAM_END;
5126
5127 pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5128 pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5129 pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5130 pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5131 pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5132 pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5133 pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5134 pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5135
5136 pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5137
5138 pi->sclk_dpm_key_disabled = 0;
5139 pi->mclk_dpm_key_disabled = 0;
5140 pi->pcie_dpm_key_disabled = 0;
5141
5142 pi->caps_sclk_ds = true;
5143
5144 pi->mclk_strobe_mode_threshold = 40000;
5145 pi->mclk_stutter_mode_threshold = 40000;
5146 pi->mclk_edc_enable_threshold = 40000;
5147 pi->mclk_edc_wr_enable_threshold = 40000;
5148
5149 ci_initialize_powertune_defaults(rdev);
5150
5151 pi->caps_fps = false;
5152
5153 pi->caps_sclk_throttle_low_notification = false;
5154
5155 pi->caps_uvd_dpm = true;
5156 pi->caps_vce_dpm = true;
5157
5158 ci_get_leakage_voltages(rdev);
5159 ci_patch_dependency_tables_with_leakage(rdev);
5160 ci_set_private_data_variables_based_on_pptable(rdev);
5161
	rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4, sizeof(struct radeon_clock_voltage_dependency_entry),
			GFP_KERNEL);
5164 if (!rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5165 ci_dpm_fini(rdev);
5166 return -ENOMEM;
5167 }
5168 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5169 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5170 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5171 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5172 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5173 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5174 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5175 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5176 rdev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5177
5178 rdev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5179 rdev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5180 rdev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5181
5182 rdev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5183 rdev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5184 rdev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5185 rdev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5186
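	/* thermal trip points (millidegrees C); Hawaii targets a slightly
	 * lower range than the other CI parts
	 */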
	if (rdev->family == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

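	/*
	 * Work out how each voltage rail is controlled: prefer a GPIO
	 * lookup table, fall back to SVID2, and drop the platform cap
	 * entirely if VDDCI/MVDD turn out not to be controllable.
	 */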
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (radeon_atom_is_voltage_gpio(rdev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			rdev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		radeon_acpi_is_pcie_performance_request_supported(rdev);
#else
	pi->pcie_performance_request = false;
#endif

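	/* spread spectrum caps depend on the vbios carrying an
	 * ASIC_InternalSS_Info table; dynamic_ss is set either way
	 */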
	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	return 0;
}

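/* debugfs hook: report the average engine and memory clocks */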
void ci_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
						    struct seq_file *m)
{
	u32 sclk = ci_get_average_sclk_freq(rdev);
	u32 mclk = ci_get_average_mclk_freq(rdev);

	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
}

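/* dump a power state: class/caps info, uvd clocks and each performance level */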
void ci_dpm_print_power_state(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	r600_dpm_print_class_info(rps->class, rps->class2);
	r600_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	r600_dpm_print_ps_status(rdev, rps);
}

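/* return the lowest (low == true) or highest sclk of the requested state */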
u32 ci_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

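/* return the lowest (low == true) or highest mclk of the requested state */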
u32 ci_dpm_get_mclk(struct radeon_device *rdev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(rdev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}