Commit | Line | Data |
---|---|---|
3bace359 JZ |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | */ | |
23 | #include "linux/delay.h" | |
24 | #include <linux/types.h> | |
25 | #include <linux/kernel.h> | |
26 | #include <linux/slab.h> | |
27 | #include "cgs_common.h" | |
28 | #include "power_state.h" | |
29 | #include "hwmgr.h" | |
17c00a2f RZ |
30 | #include "pppcielanes.h" |
31 | #include "pp_debug.h" | |
32 | #include "ppatomctrl.h" | |
3bace359 | 33 | |
17c00a2f RZ |
34 | extern int cz_hwmgr_init(struct pp_hwmgr *hwmgr); |
35 | extern int tonga_hwmgr_init(struct pp_hwmgr *hwmgr); | |
aabcb7c1 | 36 | extern int fiji_hwmgr_init(struct pp_hwmgr *hwmgr); |
b83c4ab9 | 37 | extern int ellesemere_hwmgr_init(struct pp_hwmgr *hwmgr); |
aabcb7c1 | 38 | |
3bace359 JZ |
39 | int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) |
40 | { | |
41 | struct pp_hwmgr *hwmgr; | |
42 | ||
43 | if ((handle == NULL) || (pp_init == NULL)) | |
44 | return -EINVAL; | |
45 | ||
46 | hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL); | |
47 | if (hwmgr == NULL) | |
48 | return -ENOMEM; | |
49 | ||
50 | handle->hwmgr = hwmgr; | |
51 | hwmgr->smumgr = handle->smu_mgr; | |
52 | hwmgr->device = pp_init->device; | |
53 | hwmgr->chip_family = pp_init->chip_family; | |
54 | hwmgr->chip_id = pp_init->chip_id; | |
55 | hwmgr->hw_revision = pp_init->rev_id; | |
56 | hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; | |
57 | hwmgr->power_source = PP_PowerSource_AC; | |
58 | ||
59 | switch (hwmgr->chip_family) { | |
bdecc20a JZ |
60 | case AMD_FAMILY_CZ: |
61 | cz_hwmgr_init(hwmgr); | |
62 | break; | |
c82baa28 | 63 | case AMD_FAMILY_VI: |
64 | switch (hwmgr->chip_id) { | |
65 | case CHIP_TONGA: | |
66 | tonga_hwmgr_init(hwmgr); | |
67 | break; | |
aabcb7c1 EH |
68 | case CHIP_FIJI: |
69 | fiji_hwmgr_init(hwmgr); | |
70 | break; | |
b83c4ab9 RZ |
71 | case CHIP_BAFFIN: |
72 | case CHIP_ELLESMERE: | |
73 | ellesemere_hwmgr_init(hwmgr); | |
74 | break; | |
c82baa28 | 75 | default: |
76 | return -EINVAL; | |
77 | } | |
78 | break; | |
3bace359 JZ |
79 | default: |
80 | return -EINVAL; | |
81 | } | |
82 | ||
83 | phm_init_dynamic_caps(hwmgr); | |
84 | ||
85 | return 0; | |
86 | } | |
87 | ||
88 | int hwmgr_fini(struct pp_hwmgr *hwmgr) | |
89 | { | |
90 | if (hwmgr == NULL || hwmgr->ps == NULL) | |
91 | return -EINVAL; | |
92 | ||
93 | kfree(hwmgr->ps); | |
94 | kfree(hwmgr); | |
95 | return 0; | |
96 | } | |
97 | ||
98 | int hw_init_power_state_table(struct pp_hwmgr *hwmgr) | |
99 | { | |
100 | int result; | |
101 | unsigned int i; | |
102 | unsigned int table_entries; | |
103 | struct pp_power_state *state; | |
104 | int size; | |
105 | ||
106 | if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL) | |
107 | return -EINVAL; | |
108 | ||
109 | if (hwmgr->hwmgr_func->get_power_state_size == NULL) | |
110 | return -EINVAL; | |
111 | ||
112 | hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr); | |
113 | ||
114 | hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) + | |
115 | sizeof(struct pp_power_state); | |
116 | ||
117 | hwmgr->ps = kzalloc(size * table_entries, GFP_KERNEL); | |
118 | ||
c15c8d70 RZ |
119 | if (hwmgr->ps == NULL) |
120 | return -ENOMEM; | |
121 | ||
3bace359 JZ |
122 | state = hwmgr->ps; |
123 | ||
124 | for (i = 0; i < table_entries; i++) { | |
125 | result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state); | |
17c00a2f | 126 | |
3bace359 JZ |
127 | if (state->classification.flags & PP_StateClassificationFlag_Boot) { |
128 | hwmgr->boot_ps = state; | |
129 | hwmgr->current_ps = hwmgr->request_ps = state; | |
130 | } | |
131 | ||
132 | state->id = i + 1; /* assigned unique num for every power state id */ | |
133 | ||
134 | if (state->classification.flags & PP_StateClassificationFlag_Uvd) | |
135 | hwmgr->uvd_ps = state; | |
09b7a986 | 136 | state = (struct pp_power_state *)((unsigned long)state + size); |
3bace359 JZ |
137 | } |
138 | ||
139 | return 0; | |
140 | } | |
141 | ||
142 | ||
143 | /** | |
144 | * Returns once the part of the register indicated by the mask has | |
145 | * reached the given value. | |
146 | */ | |
147 | int phm_wait_on_register(struct pp_hwmgr *hwmgr, uint32_t index, | |
148 | uint32_t value, uint32_t mask) | |
149 | { | |
150 | uint32_t i; | |
151 | uint32_t cur_value; | |
152 | ||
153 | if (hwmgr == NULL || hwmgr->device == NULL) { | |
154 | printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); | |
155 | return -EINVAL; | |
156 | } | |
157 | ||
158 | for (i = 0; i < hwmgr->usec_timeout; i++) { | |
159 | cur_value = cgs_read_register(hwmgr->device, index); | |
160 | if ((cur_value & mask) == (value & mask)) | |
161 | break; | |
162 | udelay(1); | |
163 | } | |
164 | ||
165 | /* timeout means wrong logic*/ | |
166 | if (i == hwmgr->usec_timeout) | |
167 | return -1; | |
168 | return 0; | |
169 | } | |
170 | ||
171 | int phm_wait_for_register_unequal(struct pp_hwmgr *hwmgr, | |
172 | uint32_t index, uint32_t value, uint32_t mask) | |
173 | { | |
174 | uint32_t i; | |
175 | uint32_t cur_value; | |
176 | ||
177 | if (hwmgr == NULL || hwmgr->device == NULL) { | |
178 | printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); | |
179 | return -EINVAL; | |
180 | } | |
181 | ||
182 | for (i = 0; i < hwmgr->usec_timeout; i++) { | |
183 | cur_value = cgs_read_register(hwmgr->device, index); | |
184 | if ((cur_value & mask) != (value & mask)) | |
185 | break; | |
186 | udelay(1); | |
187 | } | |
188 | ||
189 | /* timeout means wrong logic*/ | |
190 | if (i == hwmgr->usec_timeout) | |
191 | return -1; | |
192 | return 0; | |
193 | } | |
194 | ||
195 | ||
196 | /** | |
197 | * Returns once the part of the register indicated by the mask has | |
198 | * reached the given value.The indirect space is described by giving | |
199 | * the memory-mapped index of the indirect index register. | |
200 | */ | |
201 | void phm_wait_on_indirect_register(struct pp_hwmgr *hwmgr, | |
202 | uint32_t indirect_port, | |
203 | uint32_t index, | |
204 | uint32_t value, | |
205 | uint32_t mask) | |
206 | { | |
207 | if (hwmgr == NULL || hwmgr->device == NULL) { | |
208 | printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); | |
209 | return; | |
210 | } | |
211 | ||
212 | cgs_write_register(hwmgr->device, indirect_port, index); | |
213 | phm_wait_on_register(hwmgr, indirect_port + 1, mask, value); | |
214 | } | |
215 | ||
216 | void phm_wait_for_indirect_register_unequal(struct pp_hwmgr *hwmgr, | |
217 | uint32_t indirect_port, | |
218 | uint32_t index, | |
219 | uint32_t value, | |
220 | uint32_t mask) | |
221 | { | |
222 | if (hwmgr == NULL || hwmgr->device == NULL) { | |
223 | printk(KERN_ERR "[ powerplay ] Invalid Hardware Manager!"); | |
224 | return; | |
225 | } | |
226 | ||
227 | cgs_write_register(hwmgr->device, indirect_port, index); | |
228 | phm_wait_for_register_unequal(hwmgr, indirect_port + 1, | |
229 | value, mask); | |
230 | } | |
28a18bab RZ |
231 | |
232 | bool phm_cf_want_uvd_power_gating(struct pp_hwmgr *hwmgr) | |
233 | { | |
234 | return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_UVDPowerGating); | |
235 | } | |
236 | ||
237 | bool phm_cf_want_vce_power_gating(struct pp_hwmgr *hwmgr) | |
238 | { | |
239 | return phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_VCEPowerGating); | |
240 | } | |
17c00a2f RZ |
241 | |
242 | ||
243 | int phm_trim_voltage_table(struct pp_atomctrl_voltage_table *vol_table) | |
244 | { | |
245 | uint32_t i, j; | |
246 | uint16_t vvalue; | |
247 | bool found = false; | |
248 | struct pp_atomctrl_voltage_table *table; | |
249 | ||
250 | PP_ASSERT_WITH_CODE((NULL != vol_table), | |
251 | "Voltage Table empty.", return -EINVAL); | |
252 | ||
253 | table = kzalloc(sizeof(struct pp_atomctrl_voltage_table), | |
254 | GFP_KERNEL); | |
255 | ||
256 | if (NULL == table) | |
257 | return -EINVAL; | |
258 | ||
259 | table->mask_low = vol_table->mask_low; | |
260 | table->phase_delay = vol_table->phase_delay; | |
261 | ||
262 | for (i = 0; i < vol_table->count; i++) { | |
263 | vvalue = vol_table->entries[i].value; | |
264 | found = false; | |
265 | ||
266 | for (j = 0; j < table->count; j++) { | |
267 | if (vvalue == table->entries[j].value) { | |
268 | found = true; | |
269 | break; | |
270 | } | |
271 | } | |
272 | ||
273 | if (!found) { | |
274 | table->entries[table->count].value = vvalue; | |
275 | table->entries[table->count].smio_low = | |
276 | vol_table->entries[i].smio_low; | |
277 | table->count++; | |
278 | } | |
279 | } | |
280 | ||
281 | memcpy(vol_table, table, sizeof(struct pp_atomctrl_voltage_table)); | |
282 | kfree(table); | |
283 | ||
284 | return 0; | |
285 | } | |
286 | ||
287 | int phm_get_svi2_mvdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, | |
288 | phm_ppt_v1_clock_voltage_dependency_table *dep_table) | |
289 | { | |
290 | uint32_t i; | |
291 | int result; | |
292 | ||
293 | PP_ASSERT_WITH_CODE((0 != dep_table->count), | |
294 | "Voltage Dependency Table empty.", return -EINVAL); | |
295 | ||
296 | PP_ASSERT_WITH_CODE((NULL != vol_table), | |
297 | "vol_table empty.", return -EINVAL); | |
298 | ||
299 | vol_table->mask_low = 0; | |
300 | vol_table->phase_delay = 0; | |
301 | vol_table->count = dep_table->count; | |
302 | ||
303 | for (i = 0; i < dep_table->count; i++) { | |
304 | vol_table->entries[i].value = dep_table->entries[i].mvdd; | |
305 | vol_table->entries[i].smio_low = 0; | |
306 | } | |
307 | ||
308 | result = phm_trim_voltage_table(vol_table); | |
309 | PP_ASSERT_WITH_CODE((0 == result), | |
310 | "Failed to trim MVDD table.", return result); | |
311 | ||
312 | return 0; | |
313 | } | |
314 | ||
315 | int phm_get_svi2_vddci_voltage_table(struct pp_atomctrl_voltage_table *vol_table, | |
316 | phm_ppt_v1_clock_voltage_dependency_table *dep_table) | |
317 | { | |
318 | uint32_t i; | |
319 | int result; | |
320 | ||
321 | PP_ASSERT_WITH_CODE((0 != dep_table->count), | |
322 | "Voltage Dependency Table empty.", return -EINVAL); | |
323 | ||
324 | PP_ASSERT_WITH_CODE((NULL != vol_table), | |
325 | "vol_table empty.", return -EINVAL); | |
326 | ||
327 | vol_table->mask_low = 0; | |
328 | vol_table->phase_delay = 0; | |
329 | vol_table->count = dep_table->count; | |
330 | ||
331 | for (i = 0; i < dep_table->count; i++) { | |
332 | vol_table->entries[i].value = dep_table->entries[i].vddci; | |
333 | vol_table->entries[i].smio_low = 0; | |
334 | } | |
335 | ||
336 | result = phm_trim_voltage_table(vol_table); | |
337 | PP_ASSERT_WITH_CODE((0 == result), | |
338 | "Failed to trim VDDCI table.", return result); | |
339 | ||
340 | return 0; | |
341 | } | |
342 | ||
343 | int phm_get_svi2_vdd_voltage_table(struct pp_atomctrl_voltage_table *vol_table, | |
344 | phm_ppt_v1_voltage_lookup_table *lookup_table) | |
345 | { | |
346 | int i = 0; | |
347 | ||
348 | PP_ASSERT_WITH_CODE((0 != lookup_table->count), | |
349 | "Voltage Lookup Table empty.", return -EINVAL); | |
350 | ||
351 | PP_ASSERT_WITH_CODE((NULL != vol_table), | |
352 | "vol_table empty.", return -EINVAL); | |
353 | ||
354 | vol_table->mask_low = 0; | |
355 | vol_table->phase_delay = 0; | |
356 | ||
357 | vol_table->count = lookup_table->count; | |
358 | ||
359 | for (i = 0; i < vol_table->count; i++) { | |
360 | vol_table->entries[i].value = lookup_table->entries[i].us_vdd; | |
361 | vol_table->entries[i].smio_low = 0; | |
362 | } | |
363 | ||
364 | return 0; | |
365 | } | |
366 | ||
367 | void phm_trim_voltage_table_to_fit_state_table(uint32_t max_vol_steps, | |
368 | struct pp_atomctrl_voltage_table *vol_table) | |
369 | { | |
370 | unsigned int i, diff; | |
371 | ||
372 | if (vol_table->count <= max_vol_steps) | |
373 | return; | |
374 | ||
375 | diff = vol_table->count - max_vol_steps; | |
376 | ||
377 | for (i = 0; i < max_vol_steps; i++) | |
378 | vol_table->entries[i] = vol_table->entries[i + diff]; | |
379 | ||
380 | vol_table->count = max_vol_steps; | |
381 | ||
382 | return; | |
383 | } | |
384 | ||
385 | int phm_reset_single_dpm_table(void *table, | |
386 | uint32_t count, int max) | |
387 | { | |
388 | int i; | |
389 | ||
390 | struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; | |
391 | ||
392 | PP_ASSERT_WITH_CODE(count <= max, | |
393 | "Fatal error, can not set up single DPM table entries to exceed max number!", | |
394 | ); | |
395 | ||
396 | dpm_table->count = count; | |
397 | for (i = 0; i < max; i++) | |
398 | dpm_table->dpm_level[i].enabled = false; | |
399 | ||
400 | return 0; | |
401 | } | |
402 | ||
403 | void phm_setup_pcie_table_entry( | |
404 | void *table, | |
405 | uint32_t index, uint32_t pcie_gen, | |
406 | uint32_t pcie_lanes) | |
407 | { | |
408 | struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; | |
409 | dpm_table->dpm_level[index].value = pcie_gen; | |
410 | dpm_table->dpm_level[index].param1 = pcie_lanes; | |
411 | dpm_table->dpm_level[index].enabled = 1; | |
412 | } | |
413 | ||
414 | int32_t phm_get_dpm_level_enable_mask_value(void *table) | |
415 | { | |
416 | int32_t i; | |
417 | int32_t mask = 0; | |
418 | struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; | |
419 | ||
420 | for (i = dpm_table->count; i > 0; i--) { | |
421 | mask = mask << 1; | |
422 | if (dpm_table->dpm_level[i - 1].enabled) | |
423 | mask |= 0x1; | |
424 | else | |
425 | mask &= 0xFFFFFFFE; | |
426 | } | |
427 | ||
428 | return mask; | |
429 | } | |
430 | ||
431 | uint8_t phm_get_voltage_index( | |
432 | struct phm_ppt_v1_voltage_lookup_table *lookup_table, uint16_t voltage) | |
433 | { | |
434 | uint8_t count = (uint8_t) (lookup_table->count); | |
435 | uint8_t i; | |
436 | ||
437 | PP_ASSERT_WITH_CODE((NULL != lookup_table), | |
438 | "Lookup Table empty.", return 0); | |
439 | PP_ASSERT_WITH_CODE((0 != count), | |
440 | "Lookup Table empty.", return 0); | |
441 | ||
442 | for (i = 0; i < lookup_table->count; i++) { | |
443 | /* find first voltage equal or bigger than requested */ | |
444 | if (lookup_table->entries[i].us_vdd >= voltage) | |
445 | return i; | |
446 | } | |
447 | /* voltage is bigger than max voltage in the table */ | |
448 | return i - 1; | |
449 | } | |
450 | ||
451 | uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, uint16_t vddci) | |
452 | { | |
453 | uint32_t i; | |
454 | ||
455 | for (i = 0; i < vddci_table->count; i++) { | |
456 | if (vddci_table->entries[i].value >= vddci) | |
457 | return vddci_table->entries[i].value; | |
458 | } | |
459 | ||
460 | PP_ASSERT_WITH_CODE(false, | |
461 | "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", | |
462 | return vddci_table->entries[i].value); | |
463 | } | |
464 | ||
465 | int phm_find_boot_level(void *table, | |
466 | uint32_t value, uint32_t *boot_level) | |
467 | { | |
468 | int result = -EINVAL; | |
469 | uint32_t i; | |
470 | struct vi_dpm_table *dpm_table = (struct vi_dpm_table *)table; | |
471 | ||
472 | for (i = 0; i < dpm_table->count; i++) { | |
473 | if (value == dpm_table->dpm_level[i].value) { | |
474 | *boot_level = i; | |
475 | result = 0; | |
476 | } | |
477 | } | |
478 | ||
479 | return result; | |
480 | } | |
481 | ||
482 | int phm_get_sclk_for_voltage_evv(struct pp_hwmgr *hwmgr, | |
483 | phm_ppt_v1_voltage_lookup_table *lookup_table, | |
484 | uint16_t virtual_voltage_id, int32_t *sclk) | |
485 | { | |
486 | uint8_t entryId; | |
487 | uint8_t voltageId; | |
488 | struct phm_ppt_v1_information *table_info = | |
489 | (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
490 | ||
491 | PP_ASSERT_WITH_CODE(lookup_table->count != 0, "Lookup table is empty", return -EINVAL); | |
492 | ||
493 | /* search for leakage voltage ID 0xff01 ~ 0xff08 and sckl */ | |
494 | for (entryId = 0; entryId < table_info->vdd_dep_on_sclk->count; entryId++) { | |
495 | voltageId = table_info->vdd_dep_on_sclk->entries[entryId].vddInd; | |
496 | if (lookup_table->entries[voltageId].us_vdd == virtual_voltage_id) | |
497 | break; | |
498 | } | |
499 | ||
500 | PP_ASSERT_WITH_CODE(entryId < table_info->vdd_dep_on_sclk->count, | |
501 | "Can't find requested voltage id in vdd_dep_on_sclk table!", | |
502 | return -EINVAL; | |
503 | ); | |
504 | ||
505 | *sclk = table_info->vdd_dep_on_sclk->entries[entryId].clk; | |
506 | ||
507 | return 0; | |
508 | } | |
509 | ||
510 | /** | |
511 | * Initialize Dynamic State Adjustment Rule Settings | |
512 | * | |
513 | * @param hwmgr the address of the powerplay hardware manager. | |
514 | */ | |
515 | int phm_initializa_dynamic_state_adjustment_rule_settings(struct pp_hwmgr *hwmgr) | |
516 | { | |
517 | uint32_t table_size; | |
518 | struct phm_clock_voltage_dependency_table *table_clk_vlt; | |
519 | struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); | |
520 | ||
521 | /* initialize vddc_dep_on_dal_pwrl table */ | |
522 | table_size = sizeof(uint32_t) + 4 * sizeof(struct phm_clock_voltage_dependency_record); | |
523 | table_clk_vlt = (struct phm_clock_voltage_dependency_table *)kzalloc(table_size, GFP_KERNEL); | |
524 | ||
525 | if (NULL == table_clk_vlt) { | |
526 | printk(KERN_ERR "[ powerplay ] Can not allocate space for vddc_dep_on_dal_pwrl! \n"); | |
527 | return -ENOMEM; | |
528 | } else { | |
529 | table_clk_vlt->count = 4; | |
530 | table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_ULTRALOW; | |
531 | table_clk_vlt->entries[0].v = 0; | |
532 | table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_LOW; | |
533 | table_clk_vlt->entries[1].v = 720; | |
534 | table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_NOMINAL; | |
535 | table_clk_vlt->entries[2].v = 810; | |
536 | table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_PERFORMANCE; | |
537 | table_clk_vlt->entries[3].v = 900; | |
538 | pptable_info->vddc_dep_on_dal_pwrl = table_clk_vlt; | |
539 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt; | |
540 | } | |
541 | ||
542 | return 0; | |
543 | } | |
544 | ||
545 | int phm_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) | |
546 | { | |
547 | if (NULL != hwmgr->dyn_state.vddc_dep_on_dal_pwrl) { | |
548 | kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl); | |
549 | hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL; | |
550 | } | |
551 | ||
552 | if (NULL != hwmgr->backend) { | |
553 | kfree(hwmgr->backend); | |
554 | hwmgr->backend = NULL; | |
555 | } | |
556 | ||
557 | return 0; | |
558 | } | |
559 | ||
/**
 * phm_get_lowest_enabled_level - index of the lowest set bit in @mask.
 * @hwmgr: unused; kept for interface compatibility.
 * @mask: DPM level enable mask; expected non-zero.
 *
 * Returns the bit index, or 0 when @mask is 0 (the original looped
 * forever in that case, hitting undefined behavior once the shift
 * count reached 32; it also used a signed 1 << level, which is UB at
 * bit 31).
 */
uint32_t phm_get_lowest_enabled_level(struct pp_hwmgr *hwmgr, uint32_t mask)
{
	uint32_t level = 0;

	if (mask == 0)
		return 0;

	while (0 == (mask & (1U << level)))
		level++;

	return level;
}