#include "r600_dpm.h"
#include "cypress_dpm.h"
#include "sumo_dpm.h"
-#include "atom.h"
#define SUMO_MAX_DEEPSLEEP_DIVIDER_ID 5
#define SUMO_MINIMUM_ENGINE_CLOCK 800
return pi;
}
-u32 sumo_get_xclk(struct radeon_device *rdev)
-{
- return rdev->clock.spll.reference_freq;
-}
-
static void sumo_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
static void sumo_program_git(struct radeon_device *rdev)
{
u32 p, u;
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
r600_calculate_u_and_p(SUMO_GICST_DFLT,
xclk, 16, &p, &u);
static void sumo_program_grsd(struct radeon_device *rdev)
{
u32 p, u;
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
u32 grs = 256 * 25 / 100;
r600_calculate_u_and_p(1, xclk, 14, &p, &u);
WREG32(CG_GCOOR, PHC(grs) | SDC(p) | SU(u));
}
-static void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
+void sumo_gfx_clockgating_initialize(struct radeon_device *rdev)
{
sumo_program_git(rdev);
sumo_program_grsd(rdev);
u32 p, u;
u32 p_c, p_p, d_p;
u32 r_t, i_t;
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
if (rdev->family == CHIP_PALM) {
p_c = 4;
u32 high_clk)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
pi->pasi = 65535 * 100 / high_clk;
pi->asi = 65535 * 100 / high_clk;
}
-static void sumo_program_bsp(struct radeon_device *rdev)
+static void sumo_program_bsp(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct sumo_ps *ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *ps = sumo_get_ps(rps);
u32 i;
u32 highest_engine_clock = ps->levels[ps->num_levels - 1].sclk;
WREG32(CG_AT_7, value);
}
-static void sumo_program_at(struct radeon_device *rdev)
+static void sumo_program_at(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct sumo_ps *ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *ps = sumo_get_ps(rps);
u32 asi;
u32 i;
u32 m_a;
WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}
-static void sumo_program_vc(struct radeon_device *rdev)
+void sumo_program_vc(struct radeon_device *rdev, u32 vrc)
{
- WREG32(CG_FTV, SUMO_VRC_DFLT);
+ WREG32(CG_FTV, vrc);
}
-static void sumo_clear_vc(struct radeon_device *rdev)
+void sumo_clear_vc(struct radeon_device *rdev)
{
WREG32(CG_FTV, 0);
}
-static void sumo_program_sstp(struct radeon_device *rdev)
+void sumo_program_sstp(struct radeon_device *rdev)
{
u32 p, u;
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
r600_calculate_u_and_p(SUMO_SST_DFLT,
xclk, 16, &p, &u);
sumo_power_level_enable(rdev, 0, true);
}
-static void sumo_patch_boost_state(struct radeon_device *rdev)
+static void sumo_patch_boost_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE) {
pi->boost_pl = new_ps->levels[new_ps->num_levels - 1];
}
}
-static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev)
+static void sumo_pre_notify_alt_vddnb_change(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
{
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
- struct sumo_ps *old_ps = sumo_get_ps(rdev->pm.dpm.current_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+ struct sumo_ps *old_ps = sumo_get_ps(old_rps);
u32 nbps1_old = 0;
u32 nbps1_new = 0;
sumo_smu_notify_alt_vddnb_change(rdev, 0, 0);
}
-static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev)
+static void sumo_post_notify_alt_vddnb_change(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
{
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
- struct sumo_ps *old_ps = sumo_get_ps(rdev->pm.dpm.current_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+ struct sumo_ps *old_ps = sumo_get_ps(old_rps);
u32 nbps1_old = 0;
u32 nbps1_new = 0;
sumo_smu_notify_alt_vddnb_change(rdev, 1, 1);
}
-static void sumo_enable_boost(struct radeon_device *rdev, bool enable)
+static void sumo_enable_boost(struct radeon_device *rdev,
+ struct radeon_ps *rps,
+ bool enable)
{
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
if (enable) {
if (new_ps->flags & SUMO_POWERSTATE_FLAGS_BOOST_STATE)
sumo_boost_state_enable(rdev, false);
}
-static void sumo_update_current_power_levels(struct radeon_device *rdev)
-{
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
- struct sumo_power_info *pi = sumo_get_pi(rdev);
-
- pi->current_ps = *new_ps;
-}
-
static void sumo_set_forced_level(struct radeon_device *rdev, u32 index)
{
WREG32_P(CG_SCLK_DPM_CTRL_3, FORCE_SCLK_STATE(index), ~FORCE_SCLK_STATE_MASK);
sumo_set_forced_level(rdev, 0);
}
-static void sumo_program_wl(struct radeon_device *rdev)
+static void sumo_program_wl(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
u32 dpm_ctrl4 = RREG32(CG_SCLK_DPM_CTRL_4);
dpm_ctrl4 &= 0xFFFFFF00;
WREG32(CG_SCLK_DPM_CTRL_4, dpm_ctrl4);
}
-static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev)
+static void sumo_program_power_levels_0_to_n(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
- struct sumo_ps *old_ps = sumo_get_ps(rdev->pm.dpm.current_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+ struct sumo_ps *old_ps = sumo_get_ps(old_rps);
u32 i;
u32 n_current_state_levels = (old_ps == NULL) ? 1 : old_ps->num_levels;
sumo_power_level_enable(rdev, i, false);
}
-static void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
+/* Reprogram the UVD vclk/dclk *before* the engine clock transition.
+ * Skips the update when the UVD clocks are unchanged, or when the new
+ * state's highest sclk is >= the current state's highest sclk — that
+ * (rising-sclk) case is handled by the after-set counterpart instead.
+ */
+static void sumo_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
+{
+ struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+ struct sumo_ps *current_ps = sumo_get_ps(old_rps);
+
+ if ((new_rps->vclk == old_rps->vclk) &&
+ (new_rps->dclk == old_rps->dclk))
+ return;
+
+ if (new_ps->levels[new_ps->num_levels - 1].sclk >=
+ current_ps->levels[current_ps->num_levels - 1].sclk)
+ return;
+
+ radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
+}
+
+/* Reprogram the UVD vclk/dclk *after* the engine clock transition.
+ * Skips the update when the UVD clocks are unchanged, or when the new
+ * state's highest sclk is below the current state's highest sclk — that
+ * (falling-sclk) case was already handled by the before-set counterpart.
+ */
+static void sumo_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
+{
+ struct sumo_ps *new_ps = sumo_get_ps(new_rps);
+ struct sumo_ps *current_ps = sumo_get_ps(old_rps);
+
+ if ((new_rps->vclk == old_rps->vclk) &&
+ (new_rps->dclk == old_rps->dclk))
+ return;
+
+ if (new_ps->levels[new_ps->num_levels - 1].sclk <
+ current_ps->levels[current_ps->num_levels - 1].sclk)
+ return;
+
+ radeon_set_uvd_clocks(rdev, new_rps->vclk, new_rps->dclk);
+}
+
+void sumo_take_smu_control(struct radeon_device *rdev, bool enable)
{
+/* This bit selects who handles display phy powergating.
+ * Clear the bit to let atom handle it.
+ * Set it to let the driver handle it.
+ * For now we just let atom handle it.
+ */
+#if 0
u32 v = RREG32(DOUT_SCRATCH3);
if (enable)
v &= 0xFFFFFFFB;
WREG32(DOUT_SCRATCH3, v);
+#endif
}
static void sumo_enable_sclk_ds(struct radeon_device *rdev, bool enable)
static void sumo_program_ttp(struct radeon_device *rdev)
{
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
u32 p, u;
u32 cg_sclk_dpm_ctrl_5 = RREG32(CG_SCLK_DPM_CTRL_5);
{
u32 cg_sclk_dpm_ctrl_4 = RREG32(CG_SCLK_DPM_CTRL_4);
u32 p, u;
- u32 xclk = sumo_get_xclk(rdev);
+ u32 xclk = radeon_get_xclk(rdev);
r600_calculate_u_and_p(100000,
xclk, 14, &p, &u);
WREG32(CG_SCLK_DPM_CTRL_4, cg_sclk_dpm_ctrl_4);
}
-static void sumo_force_nbp_state(struct radeon_device *rdev)
+static void sumo_force_nbp_state(struct radeon_device *rdev,
+ struct radeon_ps *rps)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct sumo_ps *new_ps = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
if (!pi->driver_nbps_policy_disable) {
if (new_ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
}
}
-static u32 sumo_get_sleep_divider_from_id(u32 id)
+u32 sumo_get_sleep_divider_from_id(u32 id)
{
return 1 << id;
}
-static u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
- u32 sclk,
- u32 min_sclk_in_sr)
+u32 sumo_get_sleep_divider_id_from_clock(struct radeon_device *rdev,
+ u32 sclk,
+ u32 min_sclk_in_sr)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 i;
ps->levels[0].ss_divider_index = 0;
}
-static void sumo_apply_state_adjust_rules(struct radeon_device *rdev)
+static void sumo_apply_state_adjust_rules(struct radeon_device *rdev,
+ struct radeon_ps *new_rps,
+ struct radeon_ps *old_rps)
{
- struct radeon_ps *rps = rdev->pm.dpm.requested_ps;
- struct sumo_ps *ps = sumo_get_ps(rps);
- struct sumo_ps *current_ps = sumo_get_ps(rdev->pm.dpm.current_ps);
+ struct sumo_ps *ps = sumo_get_ps(new_rps);
+ struct sumo_ps *current_ps = sumo_get_ps(old_rps);
struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 min_voltage = 0; /* ??? */
u32 min_sclk = pi->sys_info.min_sclk; /* XXX check against disp reqs */
u32 sclk_in_sr = pi->sys_info.min_sclk; /* ??? */
u32 i;
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
return sumo_patch_thermal_state(rdev, ps, current_ps);
if (pi->enable_boost) {
- if (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
+ if (new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE)
ps->flags |= SUMO_POWERSTATE_FLAGS_BOOST_STATE;
}
- if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
- (rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
- (rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
+ if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) ||
+ (new_rps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) ||
+ (new_rps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE))
ps->flags |= SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE;
for (i = 0; i < ps->num_levels; i++) {
if (ps->flags & SUMO_POWERSTATE_FLAGS_FORCE_NBPS1_STATE)
ps->levels[i].allow_gnb_slow = 1;
- else if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
- (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
+ else if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) ||
+ (new_rps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC))
ps->levels[i].allow_gnb_slow = 0;
else if (i == ps->num_levels - 1)
ps->levels[i].allow_gnb_slow = 0;
sumo_take_smu_control(rdev, false);
}
+/* One-time UVD clock setup: disable the VCLK/DCLK direct-control enables
+ * and program an initial UVD clock via the common helper.
+ */
+static void sumo_uvd_init(struct radeon_device *rdev)
+{
+ u32 tmp;
+
+ tmp = RREG32(CG_VCLK_CNTL);
+ tmp &= ~VCLK_DIR_CNTL_EN;
+ WREG32(CG_VCLK_CNTL, tmp);
+
+ tmp = RREG32(CG_DCLK_CNTL);
+ tmp &= ~DCLK_DIR_CNTL_EN;
+ WREG32(CG_DCLK_CNTL, tmp);
+
+ /* 100 MHz — assumes radeon_set_uvd_clocks() takes units of 10 kHz;
+ * confirm against that helper's definition.
+ */
+ radeon_set_uvd_clocks(rdev, 10000, 10000);
+}
+
static int sumo_set_thermal_temperature_range(struct radeon_device *rdev,
int min_temp, int max_temp)
{
return 0;
}
+/* Record @rps as the now-active power state. Both the generic radeon_ps
+ * and the sumo-private sumo_ps are copied into the power_info struct, and
+ * ps_priv is re-pointed at the embedded private copy so the cached state
+ * is self-contained and does not reference caller-owned storage.
+ */
+static void sumo_update_current_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+ pi->current_rps = *rps;
+ pi->current_ps = *new_ps;
+ pi->current_rps.ps_priv = &pi->current_ps;
+}
+
+/* Record @rps as the requested (pending) power state. Mirrors
+ * sumo_update_current_ps(): copies both the generic and sumo-private
+ * state into the power_info struct and re-points ps_priv at the
+ * embedded private copy so the request is self-contained.
+ */
+static void sumo_update_requested_ps(struct radeon_device *rdev,
+ struct radeon_ps *rps)
+{
+ struct sumo_ps *new_ps = sumo_get_ps(rps);
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+
+ pi->requested_rps = *rps;
+ pi->requested_ps = *new_ps;
+ pi->requested_rps.ps_priv = &pi->requested_ps;
+}
+
int sumo_dpm_enable(struct radeon_device *rdev)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
+ int ret;
if (sumo_dpm_enabled(rdev))
return -EINVAL;
- sumo_enable_clock_power_gating(rdev);
+ ret = sumo_enable_clock_power_gating(rdev);
+ if (ret)
+ return ret;
sumo_program_bootup_state(rdev);
sumo_init_bsp(rdev);
sumo_reset_am(rdev);
sumo_program_power_level_enter_state(rdev);
sumo_enable_voltage_scaling(rdev, true);
sumo_program_sstp(rdev);
- sumo_program_vc(rdev);
+ sumo_program_vc(rdev, SUMO_VRC_DFLT);
sumo_override_cnb_thermal_events(rdev);
sumo_start_dpm(rdev);
sumo_wait_for_level_0(rdev);
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
- sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ ret = sumo_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
rdev->irq.dpm_thermal = true;
radeon_irq_set(rdev);
}
+ sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
+
return 0;
}
rdev->irq.dpm_thermal = false;
radeon_irq_set(rdev);
}
+
+ sumo_update_current_ps(rdev, rdev->pm.dpm.boot_ps);
}
-int sumo_dpm_set_power_state(struct radeon_device *rdev)
+int sumo_dpm_pre_set_power_state(struct radeon_device *rdev)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct radeon_ps requested_ps = *rdev->pm.dpm.requested_ps;
+ struct radeon_ps *new_ps = &requested_ps;
+
+ sumo_update_requested_ps(rdev, new_ps);
if (pi->enable_dynamic_patch_ps)
- sumo_apply_state_adjust_rules(rdev);
- sumo_update_current_power_levels(rdev);
+ sumo_apply_state_adjust_rules(rdev,
+ &pi->requested_rps,
+ &pi->current_rps);
+
+ return 0;
+}
+
+int sumo_dpm_set_power_state(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+ struct radeon_ps *old_ps = &pi->current_rps;
+
+ if (pi->enable_dpm)
+ sumo_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
if (pi->enable_boost) {
- sumo_enable_boost(rdev, false);
- sumo_patch_boost_state(rdev);
+ sumo_enable_boost(rdev, new_ps, false);
+ sumo_patch_boost_state(rdev, new_ps);
}
if (pi->enable_dpm) {
- sumo_pre_notify_alt_vddnb_change(rdev);
+ sumo_pre_notify_alt_vddnb_change(rdev, new_ps, old_ps);
sumo_enable_power_level_0(rdev);
sumo_set_forced_level_0(rdev);
sumo_set_forced_mode_enabled(rdev);
sumo_wait_for_level_0(rdev);
- sumo_program_power_levels_0_to_n(rdev);
- sumo_program_wl(rdev);
- sumo_program_bsp(rdev);
- sumo_program_at(rdev);
- sumo_force_nbp_state(rdev);
+ sumo_program_power_levels_0_to_n(rdev, new_ps, old_ps);
+ sumo_program_wl(rdev, new_ps);
+ sumo_program_bsp(rdev, new_ps);
+ sumo_program_at(rdev, new_ps);
+ sumo_force_nbp_state(rdev, new_ps);
sumo_set_forced_mode_disabled(rdev);
sumo_set_forced_mode_enabled(rdev);
sumo_set_forced_mode_disabled(rdev);
- sumo_post_notify_alt_vddnb_change(rdev);
+ sumo_post_notify_alt_vddnb_change(rdev, new_ps, old_ps);
}
if (pi->enable_boost)
- sumo_enable_boost(rdev, true);
+ sumo_enable_boost(rdev, new_ps, true);
+ if (pi->enable_dpm)
+ sumo_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
return 0;
}
+/* Post-transition hook: promote the previously requested power state
+ * (cached in pi->requested_rps) to the current one.
+ */
+void sumo_dpm_post_set_power_state(struct radeon_device *rdev)
+{
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct radeon_ps *new_ps = &pi->requested_rps;
+
+ sumo_update_current_ps(rdev, new_ps);
+}
+
void sumo_dpm_reset_asic(struct radeon_device *rdev)
{
sumo_program_bootup_state(rdev);
sumo_program_acpi_power_level(rdev);
sumo_enable_acpi_pm(rdev);
sumo_take_smu_control(rdev, true);
+ sumo_uvd_init(rdev);
}
void sumo_dpm_display_configuration_changed(struct radeon_device *rdev)
return 0;
}
-static u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev, u32 vid_2bit)
+u32 sumo_convert_vid2_to_vid7(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ u32 vid_2bit)
{
- struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 i;
- for (i = 0; i < pi->sys_info.vid_mapping_table.num_entries; i++) {
- if (pi->sys_info.vid_mapping_table.entries[i].vid_2bit == vid_2bit)
- return pi->sys_info.vid_mapping_table.entries[i].vid_7bit;
+ for (i = 0; i < vid_mapping_table->num_entries; i++) {
+ if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
+ return vid_mapping_table->entries[i].vid_7bit;
}
- return pi->sys_info.vid_mapping_table.entries[pi->sys_info.vid_mapping_table.num_entries - 1].vid_7bit;
+ return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
}
static u16 sumo_convert_voltage_index_to_value(struct radeon_device *rdev,
u32 vid_2bit)
{
- u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, vid_2bit);
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ u32 vid_7bit = sumo_convert_vid2_to_vid7(rdev, &pi->sys_info.vid_mapping_table, vid_2bit);
if (vid_7bit > 0x7C)
return 0;
}
static void sumo_construct_display_voltage_mapping_table(struct radeon_device *rdev,
+ struct sumo_disp_clock_voltage_mapping_table *disp_clk_voltage_mapping_table,
ATOM_CLK_VOLT_CAPABILITY *table)
{
- struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 i;
for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
if (table[i].ulMaximumSupportedCLK == 0)
break;
- pi->sys_info.disp_clk_voltage_mapping_table.display_clock_frequency[i] =
+ disp_clk_voltage_mapping_table->display_clock_frequency[i] =
table[i].ulMaximumSupportedCLK;
}
- pi->sys_info.disp_clk_voltage_mapping_table.num_max_voltage_levels = i;
+ disp_clk_voltage_mapping_table->num_max_voltage_levels = i;
- if (pi->sys_info.disp_clk_voltage_mapping_table.num_max_voltage_levels == 0) {
- pi->sys_info.disp_clk_voltage_mapping_table.display_clock_frequency[0] = 80000;
- pi->sys_info.disp_clk_voltage_mapping_table.num_max_voltage_levels = 1;
+ if (disp_clk_voltage_mapping_table->num_max_voltage_levels == 0) {
+ disp_clk_voltage_mapping_table->display_clock_frequency[0] = 80000;
+ disp_clk_voltage_mapping_table->num_max_voltage_levels = 1;
}
}
-static void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
- ATOM_AVAILABLE_SCLK_LIST *table)
+void sumo_construct_sclk_voltage_mapping_table(struct radeon_device *rdev,
+ struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
{
- struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 i;
u32 n = 0;
u32 prev_sclk = 0;
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK > prev_sclk) {
- pi->sys_info.sclk_voltage_mapping_table.entries[n].sclk_frequency =
+ sclk_voltage_mapping_table->entries[n].sclk_frequency =
table[i].ulSupportedSCLK;
- pi->sys_info.sclk_voltage_mapping_table.entries[n].vid_2bit =
+ sclk_voltage_mapping_table->entries[n].vid_2bit =
table[i].usVoltageIndex;
prev_sclk = table[i].ulSupportedSCLK;
n++;
}
}
- pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries = n;
+ sclk_voltage_mapping_table->num_max_dpm_entries = n;
}
-static void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
- ATOM_AVAILABLE_SCLK_LIST *table)
+void sumo_construct_vid_mapping_table(struct radeon_device *rdev,
+ struct sumo_vid_mapping_table *vid_mapping_table,
+ ATOM_AVAILABLE_SCLK_LIST *table)
{
- struct sumo_power_info *pi = sumo_get_pi(rdev);
u32 i, j;
for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
if (table[i].ulSupportedSCLK != 0) {
- pi->sys_info.vid_mapping_table.entries[table[i].usVoltageIndex].vid_7bit =
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
table[i].usVoltageID;
- pi->sys_info.vid_mapping_table.entries[table[i].usVoltageIndex].vid_2bit =
+ vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
table[i].usVoltageIndex;
}
}
for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
- if (pi->sys_info.vid_mapping_table.entries[i].vid_7bit == 0) {
+ if (vid_mapping_table->entries[i].vid_7bit == 0) {
for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
- if (pi->sys_info.vid_mapping_table.entries[j].vid_7bit != 0) {
- pi->sys_info.vid_mapping_table.entries[i] =
- pi->sys_info.vid_mapping_table.entries[j];
- pi->sys_info.vid_mapping_table.entries[j].vid_7bit = 0;
+ if (vid_mapping_table->entries[j].vid_7bit != 0) {
+ vid_mapping_table->entries[i] =
+ vid_mapping_table->entries[j];
+ vid_mapping_table->entries[j].vid_7bit = 0;
break;
}
}
}
}
- pi->sys_info.vid_mapping_table.num_entries = i;
+ vid_mapping_table->num_entries = i;
}
union igp_info {
else
pi->sys_info.enable_boost = false;
sumo_construct_display_voltage_mapping_table(rdev,
+ &pi->sys_info.disp_clk_voltage_mapping_table,
igp_info->info_6.sDISPCLK_Voltage);
sumo_construct_sclk_voltage_mapping_table(rdev,
+ &pi->sys_info.sclk_voltage_mapping_table,
igp_info->info_6.sAvail_SCLK);
- sumo_construct_vid_mapping_table(rdev, igp_info->info_6.sAvail_SCLK);
+ sumo_construct_vid_mapping_table(rdev, &pi->sys_info.vid_mapping_table,
+ igp_info->info_6.sAvail_SCLK);
}
return 0;
u32 sumo_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
- struct sumo_ps *requested_state = sumo_get_ps(rdev->pm.dpm.requested_ps);
+ struct sumo_power_info *pi = sumo_get_pi(rdev);
+ struct sumo_ps *requested_state = sumo_get_ps(&pi->requested_rps);
if (low)
return requested_state->levels[0].sclk;