Merge branches 'acpi-smbus', 'acpi-ec' and 'acpi-pci'
author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Fri, 20 Nov 2015 00:22:52 +0000 (01:22 +0100)
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>
          Fri, 20 Nov 2015 00:22:52 +0000 (01:22 +0100)
* acpi-smbus:
  Revert "ACPI / SBS: Add 5 us delay to fix SBS hangs on MacBook"
  ACPI / SMBus: Fix boot stalls / high CPU caused by reentrant code
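
  Both changes land in the drivers/acpi/sbshc.c hunks further down: the revert
  drops the MacBook DMI quirk and its 5 us delay, and the reentrancy fix stops
  the wait condition from calling back into the EC. Distilled from those hunks
  (a sketch, not a separate patch; the names are the ones used in sbshc.c):

	/* Before: evaluating the wait condition calls smb_check_done(),
	 * which re-reads the EC status register via ec_read() on every poll. */
	if (wait_event_timeout(hc->wait, smb_check_done(hc),
			       msecs_to_jiffies(timeout)))
		return 0;

	/* After: smbus_alarm() latches hc->done on a successful completion
	 * and wakes the waiter, so the condition is a plain flag test.
	 * (hc->done is cleared in acpi_smbus_transaction() under hc->lock.) */
	if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
		return 0;
	return -ETIME;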

* acpi-ec:
  ACPI-EC: Drop unnecessary check made before calling acpi_ec_delete_query()
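
  The acpi-ec diff itself is not part of this combined view. As a purely
  hypothetical illustration of the cleanup named in the title (the helper name
  comes from the title; its body and the call site shown here are assumed, not
  copied from drivers/acpi/ec.c): when the delete helper already tolerates a
  NULL argument, the caller's extra NULL test is redundant and can be dropped.

	static void acpi_ec_delete_query(struct acpi_ec_query *q)
	{
		if (!q)			/* assumed: helper ignores NULL */
			return;
		kfree(q);
	}

	/* call site, before (assumed) */
	if (result && q)
		acpi_ec_delete_query(q);

	/* call site, after: same behaviour, one check fewer */
	if (result)
		acpi_ec_delete_query(q);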

* acpi-pci:
  PCI: Fix OF logic in pci_dma_configure()
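
  Condensed from the drivers/pci/probe.c hunk at the bottom of this page (a
  restatement of that hunk, not additional code): the OF branch now keys off
  the host bridge's parent and its of_node instead of testing the PCI device's
  own of_node and only then checking bridge->parent.

	/* before */
	if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
		if (bridge->parent)
			of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {

	/* after */
	if (IS_ENABLED(CONFIG_OF) &&
	    bridge->parent && bridge->parent->of_node) {
		of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {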

Documentation/kernel-parameters.txt
arch/x86/include/asm/msr-index.h
drivers/acpi/cppc_acpi.c
drivers/acpi/sbshc.c
drivers/base/power/wakeirq.c
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.x86
drivers/cpufreq/intel_pstate.c
drivers/pci/probe.c
tools/power/x86/turbostat/turbostat.c

index f8aae632f02f678000f9de7a8d507a5ea0bb9404..742f69d18fc8989ae28d9c0662d6bf334109dddd 100644 (file)
@@ -1583,9 +1583,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                hwp_only
                        Only load intel_pstate on systems which support
                        hardware P state control (HWP) if available.
-               no_acpi
-                       Don't use ACPI processor performance control objects
-                       _PSS and _PPC specified limits.
 
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
index 9f3905697f123200eb1f7ecc749239dc5d5e1b2d..690b4027e17c994fdef4a626f270ca8b97ee041a 100644 (file)
@@ -35,7 +35,7 @@
 #define MSR_IA32_PERFCTR0              0x000000c1
 #define MSR_IA32_PERFCTR1              0x000000c2
 #define MSR_FSB_FREQ                   0x000000cd
-#define MSR_NHM_PLATFORM_INFO          0x000000ce
+#define MSR_PLATFORM_INFO              0x000000ce
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
 #define NHM_C3_AUTO_DEMOTE             (1UL << 25)
@@ -44,7 +44,6 @@
 #define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
 
-#define MSR_PLATFORM_INFO              0x000000ce
 #define MSR_MTRRcap                    0x000000fe
 #define MSR_IA32_BBL_CR_CTL            0x00000119
 #define MSR_IA32_BBL_CR_CTL3           0x0000011e
index 3c083d2cc4342049a97fae02407fb64b7e553036..6730f965b3793f25ba73125a00b18f346decf75f 100644 (file)
@@ -304,7 +304,7 @@ EXPORT_SYMBOL_GPL(acpi_get_psd_map);
 
 static int register_pcc_channel(int pcc_subspace_idx)
 {
-       struct acpi_pcct_subspace *cppc_ss;
+       struct acpi_pcct_hw_reduced *cppc_ss;
        unsigned int len;
 
        if (pcc_subspace_idx >= 0) {
index bf034f8b7c1acde77f90ded7f39f70dbd636b7db..2fa8304171e09b70e501dcab7892a9b443c84695 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -30,6 +29,7 @@ struct acpi_smb_hc {
        u8 query_bit;
        smbus_alarm_callback callback;
        void *context;
+       bool done;
 };
 
 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@ enum acpi_smb_offset {
        ACPI_SMB_ALARM_DATA = 0x26,     /* 2 bytes alarm data */
 };
 
-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
        return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@ static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
        return ec_write(hc->offset + address, data);
 }
 
-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-       union acpi_smb_status status = {.raw = 0};
-       smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-       return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-       if (wait_event_timeout(hc->wait, smb_check_done(hc),
-                              msecs_to_jiffies(timeout)))
+       if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
                return 0;
-       /*
-        * After the timeout happens, OS will try to check the status of SMbus.
-        * If the status is what OS expected, it will be regarded as the bogus
-        * timeout.
-        */
-       if (smb_check_done(hc))
-               return 0;
-       else
-               return -ETIME;
+       return -ETIME;
 }
 
 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
        }
 
        mutex_lock(&hc->lock);
-       if (macbook)
-               udelay(5);
+       hc->done = false;
        if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
                goto end;
        if (temp) {
@@ -235,8 +216,10 @@ static int smbus_alarm(void *context)
        if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
                return 0;
        /* Check if it is only a completion notify */
-       if (status.fields.done)
+       if (status.fields.done && status.fields.status == SMBUS_OK) {
+               hc->done = true;
                wake_up(&hc->wait);
+       }
        if (!status.fields.alarm)
                return 0;
        mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
                              acpi_handle handle, acpi_ec_query_func func,
                              void *data);
 
-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-       pr_debug("Detected MacBook, enabling workaround\n");
-       macbook = true;
-       return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-       { macbook_dmi_match, "Apple MacBook", {
-         DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-         DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-       },
-       { },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
        int status;
        unsigned long long val;
        struct acpi_smb_hc *hc;
 
-       dmi_check_system(acpi_smbus_dmi_table);
-
        if (!device)
                return -EINVAL;
 
index eb6e67451decee999901493d9182ed6c9cf21bf9..0d77cd6fd8d11653c789fa8834e8eb53b801a6d9 100644 (file)
@@ -68,6 +68,9 @@ int dev_pm_set_wake_irq(struct device *dev, int irq)
        struct wake_irq *wirq;
        int err;
 
+       if (irq < 0)
+               return -EINVAL;
+
        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;
@@ -167,6 +170,9 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq)
        struct wake_irq *wirq;
        int err;
 
+       if (irq < 0)
+               return -EINVAL;
+
        wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
        if (!wirq)
                return -ENOMEM;
index 1582c1c016b098b7d40cccbaf52f7d715a0586d2..8014c2307332cc803a73baec42c297c079d61974 100644 (file)
@@ -84,6 +84,7 @@ config ARM_KIRKWOOD_CPUFREQ
 config ARM_MT8173_CPUFREQ
        bool "Mediatek MT8173 CPUFreq support"
        depends on ARCH_MEDIATEK && REGULATOR
+       depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
        depends on !CPU_THERMAL || THERMAL=y
        select PM_OPP
        help
index adbd1de1cea55cbe6777d3a1c4dab546a6a68adf..c59bdcb83217071087cc0a90ef62e5cd01ce38d7 100644 (file)
@@ -5,7 +5,6 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
-       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
          The driver implements an internal governor and will become
index 2e31d097def6b884262295e0d46d0faa6418bd81..001a532e342e818d6093f50fc2eca3bd15a335b7 100644 (file)
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS             0x66a
-#define BYT_VIDS               0x66b
-#define BYT_TURBO_RATIOS       0x66c
-#define BYT_TURBO_VIDS         0x66d
+#define ATOM_RATIOS            0x66a
+#define ATOM_VIDS              0x66b
+#define ATOM_TURBO_RATIOS      0x66c
+#define ATOM_TURBO_VIDS                0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@ struct cpudata {
        u64     prev_mperf;
        u64     prev_tsc;
        struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-       struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@ struct cpu_defaults {
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
        int no_turbo;
@@ -163,8 +155,6 @@ struct perf_limits {
        int max_sysfs_pct;
        int min_policy_pct;
        int min_sysfs_pct;
-       int max_perf_ctl;
-       int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@ static struct perf_limits powersave_limits = {
        .max_sysfs_pct = 100,
        .min_policy_pct = 0,
        .min_sysfs_pct = 0,
-       .max_perf_ctl = 0,
-       .min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@ static struct perf_limits *limits = &performance_limits;
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-       return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-       struct cpudata *cpu;
-       int ret;
-       bool turbo_absent = false;
-       int max_pstate_index;
-       int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-       int i;
-
-       cpu = all_cpu_data[policy->cpu];
-
-       pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-                cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                cpu->pstate.turbo_pstate);
-
-       if (!cpu->acpi_perf_data.shared_cpu_map &&
-           zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-                                   GFP_KERNEL, cpu_to_node(policy->cpu))) {
-               return -ENOMEM;
-       }
-
-       ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-                                                 policy->cpu);
-       if (ret)
-               return ret;
-
-       /*
-        * Check if the control value in _PSS is for PERF_CTL MSR, which should
-        * guarantee that the states returned by it map to the states in our
-        * list directly.
-        */
-       if (cpu->acpi_perf_data.control_register.space_id !=
-                                               ACPI_ADR_SPACE_FIXED_HARDWARE)
-               return -EIO;
-
-       pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-       for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-               pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
-                        (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-                        (u32) cpu->acpi_perf_data.states[i].core_frequency,
-                        (u32) cpu->acpi_perf_data.states[i].power,
-                        (u32) cpu->acpi_perf_data.states[i].control);
-
-       /*
-        * If there is only one entry _PSS, simply ignore _PSS and continue as
-        * usual without taking _PSS into account
-        */
-       if (cpu->acpi_perf_data.state_count < 2)
-               return 0;
-
-       turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-       min_pss_ctl = convert_to_native_pstate_format(cpu,
-                                       cpu->acpi_perf_data.state_count - 1);
-       /* Check if there is a turbo freq in _PSS */
-       if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-           turbo_pss_ctl > cpu->pstate.min_pstate) {
-               pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-               limits->no_turbo = limits->turbo_disabled = 1;
-               cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-               turbo_absent = true;
-       }
-
-       /* Check if the max non turbo p state < Intel P state max */
-       max_pstate_index = turbo_absent ? 0 : 1;
-       max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-       if (max_pss_ctl < cpu->pstate.max_pstate &&
-           max_pss_ctl > cpu->pstate.min_pstate)
-               cpu->pstate.max_pstate = max_pss_ctl;
-
-       /* check If min perf > Intel P State min */
-       if (min_pss_ctl > cpu->pstate.min_pstate &&
-           min_pss_ctl < cpu->pstate.max_pstate) {
-               cpu->pstate.min_pstate = min_pss_ctl;
-               policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-       }
-
-       if (turbo_absent)
-               policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-                                               cpu->pstate.scaling;
-       else {
-               policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-                                               cpu->pstate.scaling;
-               /*
-                * The _PSS table doesn't contain whole turbo frequency range.
-                * This just contains +1 MHZ above the max non turbo frequency,
-                * with control value corresponding to max turbo ratio. But
-                * when cpufreq set policy is called, it will call with this
-                * max frequency, which will cause a reduced performance as
-                * this driver uses real max turbo frequency as the max
-                * frequeny. So correct this frequency in _PSS table to
-                * correct max turbo frequency based on the turbo ratio.
-                * Also need to convert to MHz as _PSS freq is in MHz.
-                */
-               cpu->acpi_perf_data.states[0].core_frequency =
-                                               turbo_pss_ctl * 100;
-       }
-
-       pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-                cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-                cpu->pstate.turbo_pstate);
-       pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-                policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-       return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-       struct cpudata *cpu;
-
-       if (!no_acpi_perf)
-               return 0;
-
-       cpu = all_cpu_data[policy->cpu];
-       acpi_processor_unregister_performance(policy->cpu);
-       return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-       return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
                             int deadband, int integral) {
        pid->setpoint = setpoint;
@@ -687,31 +528,31 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata)
        wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_RATIOS, value);
+       rdmsrl(ATOM_RATIOS, value);
        return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_RATIOS, value);
+       rdmsrl(ATOM_RATIOS, value);
        return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
        u64 value;
 
-       rdmsrl(BYT_TURBO_RATIOS, value);
+       rdmsrl(ATOM_TURBO_RATIOS, value);
        return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
        u64 val;
        int32_t vid_fp;
@@ -736,27 +577,42 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
        u64 value;
        int i;
+       /* Defined in Table 35-6 from SDM (Sept 2015) */
+       static int silvermont_freq_table[] = {
+               83300, 100000, 133300, 116700, 80000};
 
        rdmsrl(MSR_FSB_FREQ, value);
-       i = value & 0x3;
+       i = value & 0x7;
+       WARN_ON(i > 4);
 
-       BUG_ON(i > BYT_BCLK_FREQS);
+       return silvermont_freq_table[i];
+}
 
-       return byt_freq_table[i] * 100;
+static int airmont_get_scaling(void)
+{
+       u64 value;
+       int i;
+       /* Defined in Table 35-10 from SDM (Sept 2015) */
+       static int airmont_freq_table[] = {
+               83300, 100000, 133300, 116700, 80000,
+               93300, 90000, 88900, 87500};
+
+       rdmsrl(MSR_FSB_FREQ, value);
+       i = value & 0xF;
+       WARN_ON(i > 8);
+
+       return airmont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static void atom_get_vid(struct cpudata *cpudata)
 {
        u64 value;
 
-       rdmsrl(BYT_VIDS, value);
+       rdmsrl(ATOM_VIDS, value);
        cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
        cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@ static void byt_get_vid(struct cpudata *cpudata)
                int_tofp(cpudata->pstate.max_pstate -
                        cpudata->pstate.min_pstate));
 
-       rdmsrl(BYT_TURBO_VIDS, value);
+       rdmsrl(ATOM_TURBO_VIDS, value);
        cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -885,7 +741,7 @@ static struct cpu_defaults core_params = {
        },
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
        .pid_policy = {
                .sample_rate_ms = 10,
                .deadband = 0,
@@ -895,13 +751,33 @@ static struct cpu_defaults byt_params = {
                .i_gain_pct = 4,
        },
        .funcs = {
-               .get_max = byt_get_max_pstate,
-               .get_max_physical = byt_get_max_pstate,
-               .get_min = byt_get_min_pstate,
-               .get_turbo = byt_get_turbo_pstate,
-               .set = byt_set_pstate,
-               .get_scaling = byt_get_scaling,
-               .get_vid = byt_get_vid,
+               .get_max = atom_get_max_pstate,
+               .get_max_physical = atom_get_max_pstate,
+               .get_min = atom_get_min_pstate,
+               .get_turbo = atom_get_turbo_pstate,
+               .set = atom_set_pstate,
+               .get_scaling = silvermont_get_scaling,
+               .get_vid = atom_get_vid,
+       },
+};
+
+static struct cpu_defaults airmont_params = {
+       .pid_policy = {
+               .sample_rate_ms = 10,
+               .deadband = 0,
+               .setpoint = 60,
+               .p_gain_pct = 14,
+               .d_gain_pct = 0,
+               .i_gain_pct = 4,
+       },
+       .funcs = {
+               .get_max = atom_get_max_pstate,
+               .get_max_physical = atom_get_max_pstate,
+               .get_min = atom_get_min_pstate,
+               .get_turbo = atom_get_turbo_pstate,
+               .set = atom_set_pstate,
+               .get_scaling = airmont_get_scaling,
+               .get_vid = atom_get_vid,
        },
 };
 
@@ -938,23 +814,12 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
         * policy, or by cpu specific default values determined through
         * experimentation.
         */
-       if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-                                               limits->max_policy_pct) {
-               *max = limits->max_perf_ctl;
-       } else {
-               max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-                                       limits->max_perf));
-               *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-                              cpu->pstate.turbo_pstate);
-       }
+       max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+       *max = clamp_t(int, max_perf_adj,
+                       cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-       if (limits->min_perf_ctl) {
-               *min = limits->min_perf_ctl;
-       } else {
-               min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-                                   limits->min_perf));
-               *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-       }
+       min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+       *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@ static void intel_pstate_timer_func(unsigned long __data)
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x2a, core_params),
        ICPU(0x2d, core_params),
-       ICPU(0x37, byt_params),
+       ICPU(0x37, silvermont_params),
        ICPU(0x3a, core_params),
        ICPU(0x3c, core_params),
        ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
        ICPU(0x45, core_params),
        ICPU(0x46, core_params),
        ICPU(0x47, core_params),
-       ICPU(0x4c, byt_params),
+       ICPU(0x4c, airmont_params),
        ICPU(0x4e, core_params),
        ICPU(0x4f, core_params),
        ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@ static unsigned int intel_pstate_get(unsigned int cpu_num)
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-       struct cpudata *cpu;
-       int i;
-#endif
-       pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-                policy->cpuinfo.max_freq, policy->max);
        if (!policy->cpuinfo.max_freq)
                return -ENODEV;
 
@@ -1270,23 +1129,6 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
        limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
                                  int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-       cpu = all_cpu_data[policy->cpu];
-       for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-               int control;
-
-               control = convert_to_native_pstate_format(cpu, i);
-               if (control * cpu->pstate.scaling == policy->max)
-                       limits->max_perf_ctl = control;
-               if (control * cpu->pstate.scaling == policy->min)
-                       limits->min_perf_ctl = control;
-       }
-
-       pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-                policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-                limits->max_perf_ctl);
-#endif
-
        if (hwp_active)
                intel_pstate_hwp_set();
 
@@ -1341,30 +1183,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
        policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
        policy->cpuinfo.max_freq =
                cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-       if (!no_acpi_perf)
-               intel_pstate_init_perf_limits(policy);
-       /*
-        * If there is no acpi perf data or error, we ignore and use Intel P
-        * state calculated limits, So this is not fatal error.
-        */
        policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
        cpumask_set_cpu(policy->cpu, policy->cpus);
 
        return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-       return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS,
        .verify         = intel_pstate_verify_policy,
        .setpolicy      = intel_pstate_set_policy,
        .get            = intel_pstate_get,
        .init           = intel_pstate_cpu_init,
-       .exit           = intel_pstate_cpu_exit,
        .stop_cpu       = intel_pstate_stop_cpu,
        .name           = "intel_pstate",
 };
@@ -1406,6 +1236,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1601,9 +1432,6 @@ static int __init intel_pstate_setup(char *str)
                force_load = 1;
        if (!strcmp(str, "hwp_only"))
                hwp_only = 1;
-       if (!strcmp(str, "no_acpi"))
-               no_acpi_perf = 1;
-
        return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
index e735c728e3b34aa441d1bc140e5473554af8385d..edb1984201e9702162321c50f33f76ba362bac5a 100644 (file)
@@ -1685,8 +1685,8 @@ static void pci_dma_configure(struct pci_dev *dev)
 {
        struct device *bridge = pci_get_host_bridge_device(dev);
 
-       if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-               if (bridge->parent)
+       if (IS_ENABLED(CONFIG_OF) &&
+               bridge->parent && bridge->parent->of_node) {
                        of_dma_configure(&dev->dev, bridge->parent->of_node);
        } else if (has_acpi_companion(bridge)) {
                struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
index d8e4b20b6d54cc14c25482279a2a652fe483a2f3..0dac7e05a6ac9e5f1500eca1cebbce2d900ab511 100644 (file)
@@ -1173,9 +1173,9 @@ dump_nhm_platform_info(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 
-       fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
+       fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
        ratio = (msr >> 40) & 0xFF;
        fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@ void check_permissions()
  *
  * MSR_SMI_COUNT                   0x00000034
  *
- * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_PLATFORM_INFO               0x000000ce
  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
  *
  * MSR_PKG_C3_RESIDENCY            0x000003f8
@@ -1876,7 +1876,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
        pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
        base_ratio = (msr >> 8) & 0xFF;
 
        base_hz = base_ratio * bclk * 1000000;