/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};
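/*
 * Only the low bits of PERF_CTL/PERF_STATUS carry the P-state selector:
 * 16 bits on Intel, 3 bits on AMD hardware P-states.  The MSR read and
 * write paths below mask with these ranges.
 */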
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
struct acpi_cpufreq_data {
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
};
/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;
static inline struct acpi_processor_performance *to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}
static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 msr_addr;
	u64 msr_mask;
	int cpu;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
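/*
 * set_boost() is installed as the driver's ->set_boost hook, so a write to
 * the cpufreq core's global "boost" knob lands here and is applied to every
 * online CPU.  Note that both MSRs carry a boost-*disable* bit, which is why
 * boost_set_msrs() clears the bit to enable boosting.
 */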
static int set_boost(int val)
{
	get_online_cpus();
	boost_set_msrs(val, cpu_online_mask);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}
static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	if (unlikely(!data))
		return -ENODEV;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	int ret;
	unsigned int val = 0;

	if (!acpi_cpufreq_driver.set_boost)
		return -EINVAL;

	ret = kstrtouint(buf, 10, &val);
	if (ret || val > 1)
		return -EINVAL;

	set_boost(val);

	return count;
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif
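/*
 * Note on "cpb": the store path above accepts only 0 or 1 and funnels into
 * set_boost(), so although the attribute is instantiated per policy its
 * effect is system wide (see the comment in acpi_cpufreq_init()).
 */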
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
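/*
 * extract_io()/extract_msr() translate a raw status value (an I/O port read
 * or the PERF_STATUS MSR contents) back into the matching entry of the
 * driver's frequency table; extract_freq() picks the decoder that fits the
 * detected cpu_feature.
 */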
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, data->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return data->freq_table[0].frequency;
}
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}
struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
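/*
 * On Intel, only the low INTEL_MSR_RANGE bits of PERF_CTL select the
 * P-state, so the write below preserves the remaining bits with a
 * read-modify-write; AMD's PERF_CTL takes the control value directly.
 */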
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
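/*
 * smp_call_function_many() does not run the callback on the calling CPU,
 * so drv_write() handles the local CPU directly (under get_cpu()) when it
 * is part of the target mask and then kicks the remaining CPUs.
 */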
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);

	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}
static u32
get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = to_perf_data(data);
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get_raw(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	if (unlikely(!data || !data->freq_table))
		return 0;

	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask, data), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
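/*
 * ->target_index() callback: map the cpufreq table index to an ACPI
 * P-state, skip the write if we are already there (unless a resume is
 * pending), program the new state via MSR or I/O port, and optionally
 * verify the switch when the acpi_pstate_strict module parameter is set.
 */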
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		return -ENODEV;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
					data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}
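/*
 * Guess the current frequency by picking the table entry closest to the
 * measured cpu_khz: the test (2 * cpu_khz) > (freqn + freq) is just
 * cpu_khz > (freq[i] + freq[i+1]) / 2, i.e. cpu_khz lies above the midpoint
 * of two neighbouring states, so the faster one is the closer match.
 */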
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call = boost_notify,
};
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;

	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif
= kzalloc(sizeof(*data
), GFP_KERNEL
);
676 if (!zalloc_cpumask_var(&data
->freqdomain_cpus
, GFP_KERNEL
)) {
681 perf
= per_cpu_ptr(acpi_perf_data
, cpu
);
682 data
->acpi_perf_cpu
= cpu
;
683 policy
->driver_data
= data
;
685 if (cpu_has(c
, X86_FEATURE_CONSTANT_TSC
))
686 acpi_cpufreq_driver
.flags
|= CPUFREQ_CONST_LOOPS
;
688 result
= acpi_processor_register_performance(perf
, cpu
);
692 policy
->shared_type
= perf
->shared_type
;
695 * Will let policy->cpus know about dependency only when software
696 * coordination is required.
698 if (policy
->shared_type
== CPUFREQ_SHARED_TYPE_ALL
||
699 policy
->shared_type
== CPUFREQ_SHARED_TYPE_ANY
) {
700 cpumask_copy(policy
->cpus
, perf
->shared_cpu_map
);
702 cpumask_copy(data
->freqdomain_cpus
, perf
->shared_cpu_map
);
#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif
	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}
	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");
	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		policy->driver_data = NULL;
		acpi_processor_unregister_performance(data->acpi_perf_cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
};
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		acpi_cpufreq_driver.set_boost = set_boost;
		acpi_cpufreq_driver.boost_enabled = boost_state(0);

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}
static void acpi_cpufreq_boost_exit(void)
{
	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Lets enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}
static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");