/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>

MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "

enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};

#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

struct acpi_cpufreq_data {
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
	unsigned int acpi_perf_cpu;
	cpumask_var_t freqdomain_cpus;
};

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static inline struct acpi_processor_performance
*to_perf_data(struct acpi_cpufreq_data *data)
{
	return per_cpu_ptr(acpi_perf_data, data->acpi_perf_cpu);
}

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static struct msr __percpu *msrs;

static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}

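/*
 * boost_set_msrs - enable/disable boost on all CPUs in @cpumask by
 * read-modify-writing the vendor's boost-disable bit through the
 * preallocated percpu 'msrs' scratch area.
 */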
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 msr_addr;
	u64 msr_mask;
	int cpu;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);

		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}

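/* Apply a boost setting to all online CPUs; also the ->set_boost hook. */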
static int _store_boost(int val)
{
	get_online_cpus();
	boost_set_msrs(val, cpu_online_mask);
	put_online_cpus();
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return 0;
}

static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	return cpufreq_show_cpus(data->freqdomain_cpus, buf);
}

cpufreq_freq_attr_ro(freqdomain_cpus);

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!acpi_cpufreq_driver.boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	_store_boost((int) val);

	return count;
}

static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", acpi_cpufreq_driver.boost_enabled);
}

cpufreq_freq_attr_rw(cpb);
#endif

static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}

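/*
 * extract_io - map a status value read from the ACPI I/O port to a
 * frequency in kHz by scanning the P-state table; returns 0 on no match.
 */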
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = to_perf_data(data);

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}
	return 0;
}

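/*
 * extract_msr - same lookup for the MSR case: mask the value down to the
 * vendor's status bits first, and fall back to the first table entry if
 * nothing matches.
 */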
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	struct cpufreq_frequency_table *pos;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = to_perf_data(data);

	cpufreq_for_each_entry(pos, data->freq_table)
		if (msr == perf->states[pos->driver_data].status)
			return pos->frequency;
	return data->freq_table[0].frequency;
}

static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}

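/*
 * Command descriptor for the smp_call_function helpers below: which CPUs
 * to run on, whether the P-state control register is an MSR or an I/O
 * port, where that register lives, and the value read or written.
 */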
struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct {
			u32 reg;
		} msr;
		struct {
			u32 port;
			u8 bit_width;
		} io;
	} addr;
	u32 val;
};

/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}

static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}

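/*
 * drv_write - run do_drv_write() on every CPU in cmd->mask.  The calling
 * CPU is handled directly, since smp_call_function_many() skips it.
 */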
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);

	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}

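/*
 * get_cur_val - read the current P-state control value on one of the
 * CPUs in @mask, via MSR or I/O port depending on the detected feature.
 */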
static u32
get_cur_val(const struct cpumask *mask, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = to_perf_data(data);
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}

static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data;
	struct cpufreq_policy *policy;
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	policy = cpufreq_cpu_get(cpu);
	if (unlikely(!policy))
		return 0;

	data = policy->driver_data;
	cpufreq_cpu_put(policy);
	if (unlikely(!data || !data->freq_table))
		return 0;

	cached_freq = data->freq_table[to_perf_data(data)->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu), data), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}

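/*
 * check_freqs - strict-mode verification: poll the hardware (up to 100
 * reads, 10us apart) until the requested frequency is reported; returns
 * 1 on success, 0 if the transition never became visible.
 */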
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask, data), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}

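/*
 * acpi_cpufreq_target - ->target_index callback: program the P-state
 * backing frequency-table entry @index on all CPUs the policy spans.
 */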
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int index)
{
	struct acpi_cpufreq_data *data = policy->driver_data;
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	if (unlikely(data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = to_perf_data(data);
	next_perf_state = data->freq_table[index].driver_data;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			return 0;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		return -ENODEV;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, data->freq_table[index].frequency,
					data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
		}
	}

	if (!result)
		perf->state = next_perf_state;

	return result;
}

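/*
 * acpi_cpufreq_guess_freq - pick a plausible current frequency when the
 * hardware cannot report one (I/O port systems): the table entry closest
 * to the measured cpu_khz, or P0 if cpu_khz is unavailable.
 */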
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf;

	perf = to_perf_data(data);
	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}

static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}

static int boost_notify(struct notifier_block *nb, unsigned long action,
		      void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that
	 * this cpu cannot block the remaining ones from boosting. On
	 * the CPU_UP path we simply keep the boost-disable flag in
	 * sync with the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};

/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}

/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware and won't inform about it to OS. If not
 * detected, this has a side effect of making CPU run at a different speed
 * than OS intended it to run at. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};

static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}

static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
	static int blacklisted;

	pr_debug("acpi_cpufreq_cpu_init\n");

	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) {
		result = -ENOMEM;
		goto err_free;
	}

	perf = per_cpu_ptr(acpi_perf_data, cpu);
	data->acpi_perf_cpu = cpu;
	policy->driver_data = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(perf, cpu);
	if (result)
		goto err_free_mask;

	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}
	cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map);

	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		cpumask_copy(data->freqdomain_cpus,
			     topology_sibling_cpumask(cpu));
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			 (u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kzalloc(sizeof(*data->freq_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].driver_data = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_table_validate_and_show(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/*
		 * The core will not set policy->cur, because
		 * cpufreq_driver->get is NULL, so we need to set it here.
		 * However, we have to guess it, because the current speed is
		 * unknown and not detectable via IO ports.
		 */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(cpu);
err_free_mask:
	free_cpumask_var(data->freqdomain_cpus);
err_free:
	kfree(data);
	policy->driver_data = NULL;

	return result;
}

static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		policy->driver_data = NULL;
		acpi_processor_unregister_performance(data->acpi_perf_cpu);
		free_cpumask_var(data->freqdomain_cpus);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}

static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = policy->driver_data;

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}

static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	&freqdomain_cpus,
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	&cpb,
#endif
	NULL,
};

static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= cpufreq_generic_frequency_table_verify,
	.target_index	= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.attr		= acpi_cpufreq_attr,
	.set_boost	= _store_boost,
};

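/*
 * acpi_cpufreq_boost_init - if boost is advertised (AMD CPB or Intel
 * IDA), allocate the percpu MSR scratch area, force all online CPUs to
 * the current global boost state and register the hotplug notifier that
 * keeps newly onlined CPUs in sync.
 */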
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		acpi_cpufreq_driver.boost_supported = true;
		acpi_cpufreq_driver.boost_enabled = boost_state(0);

		cpu_notifier_register_begin();

		/* Force all MSRs to the same value */
		boost_set_msrs(acpi_cpufreq_driver.boost_enabled,
			       cpu_online_mask);

		__register_cpu_notifier(&boost_nb);

		cpu_notifier_register_done();
	}
}

static void acpi_cpufreq_boost_exit(void)
{
	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}

static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* don't keep reloading if cpufreq_driver exists */
	if (cpufreq_get_current_driver())
		return -EEXIST;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (!check_amd_hwpstate_cpu(0)) {
		struct freq_attr **attr;

		pr_debug("CPB unsupported, do not expose it\n");

		for (attr = acpi_cpufreq_attr; *attr; attr++)
			if (*attr == &cpb) {
				*attr = NULL;
				break;
			}
	}
#endif
	acpi_cpufreq_boost_init();

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret) {
		free_acpi_perf_data();
		acpi_cpufreq_boost_exit();
	}
	return ret;
}

static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}

module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");

late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);

static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

static const struct acpi_device_id processor_device_ids[] = {
	{ACPI_PROCESSOR_OBJECT_HID, },
	{ACPI_PROCESSOR_DEVICE_HID, },
	{},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

MODULE_ALIAS("acpi");