| 1 | /* |
| 2 | * acpi-cpufreq.c - ACPI Processor P-States Driver |
| 3 | * |
| 4 | * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> |
| 5 | * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> |
| 6 | * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de> |
| 7 | * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> |
| 8 | * |
| 9 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| 10 | * |
| 11 | * This program is free software; you can redistribute it and/or modify |
| 12 | * it under the terms of the GNU General Public License as published by |
| 13 | * the Free Software Foundation; either version 2 of the License, or (at |
| 14 | * your option) any later version. |
| 15 | * |
| 16 | * This program is distributed in the hope that it will be useful, but |
| 17 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 19 | * General Public License for more details. |
| 20 | * |
| 21 | * You should have received a copy of the GNU General Public License along |
| 22 | * with this program; if not, write to the Free Software Foundation, Inc., |
| 23 | * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. |
| 24 | * |
| 25 | * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
| 26 | */ |
| 27 | |
| 28 | #include <linux/kernel.h> |
| 29 | #include <linux/module.h> |
| 30 | #include <linux/init.h> |
| 31 | #include <linux/smp.h> |
| 32 | #include <linux/sched.h> |
| 33 | #include <linux/cpufreq.h> |
| 34 | #include <linux/compiler.h> |
| 35 | #include <linux/dmi.h> |
| 36 | #include <linux/slab.h> |
| 37 | |
| 38 | #include <linux/acpi.h> |
| 39 | #include <linux/io.h> |
| 40 | #include <linux/delay.h> |
| 41 | #include <linux/uaccess.h> |
| 42 | |
| 43 | #include <acpi/processor.h> |
| 44 | |
| 45 | #include <asm/msr.h> |
| 46 | #include <asm/processor.h> |
| 47 | #include <asm/cpufeature.h> |
| 48 | |
| 49 | MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski"); |
| 50 | MODULE_DESCRIPTION("ACPI Processor P-States Driver"); |
| 51 | MODULE_LICENSE("GPL"); |
| 52 | |
| 53 | #define PFX "acpi-cpufreq: " |
| 54 | |
| 55 | enum { |
| 56 | UNDEFINED_CAPABLE = 0, |
| 57 | SYSTEM_INTEL_MSR_CAPABLE, |
| 58 | SYSTEM_AMD_MSR_CAPABLE, |
| 59 | SYSTEM_IO_CAPABLE, |
| 60 | }; |
| 61 | |
| 62 | #define INTEL_MSR_RANGE (0xffff) |
| 63 | #define AMD_MSR_RANGE (0x7) |
| 64 | |
| 65 | #define MSR_K7_HWCR_CPB_DIS (1ULL << 25) |
| 66 | |
| 67 | struct acpi_cpufreq_data { |
| 68 | struct acpi_processor_performance *acpi_data; |
| 69 | struct cpufreq_frequency_table *freq_table; |
| 70 | unsigned int resume; |
| 71 | unsigned int cpu_feature; |
| 72 | cpumask_var_t freqdomain_cpus; |
| 73 | }; |
| 74 | |
| 75 | static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data); |
| 76 | |
| 77 | /* acpi_perf_data is a pointer to percpu data. */ |
| 78 | static struct acpi_processor_performance __percpu *acpi_perf_data; |
| 79 | |
| 80 | static struct cpufreq_driver acpi_cpufreq_driver; |
| 81 | |
| 82 | static unsigned int acpi_pstate_strict; |
| 83 | static bool boost_enabled, boost_supported; |
| 84 | static struct msr __percpu *msrs; |
| 85 | |
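/*
 * boost_state - return true if boost (Intel Turbo / AMD Core Performance
 * Boost) is currently enabled on @cpu, i.e. if the vendor-specific
 * boost-disable MSR bit is clear.
 */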
| 86 | static bool boost_state(unsigned int cpu) |
| 87 | { |
| 88 | u32 lo, hi; |
| 89 | u64 msr; |
| 90 | |
| 91 | switch (boot_cpu_data.x86_vendor) { |
| 92 | case X86_VENDOR_INTEL: |
| 93 | rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi); |
| 94 | msr = lo | ((u64)hi << 32); |
| 95 | return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE); |
| 96 | case X86_VENDOR_AMD: |
| 97 | rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi); |
| 98 | msr = lo | ((u64)hi << 32); |
| 99 | return !(msr & MSR_K7_HWCR_CPB_DIS); |
| 100 | } |
| 101 | return false; |
| 102 | } |
| 103 | |
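/*
 * boost_set_msrs - read-modify-write the vendor-specific boost-disable
 * MSR bit on every CPU in @cpumask: clearing the bit enables boost,
 * setting it disables boost.
 */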
| 104 | static void boost_set_msrs(bool enable, const struct cpumask *cpumask) |
| 105 | { |
| 106 | u32 cpu; |
| 107 | u32 msr_addr; |
| 108 | u64 msr_mask; |
| 109 | |
| 110 | switch (boot_cpu_data.x86_vendor) { |
| 111 | case X86_VENDOR_INTEL: |
| 112 | msr_addr = MSR_IA32_MISC_ENABLE; |
| 113 | msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE; |
| 114 | break; |
| 115 | case X86_VENDOR_AMD: |
| 116 | msr_addr = MSR_K7_HWCR; |
| 117 | msr_mask = MSR_K7_HWCR_CPB_DIS; |
| 118 | break; |
| 119 | default: |
| 120 | return; |
| 121 | } |
| 122 | |
| 123 | rdmsr_on_cpus(cpumask, msr_addr, msrs); |
| 124 | |
| 125 | for_each_cpu(cpu, cpumask) { |
| 126 | struct msr *reg = per_cpu_ptr(msrs, cpu); |
| 127 | if (enable) |
| 128 | reg->q &= ~msr_mask; |
| 129 | else |
| 130 | reg->q |= msr_mask; |
| 131 | } |
| 132 | |
| 133 | wrmsr_on_cpus(cpumask, msr_addr, msrs); |
| 134 | } |
| 135 | |
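/*
 * _store_boost - common store handler behind the global "boost" file and
 * the legacy per-policy "cpb" file: parse a 0/1 value and update the
 * boost MSRs on all online CPUs.
 */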
| 136 | static ssize_t _store_boost(const char *buf, size_t count) |
| 137 | { |
| 138 | int ret; |
| 139 | unsigned long val = 0; |
| 140 | |
| 141 | if (!boost_supported) |
| 142 | return -EINVAL; |
| 143 | |
| 144 | ret = kstrtoul(buf, 10, &val); |
| 145 | if (ret || (val > 1)) |
| 146 | return -EINVAL; |
| 147 | |
| 148 | if ((val && boost_enabled) || (!val && !boost_enabled)) |
| 149 | return count; |
| 150 | |
| 151 | get_online_cpus(); |
| 152 | |
| 153 | boost_set_msrs(val, cpu_online_mask); |
| 154 | |
| 155 | put_online_cpus(); |
| 156 | |
| 157 | boost_enabled = val; |
| 158 | pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis"); |
| 159 | |
| 160 | return count; |
| 161 | } |
| 162 | |
| 163 | static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr, |
| 164 | const char *buf, size_t count) |
| 165 | { |
| 166 | return _store_boost(buf, count); |
| 167 | } |
| 168 | |
| 169 | static ssize_t show_global_boost(struct kobject *kobj, |
| 170 | struct attribute *attr, char *buf) |
| 171 | { |
| 172 | return sprintf(buf, "%u\n", boost_enabled); |
| 173 | } |
| 174 | |
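/*
 * The global "boost" attribute is attached to the cpufreq core's kobject
 * in acpi_cpufreq_boost_init() below; on typical systems that makes it
 * visible as /sys/devices/system/cpu/cpufreq/boost (path given for
 * illustration, the exact location is up to the cpufreq core).
 */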
| 175 | static struct global_attr global_boost = __ATTR(boost, 0644, |
| 176 | show_global_boost, |
| 177 | store_global_boost); |
| 178 | |
| 179 | static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) |
| 180 | { |
| 181 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 182 | |
| 183 | return cpufreq_show_cpus(data->freqdomain_cpus, buf); |
| 184 | } |
| 185 | |
| 186 | cpufreq_freq_attr_ro(freqdomain_cpus); |
| 187 | |
| 188 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB |
| 189 | static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, |
| 190 | size_t count) |
| 191 | { |
| 192 | return _store_boost(buf, count); |
| 193 | } |
| 194 | |
| 195 | static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) |
| 196 | { |
| 197 | return sprintf(buf, "%u\n", boost_enabled); |
| 198 | } |
| 199 | |
| 200 | cpufreq_freq_attr_rw(cpb); |
| 201 | #endif |
| 202 | |
| 203 | static int check_est_cpu(unsigned int cpuid) |
| 204 | { |
| 205 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); |
| 206 | |
| 207 | return cpu_has(cpu, X86_FEATURE_EST); |
| 208 | } |
| 209 | |
| 210 | static int check_amd_hwpstate_cpu(unsigned int cpuid) |
| 211 | { |
| 212 | struct cpuinfo_x86 *cpu = &cpu_data(cpuid); |
| 213 | |
| 214 | return cpu_has(cpu, X86_FEATURE_HW_PSTATE); |
| 215 | } |
| 216 | |
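/*
 * Translate a raw status value read from the P-state I/O port into a
 * frequency in kHz via the driver's frequency table; returns 0 if no
 * state matches.
 */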
| 217 | static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) |
| 218 | { |
| 219 | struct acpi_processor_performance *perf; |
| 220 | int i; |
| 221 | |
| 222 | perf = data->acpi_data; |
| 223 | |
| 224 | for (i = 0; i < perf->state_count; i++) { |
| 225 | if (value == perf->states[i].status) |
| 226 | return data->freq_table[i].frequency; |
| 227 | } |
| 228 | return 0; |
| 229 | } |
| 230 | |
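/*
 * Mask the vendor-specific P-state field out of a PERF_CTL/PERF_STATUS
 * MSR value and look up the matching table frequency; falls back to the
 * first table entry if nothing matches.
 */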
| 231 | static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data) |
| 232 | { |
| 233 | int i; |
| 234 | struct acpi_processor_performance *perf; |
| 235 | |
| 236 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) |
| 237 | msr &= AMD_MSR_RANGE; |
| 238 | else |
| 239 | msr &= INTEL_MSR_RANGE; |
| 240 | |
| 241 | perf = data->acpi_data; |
| 242 | |
| 243 | for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { |
| 244 | if (msr == perf->states[data->freq_table[i].driver_data].status) |
| 245 | return data->freq_table[i].frequency; |
| 246 | } |
| 247 | return data->freq_table[0].frequency; |
| 248 | } |
| 249 | |
| 250 | static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data) |
| 251 | { |
| 252 | switch (data->cpu_feature) { |
| 253 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 254 | case SYSTEM_AMD_MSR_CAPABLE: |
| 255 | return extract_msr(val, data); |
| 256 | case SYSTEM_IO_CAPABLE: |
| 257 | return extract_io(val, data); |
| 258 | default: |
| 259 | return 0; |
| 260 | } |
| 261 | } |
| 262 | |
| 263 | struct msr_addr { |
| 264 | u32 reg; |
| 265 | }; |
| 266 | |
| 267 | struct io_addr { |
| 268 | u16 port; |
| 269 | u8 bit_width; |
| 270 | }; |
| 271 | |
| 272 | struct drv_cmd { |
| 273 | unsigned int type; |
| 274 | const struct cpumask *mask; |
| 275 | union { |
| 276 | struct msr_addr msr; |
| 277 | struct io_addr io; |
| 278 | } addr; |
| 279 | u32 val; |
| 280 | }; |
| 281 | |
/* Called via smp_call_function_any(), on one CPU in cmd->mask */
| 283 | static void do_drv_read(void *_cmd) |
| 284 | { |
| 285 | struct drv_cmd *cmd = _cmd; |
| 286 | u32 h; |
| 287 | |
| 288 | switch (cmd->type) { |
| 289 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 290 | case SYSTEM_AMD_MSR_CAPABLE: |
| 291 | rdmsr(cmd->addr.msr.reg, cmd->val, h); |
| 292 | break; |
| 293 | case SYSTEM_IO_CAPABLE: |
| 294 | acpi_os_read_port((acpi_io_address)cmd->addr.io.port, |
| 295 | &cmd->val, |
| 296 | (u32)cmd->addr.io.bit_width); |
| 297 | break; |
| 298 | default: |
| 299 | break; |
| 300 | } |
| 301 | } |
| 302 | |
/*
 * Called via smp_call_function_many() on the target CPUs, and directly
 * from drv_write() for the local CPU.
 */
| 304 | static void do_drv_write(void *_cmd) |
| 305 | { |
| 306 | struct drv_cmd *cmd = _cmd; |
| 307 | u32 lo, hi; |
| 308 | |
| 309 | switch (cmd->type) { |
| 310 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 311 | rdmsr(cmd->addr.msr.reg, lo, hi); |
| 312 | lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); |
| 313 | wrmsr(cmd->addr.msr.reg, lo, hi); |
| 314 | break; |
| 315 | case SYSTEM_AMD_MSR_CAPABLE: |
| 316 | wrmsr(cmd->addr.msr.reg, cmd->val, 0); |
| 317 | break; |
| 318 | case SYSTEM_IO_CAPABLE: |
| 319 | acpi_os_write_port((acpi_io_address)cmd->addr.io.port, |
| 320 | cmd->val, |
| 321 | (u32)cmd->addr.io.bit_width); |
| 322 | break; |
| 323 | default: |
| 324 | break; |
| 325 | } |
| 326 | } |
| 327 | |
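/*
 * drv_read - execute do_drv_read() on one (preferably the local) CPU in
 * cmd->mask and leave the raw register value in cmd->val.
 */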
| 328 | static void drv_read(struct drv_cmd *cmd) |
| 329 | { |
| 330 | int err; |
| 331 | cmd->val = 0; |
| 332 | |
| 333 | err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1); |
| 334 | WARN_ON_ONCE(err); /* smp_call_function_any() was buggy? */ |
| 335 | } |
| 336 | |
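/*
 * drv_write - execute do_drv_write() on every CPU in cmd->mask; the
 * calling CPU is handled directly because smp_call_function_many()
 * skips it.
 */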
| 337 | static void drv_write(struct drv_cmd *cmd) |
| 338 | { |
| 339 | int this_cpu; |
| 340 | |
| 341 | this_cpu = get_cpu(); |
| 342 | if (cpumask_test_cpu(this_cpu, cmd->mask)) |
| 343 | do_drv_write(cmd); |
| 344 | smp_call_function_many(cmd->mask, do_drv_write, cmd, 1); |
| 345 | put_cpu(); |
| 346 | } |
| 347 | |
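/*
 * get_cur_val - read the raw P-state control value (the PERF_CTL MSR or
 * the ACPI control I/O port) as seen by a CPU in @mask.
 */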
| 348 | static u32 get_cur_val(const struct cpumask *mask) |
| 349 | { |
| 350 | struct acpi_processor_performance *perf; |
| 351 | struct drv_cmd cmd; |
| 352 | |
| 353 | if (unlikely(cpumask_empty(mask))) |
| 354 | return 0; |
| 355 | |
| 356 | switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) { |
| 357 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 358 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
| 359 | cmd.addr.msr.reg = MSR_IA32_PERF_CTL; |
| 360 | break; |
| 361 | case SYSTEM_AMD_MSR_CAPABLE: |
| 362 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; |
| 363 | cmd.addr.msr.reg = MSR_AMD_PERF_CTL; |
| 364 | break; |
| 365 | case SYSTEM_IO_CAPABLE: |
| 366 | cmd.type = SYSTEM_IO_CAPABLE; |
| 367 | perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; |
| 368 | cmd.addr.io.port = perf->control_register.address; |
| 369 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
| 370 | break; |
| 371 | default: |
| 372 | return 0; |
| 373 | } |
| 374 | |
| 375 | cmd.mask = mask; |
| 376 | drv_read(&cmd); |
| 377 | |
| 378 | pr_debug("get_cur_val = %u\n", cmd.val); |
| 379 | |
| 380 | return cmd.val; |
| 381 | } |
| 382 | |
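/*
 * get_cur_freq_on_cpu - cpufreq ->get callback; only installed for
 * FIXED_HARDWARE (MSR) systems, see acpi_cpufreq_cpu_init().
 */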
| 383 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
| 384 | { |
| 385 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu); |
| 386 | unsigned int freq; |
| 387 | unsigned int cached_freq; |
| 388 | |
| 389 | pr_debug("get_cur_freq_on_cpu (%d)\n", cpu); |
| 390 | |
| 391 | if (unlikely(data == NULL || |
| 392 | data->acpi_data == NULL || data->freq_table == NULL)) { |
| 393 | return 0; |
| 394 | } |
| 395 | |
| 396 | cached_freq = data->freq_table[data->acpi_data->state].frequency; |
| 397 | freq = extract_freq(get_cur_val(cpumask_of(cpu)), data); |
| 398 | if (freq != cached_freq) { |
| 399 | /* |
| 400 | * The dreaded BIOS frequency change behind our back. |
| 401 | * Force set the frequency on next target call. |
| 402 | */ |
| 403 | data->resume = 1; |
| 404 | } |
| 405 | |
| 406 | pr_debug("cur freq = %u\n", freq); |
| 407 | |
| 408 | return freq; |
| 409 | } |
| 410 | |
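/*
 * check_freqs - used in acpi_pstate_strict mode: poll the hardware for
 * up to 100 * 10 us and return 1 as soon as the current frequency
 * matches @freq, 0 on timeout.
 */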
| 411 | static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq, |
| 412 | struct acpi_cpufreq_data *data) |
| 413 | { |
| 414 | unsigned int cur_freq; |
| 415 | unsigned int i; |
| 416 | |
| 417 | for (i = 0; i < 100; i++) { |
| 418 | cur_freq = extract_freq(get_cur_val(mask), data); |
| 419 | if (cur_freq == freq) |
| 420 | return 1; |
| 421 | udelay(10); |
| 422 | } |
| 423 | return 0; |
| 424 | } |
| 425 | |
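/*
 * acpi_cpufreq_target - cpufreq ->target_index callback: write the
 * control value of frequency-table entry @index via MSR or I/O port,
 * on one CPU or on all CPUs of the policy depending on the _PSD
 * coordination type.
 */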
| 426 | static int acpi_cpufreq_target(struct cpufreq_policy *policy, |
| 427 | unsigned int index) |
| 428 | { |
| 429 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 430 | struct acpi_processor_performance *perf; |
| 431 | struct drv_cmd cmd; |
| 432 | unsigned int next_perf_state = 0; /* Index into perf table */ |
| 433 | int result = 0; |
| 434 | |
| 435 | if (unlikely(data == NULL || |
| 436 | data->acpi_data == NULL || data->freq_table == NULL)) { |
| 437 | return -ENODEV; |
| 438 | } |
| 439 | |
| 440 | perf = data->acpi_data; |
| 441 | next_perf_state = data->freq_table[index].driver_data; |
| 442 | if (perf->state == next_perf_state) { |
| 443 | if (unlikely(data->resume)) { |
| 444 | pr_debug("Called after resume, resetting to P%d\n", |
| 445 | next_perf_state); |
| 446 | data->resume = 0; |
| 447 | } else { |
| 448 | pr_debug("Already at target state (P%d)\n", |
| 449 | next_perf_state); |
| 450 | goto out; |
| 451 | } |
| 452 | } |
| 453 | |
| 454 | switch (data->cpu_feature) { |
| 455 | case SYSTEM_INTEL_MSR_CAPABLE: |
| 456 | cmd.type = SYSTEM_INTEL_MSR_CAPABLE; |
| 457 | cmd.addr.msr.reg = MSR_IA32_PERF_CTL; |
| 458 | cmd.val = (u32) perf->states[next_perf_state].control; |
| 459 | break; |
| 460 | case SYSTEM_AMD_MSR_CAPABLE: |
| 461 | cmd.type = SYSTEM_AMD_MSR_CAPABLE; |
| 462 | cmd.addr.msr.reg = MSR_AMD_PERF_CTL; |
| 463 | cmd.val = (u32) perf->states[next_perf_state].control; |
| 464 | break; |
| 465 | case SYSTEM_IO_CAPABLE: |
| 466 | cmd.type = SYSTEM_IO_CAPABLE; |
| 467 | cmd.addr.io.port = perf->control_register.address; |
| 468 | cmd.addr.io.bit_width = perf->control_register.bit_width; |
| 469 | cmd.val = (u32) perf->states[next_perf_state].control; |
| 470 | break; |
| 471 | default: |
| 472 | result = -ENODEV; |
| 473 | goto out; |
| 474 | } |
| 475 | |
| 476 | /* cpufreq holds the hotplug lock, so we are safe from here on */ |
| 477 | if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) |
| 478 | cmd.mask = policy->cpus; |
| 479 | else |
| 480 | cmd.mask = cpumask_of(policy->cpu); |
| 481 | |
| 482 | drv_write(&cmd); |
| 483 | |
| 484 | if (acpi_pstate_strict) { |
| 485 | if (!check_freqs(cmd.mask, data->freq_table[index].frequency, |
| 486 | data)) { |
| 487 | pr_debug("acpi_cpufreq_target failed (%d)\n", |
| 488 | policy->cpu); |
| 489 | result = -EAGAIN; |
| 490 | } |
| 491 | } |
| 492 | |
| 493 | if (!result) |
| 494 | perf->state = next_perf_state; |
| 495 | |
| 496 | out: |
| 497 | return result; |
| 498 | } |
| 499 | |
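/*
 * Estimate the current frequency by picking the P-state closest to the
 * measured cpu_khz (the _PSS table is sorted fastest first); if cpu_khz
 * is unknown, assume the CPU is at P0.
 */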
| 500 | static unsigned long |
| 501 | acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu) |
| 502 | { |
| 503 | struct acpi_processor_performance *perf = data->acpi_data; |
| 504 | |
| 505 | if (cpu_khz) { |
| 506 | /* search the closest match to cpu_khz */ |
| 507 | unsigned int i; |
| 508 | unsigned long freq; |
| 509 | unsigned long freqn = perf->states[0].core_frequency * 1000; |
| 510 | |
| 511 | for (i = 0; i < (perf->state_count-1); i++) { |
| 512 | freq = freqn; |
| 513 | freqn = perf->states[i+1].core_frequency * 1000; |
| 514 | if ((2 * cpu_khz) > (freqn + freq)) { |
| 515 | perf->state = i; |
| 516 | return freq; |
| 517 | } |
| 518 | } |
| 519 | perf->state = perf->state_count-1; |
| 520 | return freqn; |
| 521 | } else { |
| 522 | /* assume CPU is at P0... */ |
| 523 | perf->state = 0; |
| 524 | return perf->states[0].core_frequency * 1000; |
| 525 | } |
| 526 | } |
| 527 | |
| 528 | static void free_acpi_perf_data(void) |
| 529 | { |
| 530 | unsigned int i; |
| 531 | |
| 532 | /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */ |
| 533 | for_each_possible_cpu(i) |
| 534 | free_cpumask_var(per_cpu_ptr(acpi_perf_data, i) |
| 535 | ->shared_cpu_map); |
| 536 | free_percpu(acpi_perf_data); |
| 537 | } |
| 538 | |
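/*
 * CPU hotplug notifier: keep the per-CPU boost-disable MSR bit
 * consistent as CPUs come and go.
 */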
| 539 | static int boost_notify(struct notifier_block *nb, unsigned long action, |
| 540 | void *hcpu) |
| 541 | { |
| 542 | unsigned cpu = (long)hcpu; |
| 543 | const struct cpumask *cpumask; |
| 544 | |
| 545 | cpumask = get_cpu_mask(cpu); |
| 546 | |
	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that this
	 * CPU cannot block the remaining ones from boosting. On the
	 * CPU_UP path we simply keep the boost-disable flag in sync with
	 * the current global state.
	 */
| 553 | |
| 554 | switch (action) { |
| 555 | case CPU_UP_PREPARE: |
| 556 | case CPU_UP_PREPARE_FROZEN: |
| 557 | boost_set_msrs(boost_enabled, cpumask); |
| 558 | break; |
| 559 | |
| 560 | case CPU_DOWN_PREPARE: |
| 561 | case CPU_DOWN_PREPARE_FROZEN: |
| 562 | boost_set_msrs(1, cpumask); |
| 563 | break; |
| 564 | |
| 565 | default: |
| 566 | break; |
| 567 | } |
| 568 | |
| 569 | return NOTIFY_OK; |
| 570 | } |
| 571 | |
| 573 | static struct notifier_block boost_nb = { |
| 574 | .notifier_call = boost_notify, |
| 575 | }; |
| 576 | |
| 577 | /* |
| 578 | * acpi_cpufreq_early_init - initialize ACPI P-States library |
| 579 | * |
| 580 | * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c) |
| 581 | * in order to determine correct frequency and voltage pairings. We can |
| 582 | * do _PDC and _PSD and find out the processor dependency for the |
| 583 | * actual init that will happen later... |
| 584 | */ |
| 585 | static int __init acpi_cpufreq_early_init(void) |
| 586 | { |
| 587 | unsigned int i; |
| 588 | pr_debug("acpi_cpufreq_early_init\n"); |
| 589 | |
| 590 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); |
| 591 | if (!acpi_perf_data) { |
| 592 | pr_debug("Memory allocation error for acpi_perf_data.\n"); |
| 593 | return -ENOMEM; |
| 594 | } |
| 595 | for_each_possible_cpu(i) { |
| 596 | if (!zalloc_cpumask_var_node( |
| 597 | &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map, |
| 598 | GFP_KERNEL, cpu_to_node(i))) { |
| 599 | |
| 600 | /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */ |
| 601 | free_acpi_perf_data(); |
| 602 | return -ENOMEM; |
| 603 | } |
| 604 | } |
| 605 | |
| 606 | /* Do initialization in ACPI core */ |
| 607 | acpi_processor_preregister_performance(acpi_perf_data); |
| 608 | return 0; |
| 609 | } |
| 610 | |
| 611 | #ifdef CONFIG_SMP |
/*
 * Some BIOSes do SW_ANY coordination internally, either setting it up
 * in hardware or handling it in firmware, without informing the OS.
 * If this goes undetected, it has the side effect of the CPU running
 * at a speed other than the one the OS intended. Detect it and handle
 * it cleanly.
 */
| 618 | static int bios_with_sw_any_bug; |
| 619 | |
| 620 | static int sw_any_bug_found(const struct dmi_system_id *d) |
| 621 | { |
| 622 | bios_with_sw_any_bug = 1; |
| 623 | return 0; |
| 624 | } |
| 625 | |
| 626 | static const struct dmi_system_id sw_any_bug_dmi_table[] = { |
| 627 | { |
| 628 | .callback = sw_any_bug_found, |
| 629 | .ident = "Supermicro Server X6DLP", |
| 630 | .matches = { |
| 631 | DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"), |
| 632 | DMI_MATCH(DMI_BIOS_VERSION, "080010"), |
| 633 | DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), |
| 634 | }, |
| 635 | }, |
| 636 | { } |
| 637 | }; |
| 638 | |
| 639 | static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) |
| 640 | { |
| 641 | /* Intel Xeon Processor 7100 Series Specification Update |
| 642 | * http://www.intel.com/Assets/PDF/specupdate/314554.pdf |
| 643 | * AL30: A Machine Check Exception (MCE) Occurring during an |
| 644 | * Enhanced Intel SpeedStep Technology Ratio Change May Cause |
| 645 | * Both Processor Cores to Lock Up. */ |
| 646 | if (c->x86_vendor == X86_VENDOR_INTEL) { |
| 647 | if ((c->x86 == 15) && |
| 648 | (c->x86_model == 6) && |
| 649 | (c->x86_mask == 8)) { |
			pr_info(PFX "Intel(R) Xeon(R) 7100 Errata AL30, processors may lock up on frequency changes: disabling acpi-cpufreq.\n");
| 654 | return -ENODEV; |
| 655 | } |
| 656 | } |
| 657 | return 0; |
| 658 | } |
| 659 | #endif |
| 660 | |
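/*
 * acpi_cpufreq_cpu_init - per-policy init: register the CPU with the
 * ACPI perflib, pick the access method from the _PCT register space,
 * build the frequency table from _PSS and derive the transition latency.
 */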
| 661 | static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) |
| 662 | { |
| 663 | unsigned int i; |
| 664 | unsigned int valid_states = 0; |
| 665 | unsigned int cpu = policy->cpu; |
| 666 | struct acpi_cpufreq_data *data; |
	int result = 0;
| 668 | struct cpuinfo_x86 *c = &cpu_data(policy->cpu); |
| 669 | struct acpi_processor_performance *perf; |
| 670 | #ifdef CONFIG_SMP |
| 671 | static int blacklisted; |
| 672 | #endif |
| 673 | |
| 674 | pr_debug("acpi_cpufreq_cpu_init\n"); |
| 675 | |
| 676 | #ifdef CONFIG_SMP |
| 677 | if (blacklisted) |
| 678 | return blacklisted; |
| 679 | blacklisted = acpi_cpufreq_blacklist(c); |
| 680 | if (blacklisted) |
| 681 | return blacklisted; |
| 682 | #endif |
| 683 | |
| 684 | data = kzalloc(sizeof(*data), GFP_KERNEL); |
| 685 | if (!data) |
| 686 | return -ENOMEM; |
| 687 | |
| 688 | if (!zalloc_cpumask_var(&data->freqdomain_cpus, GFP_KERNEL)) { |
| 689 | result = -ENOMEM; |
| 690 | goto err_free; |
| 691 | } |
| 692 | |
| 693 | data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu); |
| 694 | per_cpu(acfreq_data, cpu) = data; |
| 695 | |
| 696 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) |
| 697 | acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS; |
| 698 | |
| 699 | result = acpi_processor_register_performance(data->acpi_data, cpu); |
| 700 | if (result) |
| 701 | goto err_free_mask; |
| 702 | |
| 703 | perf = data->acpi_data; |
| 704 | policy->shared_type = perf->shared_type; |
| 705 | |
| 706 | /* |
| 707 | * Will let policy->cpus know about dependency only when software |
| 708 | * coordination is required. |
| 709 | */ |
| 710 | if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || |
| 711 | policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { |
| 712 | cpumask_copy(policy->cpus, perf->shared_cpu_map); |
| 713 | } |
| 714 | cpumask_copy(data->freqdomain_cpus, perf->shared_cpu_map); |
| 715 | |
| 716 | #ifdef CONFIG_SMP |
| 717 | dmi_check_system(sw_any_bug_dmi_table); |
| 718 | if (bios_with_sw_any_bug && !policy_is_shared(policy)) { |
| 719 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; |
| 720 | cpumask_copy(policy->cpus, cpu_core_mask(cpu)); |
| 721 | } |
| 722 | |
| 723 | if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) { |
| 724 | cpumask_clear(policy->cpus); |
| 725 | cpumask_set_cpu(cpu, policy->cpus); |
| 726 | cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu)); |
| 727 | policy->shared_type = CPUFREQ_SHARED_TYPE_HW; |
| 728 | pr_info_once(PFX "overriding BIOS provided _PSD data\n"); |
| 729 | } |
| 730 | #endif |
| 731 | |
| 732 | /* capability check */ |
| 733 | if (perf->state_count <= 1) { |
| 734 | pr_debug("No P-States\n"); |
| 735 | result = -ENODEV; |
| 736 | goto err_unreg; |
| 737 | } |
| 738 | |
| 739 | if (perf->control_register.space_id != perf->status_register.space_id) { |
| 740 | result = -ENODEV; |
| 741 | goto err_unreg; |
| 742 | } |
| 743 | |
| 744 | switch (perf->control_register.space_id) { |
| 745 | case ACPI_ADR_SPACE_SYSTEM_IO: |
| 746 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && |
| 747 | boot_cpu_data.x86 == 0xf) { |
| 748 | pr_debug("AMD K8 systems must use native drivers.\n"); |
| 749 | result = -ENODEV; |
| 750 | goto err_unreg; |
| 751 | } |
| 752 | pr_debug("SYSTEM IO addr space\n"); |
| 753 | data->cpu_feature = SYSTEM_IO_CAPABLE; |
| 754 | break; |
| 755 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
| 756 | pr_debug("HARDWARE addr space\n"); |
| 757 | if (check_est_cpu(cpu)) { |
| 758 | data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; |
| 759 | break; |
| 760 | } |
| 761 | if (check_amd_hwpstate_cpu(cpu)) { |
| 762 | data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE; |
| 763 | break; |
| 764 | } |
| 765 | result = -ENODEV; |
| 766 | goto err_unreg; |
| 767 | default: |
| 768 | pr_debug("Unknown addr space %d\n", |
| 769 | (u32) (perf->control_register.space_id)); |
| 770 | result = -ENODEV; |
| 771 | goto err_unreg; |
| 772 | } |
| 773 | |
| 774 | data->freq_table = kmalloc(sizeof(*data->freq_table) * |
| 775 | (perf->state_count+1), GFP_KERNEL); |
| 776 | if (!data->freq_table) { |
| 777 | result = -ENOMEM; |
| 778 | goto err_unreg; |
| 779 | } |
| 780 | |
| 781 | /* detect transition latency */ |
| 782 | policy->cpuinfo.transition_latency = 0; |
| 783 | for (i = 0; i < perf->state_count; i++) { |
| 784 | if ((perf->states[i].transition_latency * 1000) > |
| 785 | policy->cpuinfo.transition_latency) |
| 786 | policy->cpuinfo.transition_latency = |
| 787 | perf->states[i].transition_latency * 1000; |
| 788 | } |
| 789 | |
	/* Check for high latency (> 20 us) from buggy BIOSes, like on T42 */
| 791 | if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE && |
| 792 | policy->cpuinfo.transition_latency > 20 * 1000) { |
| 793 | policy->cpuinfo.transition_latency = 20 * 1000; |
| 794 | printk_once(KERN_INFO |
| 795 | "P-state transition latency capped at 20 uS\n"); |
| 796 | } |
| 797 | |
| 798 | /* table init */ |
| 799 | for (i = 0; i < perf->state_count; i++) { |
| 800 | if (i > 0 && perf->states[i].core_frequency >= |
| 801 | data->freq_table[valid_states-1].frequency / 1000) |
| 802 | continue; |
| 803 | |
| 804 | data->freq_table[valid_states].driver_data = i; |
| 805 | data->freq_table[valid_states].frequency = |
| 806 | perf->states[i].core_frequency * 1000; |
| 807 | valid_states++; |
| 808 | } |
| 809 | data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END; |
| 810 | perf->state = 0; |
| 811 | |
| 812 | result = cpufreq_table_validate_and_show(policy, data->freq_table); |
| 813 | if (result) |
| 814 | goto err_freqfree; |
| 815 | |
| 816 | if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) |
| 817 | printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n"); |
| 818 | |
| 819 | switch (perf->control_register.space_id) { |
| 820 | case ACPI_ADR_SPACE_SYSTEM_IO: |
| 821 | /* |
| 822 | * The core will not set policy->cur, because |
| 823 | * cpufreq_driver->get is NULL, so we need to set it here. |
| 824 | * However, we have to guess it, because the current speed is |
| 825 | * unknown and not detectable via IO ports. |
| 826 | */ |
| 827 | policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); |
| 828 | break; |
| 829 | case ACPI_ADR_SPACE_FIXED_HARDWARE: |
| 830 | acpi_cpufreq_driver.get = get_cur_freq_on_cpu; |
| 831 | break; |
| 832 | default: |
| 833 | break; |
| 834 | } |
| 835 | |
| 836 | /* notify BIOS that we exist */ |
| 837 | acpi_processor_notify_smm(THIS_MODULE); |
| 838 | |
| 839 | pr_debug("CPU%u - ACPI performance management activated.\n", cpu); |
| 840 | for (i = 0; i < perf->state_count; i++) |
| 841 | pr_debug(" %cP%d: %d MHz, %d mW, %d uS\n", |
| 842 | (i == perf->state ? '*' : ' '), i, |
| 843 | (u32) perf->states[i].core_frequency, |
| 844 | (u32) perf->states[i].power, |
| 845 | (u32) perf->states[i].transition_latency); |
| 846 | |
| 847 | /* |
| 848 | * the first call to ->target() should result in us actually |
| 849 | * writing something to the appropriate registers. |
| 850 | */ |
| 851 | data->resume = 1; |
| 852 | |
| 853 | return result; |
| 854 | |
| 855 | err_freqfree: |
| 856 | kfree(data->freq_table); |
| 857 | err_unreg: |
| 858 | acpi_processor_unregister_performance(perf, cpu); |
| 859 | err_free_mask: |
| 860 | free_cpumask_var(data->freqdomain_cpus); |
| 861 | err_free: |
| 862 | kfree(data); |
| 863 | per_cpu(acfreq_data, cpu) = NULL; |
| 864 | |
| 865 | return result; |
| 866 | } |
| 867 | |
| 868 | static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) |
| 869 | { |
| 870 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 871 | |
| 872 | pr_debug("acpi_cpufreq_cpu_exit\n"); |
| 873 | |
| 874 | if (data) { |
| 875 | cpufreq_frequency_table_put_attr(policy->cpu); |
| 876 | per_cpu(acfreq_data, policy->cpu) = NULL; |
| 877 | acpi_processor_unregister_performance(data->acpi_data, |
| 878 | policy->cpu); |
| 879 | free_cpumask_var(data->freqdomain_cpus); |
| 880 | kfree(data->freq_table); |
| 881 | kfree(data); |
| 882 | } |
| 883 | |
| 884 | return 0; |
| 885 | } |
| 886 | |
| 887 | static int acpi_cpufreq_resume(struct cpufreq_policy *policy) |
| 888 | { |
| 889 | struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); |
| 890 | |
| 891 | pr_debug("acpi_cpufreq_resume\n"); |
| 892 | |
| 893 | data->resume = 1; |
| 894 | |
| 895 | return 0; |
| 896 | } |
| 897 | |
| 898 | static struct freq_attr *acpi_cpufreq_attr[] = { |
| 899 | &cpufreq_freq_attr_scaling_available_freqs, |
| 900 | &freqdomain_cpus, |
| 901 | NULL, /* this is a placeholder for cpb, do not remove */ |
| 902 | NULL, |
| 903 | }; |
| 904 | |
| 905 | static struct cpufreq_driver acpi_cpufreq_driver = { |
| 906 | .verify = cpufreq_generic_frequency_table_verify, |
| 907 | .target_index = acpi_cpufreq_target, |
| 908 | .bios_limit = acpi_processor_get_bios_limit, |
| 909 | .init = acpi_cpufreq_cpu_init, |
| 910 | .exit = acpi_cpufreq_cpu_exit, |
| 911 | .resume = acpi_cpufreq_resume, |
| 912 | .name = "acpi-cpufreq", |
| 913 | .attr = acpi_cpufreq_attr, |
| 914 | }; |
| 915 | |
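/*
 * Detect boost capability (CPB on AMD, IDA/turbo on Intel), force all
 * online CPUs to a consistent initial state, register the hotplug
 * notifier and expose the global "boost" sysfs file.
 */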
| 916 | static void __init acpi_cpufreq_boost_init(void) |
| 917 | { |
| 918 | if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) { |
| 919 | msrs = msrs_alloc(); |
| 920 | |
| 921 | if (!msrs) |
| 922 | return; |
| 923 | |
| 924 | boost_supported = true; |
| 925 | boost_enabled = boost_state(0); |
| 926 | |
| 927 | get_online_cpus(); |
| 928 | |
| 929 | /* Force all MSRs to the same value */ |
| 930 | boost_set_msrs(boost_enabled, cpu_online_mask); |
| 931 | |
| 932 | register_cpu_notifier(&boost_nb); |
| 933 | |
| 934 | put_online_cpus(); |
| 935 | } else |
| 936 | global_boost.attr.mode = 0444; |
| 937 | |
| 938 | /* We create the boost file in any case, though for systems without |
| 939 | * hardware support it will be read-only and hardwired to return 0. |
| 940 | */ |
| 941 | if (cpufreq_sysfs_create_file(&(global_boost.attr))) |
| 942 | pr_warn(PFX "could not register global boost sysfs file\n"); |
| 943 | else |
| 944 | pr_debug("registered global boost sysfs file\n"); |
| 945 | } |
| 946 | |
| 947 | static void __exit acpi_cpufreq_boost_exit(void) |
| 948 | { |
| 949 | cpufreq_sysfs_remove_file(&(global_boost.attr)); |
| 950 | |
| 951 | if (msrs) { |
| 952 | unregister_cpu_notifier(&boost_nb); |
| 953 | |
| 954 | msrs_free(msrs); |
| 955 | msrs = NULL; |
| 956 | } |
| 957 | } |
| 958 | |
| 959 | static int __init acpi_cpufreq_init(void) |
| 960 | { |
| 961 | int ret; |
| 962 | |
| 963 | if (acpi_disabled) |
| 964 | return -ENODEV; |
| 965 | |
| 966 | /* don't keep reloading if cpufreq_driver exists */ |
| 967 | if (cpufreq_get_current_driver()) |
| 968 | return -EEXIST; |
| 969 | |
| 970 | pr_debug("acpi_cpufreq_init\n"); |
| 971 | |
| 972 | ret = acpi_cpufreq_early_init(); |
| 973 | if (ret) |
| 974 | return ret; |
| 975 | |
| 976 | #ifdef CONFIG_X86_ACPI_CPUFREQ_CPB |
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per-CPU instantiation, but system-global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which will
	 * probably be removed at some point in the future.
	 */
| 983 | if (check_amd_hwpstate_cpu(0)) { |
| 984 | struct freq_attr **iter; |
| 985 | |
| 986 | pr_debug("adding sysfs entry for cpb\n"); |
| 987 | |
| 988 | for (iter = acpi_cpufreq_attr; *iter != NULL; iter++) |
| 989 | ; |
| 990 | |
| 991 | /* make sure there is a terminator behind it */ |
| 992 | if (iter[1] == NULL) |
| 993 | *iter = &cpb; |
| 994 | } |
| 995 | #endif |
| 996 | |
| 997 | ret = cpufreq_register_driver(&acpi_cpufreq_driver); |
| 998 | if (ret) |
| 999 | free_acpi_perf_data(); |
| 1000 | else |
| 1001 | acpi_cpufreq_boost_init(); |
| 1002 | |
| 1003 | return ret; |
| 1004 | } |
| 1005 | |
| 1006 | static void __exit acpi_cpufreq_exit(void) |
| 1007 | { |
| 1008 | pr_debug("acpi_cpufreq_exit\n"); |
| 1009 | |
| 1010 | acpi_cpufreq_boost_exit(); |
| 1011 | |
| 1012 | cpufreq_unregister_driver(&acpi_cpufreq_driver); |
| 1013 | |
| 1014 | free_acpi_perf_data(); |
| 1015 | } |
| 1016 | |
| 1017 | module_param(acpi_pstate_strict, uint, 0644); |
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. Non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
| 1021 | |
| 1022 | late_initcall(acpi_cpufreq_init); |
| 1023 | module_exit(acpi_cpufreq_exit); |
| 1024 | |
| 1025 | static const struct x86_cpu_id acpi_cpufreq_ids[] = { |
| 1026 | X86_FEATURE_MATCH(X86_FEATURE_ACPI), |
| 1027 | X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE), |
| 1028 | {} |
| 1029 | }; |
| 1030 | MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids); |
| 1031 | |
| 1032 | static const struct acpi_device_id processor_device_ids[] = { |
| 1033 | {ACPI_PROCESSOR_OBJECT_HID, }, |
| 1034 | {ACPI_PROCESSOR_DEVICE_HID, }, |
| 1035 | {}, |
| 1036 | }; |
| 1037 | MODULE_DEVICE_TABLE(acpi, processor_device_ids); |
| 1038 | |
| 1039 | MODULE_ALIAS("acpi"); |