cpufreq: intel_pstate: Avoid calculation for max/min
1 /*
2 * intel_pstate.c: Native P state management for Intel processors
3 *
4 * (C) Copyright 2012 Intel Corporation
5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/module.h>
16 #include <linux/ktime.h>
17 #include <linux/hrtimer.h>
18 #include <linux/tick.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/list.h>
22 #include <linux/cpu.h>
23 #include <linux/cpufreq.h>
24 #include <linux/sysfs.h>
25 #include <linux/types.h>
26 #include <linux/fs.h>
27 #include <linux/debugfs.h>
28 #include <linux/acpi.h>
29 #include <linux/vmalloc.h>
30 #include <trace/events/power.h>
31
32 #include <asm/div64.h>
33 #include <asm/msr.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/cpufeature.h>
36
37 #if IS_ENABLED(CONFIG_ACPI)
38 #include <acpi/processor.h>
39 #endif
40
41 #define BYT_RATIOS 0x66a
42 #define BYT_VIDS 0x66b
43 #define BYT_TURBO_RATIOS 0x66c
44 #define BYT_TURBO_VIDS 0x66d
45
46 #define FRAC_BITS 8
47 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
48 #define fp_toint(X) ((X) >> FRAC_BITS)
49
50 static inline int32_t mul_fp(int32_t x, int32_t y)
51 {
52 return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
53 }
54
55 static inline int32_t div_fp(s64 x, s64 y)
56 {
57 return div64_s64((int64_t)x << FRAC_BITS, y);
58 }
59
60 static inline int ceiling_fp(int32_t x)
61 {
62 int mask, ret;
63
64 ret = fp_toint(x);
65 mask = (1 << FRAC_BITS) - 1;
66 if (x & mask)
67 ret += 1;
68 return ret;
69 }
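/*
 * Worked example of the fixed-point helpers above (illustrative values,
 * assuming FRAC_BITS == 8, i.e. a 24.8 format):
 *
 *   int_tofp(3)                      == 0x300 (768)
 *   fp_toint(0x300)                  == 3
 *   mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6)
 *   div_fp(int_tofp(1), int_tofp(4)) == 64   (0.25 in 24.8)
 *   ceiling_fp(64)                   == 1    (any fraction rounds up)
 */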
70
71 struct sample {
72 int32_t core_pct_busy;
73 u64 aperf;
74 u64 mperf;
75 u64 tsc;
76 int freq;
77 ktime_t time;
78 };
79
80 struct pstate_data {
81 int current_pstate;
82 int min_pstate;
83 int max_pstate;
84 int max_pstate_physical;
85 int scaling;
86 int turbo_pstate;
87 };
88
89 struct vid_data {
90 int min;
91 int max;
92 int turbo;
93 int32_t ratio;
94 };
95
96 struct _pid {
97 int setpoint;
98 int32_t integral;
99 int32_t p_gain;
100 int32_t i_gain;
101 int32_t d_gain;
102 int deadband;
103 int32_t last_err;
104 };
105
106 struct cpudata {
107 int cpu;
108
109 struct timer_list timer;
110
111 struct pstate_data pstate;
112 struct vid_data vid;
113 struct _pid pid;
114
115 ktime_t last_sample_time;
116 u64 prev_aperf;
117 u64 prev_mperf;
118 u64 prev_tsc;
119 struct sample sample;
120 #if IS_ENABLED(CONFIG_ACPI)
121 struct acpi_processor_performance acpi_perf_data;
122 #endif
123 };
124
125 static struct cpudata **all_cpu_data;
126 struct pstate_adjust_policy {
127 int sample_rate_ms;
128 int deadband;
129 int setpoint;
130 int p_gain_pct;
131 int d_gain_pct;
132 int i_gain_pct;
133 };
134
135 struct pstate_funcs {
136 int (*get_max)(void);
137 int (*get_max_physical)(void);
138 int (*get_min)(void);
139 int (*get_turbo)(void);
140 int (*get_scaling)(void);
141 void (*set)(struct cpudata*, int pstate);
142 void (*get_vid)(struct cpudata *);
143 };
144
145 struct cpu_defaults {
146 struct pstate_adjust_policy pid_policy;
147 struct pstate_funcs funcs;
148 };
149
150 static struct pstate_adjust_policy pid_params;
151 static struct pstate_funcs pstate_funcs;
152 static int hwp_active;
153 static int no_acpi_perf;
154
155 struct perf_limits {
156 int no_turbo;
157 int turbo_disabled;
158 int max_perf_pct;
159 int min_perf_pct;
160 int32_t max_perf;
161 int32_t min_perf;
162 int max_policy_pct;
163 int max_sysfs_pct;
164 int min_policy_pct;
165 int min_sysfs_pct;
166 int max_perf_ctl;
167 int min_perf_ctl;
168 };
169
170 static struct perf_limits limits = {
171 .no_turbo = 0,
172 .turbo_disabled = 0,
173 .max_perf_pct = 100,
174 .max_perf = int_tofp(1),
175 .min_perf_pct = 0,
176 .min_perf = 0,
177 .max_policy_pct = 100,
178 .max_sysfs_pct = 100,
179 .min_policy_pct = 0,
180 .min_sysfs_pct = 0,
181 .max_perf_ctl = 0,
182 .min_perf_ctl = 0,
183 };
184
185 #if IS_ENABLED(CONFIG_ACPI)
186 /*
187 * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO MSR
188 * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
189 * max_pstate and turbo_pstate fields. The PERF_CTL MSR contains a 16 bit value
190 * for the P state ratio, of which only the high 8 bits are used; for example,
191 * 0x1700 sets target ratio 0x17. The _PSS control value is stored in a format
192 * that can be written directly to the PERF_CTL MSR, whereas in the intel_pstate
193 * driver this shift occurs during the write to PERF_CTL (e.g. core_set_pstate()
194 * for core processors). This function converts the _PSS control value to the
195 * intel_pstate driver format for comparison and assignment.
196 */
197 static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
198 {
199 return cpu->acpi_perf_data.states[index].control >> 8;
200 }
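/*
 * Example (illustrative): a _PSS control value of 0x1700, the PERF_CTL
 * encoding for target ratio 0x17, converts to pstate 0x17 (23), i.e.
 * 2300 MHz with the usual 100 MHz core scaling.
 */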
201
202 static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
203 {
204 struct cpudata *cpu;
205 int ret;
206 bool turbo_absent = false;
207 int max_pstate_index;
208 int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
209 int i;
210
211 cpu = all_cpu_data[policy->cpu];
212
213 pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
214 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
215 cpu->pstate.turbo_pstate);
216
217 if (!cpu->acpi_perf_data.shared_cpu_map &&
218 zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
219 GFP_KERNEL, cpu_to_node(policy->cpu))) {
220 return -ENOMEM;
221 }
222
223 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
224 policy->cpu);
225 if (ret)
226 return ret;
227
228 /*
229 * Check if the control value in _PSS is for PERF_CTL MSR, which should
230 * guarantee that the states returned by it map to the states in our
231 * list directly.
232 */
233 if (cpu->acpi_perf_data.control_register.space_id !=
234 ACPI_ADR_SPACE_FIXED_HARDWARE)
235 return -EIO;
236
237 pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
238 for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
239 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
240 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
241 (u32) cpu->acpi_perf_data.states[i].core_frequency,
242 (u32) cpu->acpi_perf_data.states[i].power,
243 (u32) cpu->acpi_perf_data.states[i].control);
244
245 /*
246 * If there is only one entry in _PSS, simply ignore _PSS and continue as
247 * usual without taking it into account.
248 */
249 if (cpu->acpi_perf_data.state_count < 2)
250 return 0;
251
252 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
253 min_pss_ctl = convert_to_native_pstate_format(cpu,
254 cpu->acpi_perf_data.state_count - 1);
255 /* Check if there is a turbo freq in _PSS */
256 if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
257 turbo_pss_ctl > cpu->pstate.min_pstate) {
258 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
259 limits.no_turbo = limits.turbo_disabled = 1;
260 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
261 turbo_absent = true;
262 }
263
264 /* Check if the max non turbo p state < Intel P state max */
265 max_pstate_index = turbo_absent ? 0 : 1;
266 max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
267 if (max_pss_ctl < cpu->pstate.max_pstate &&
268 max_pss_ctl > cpu->pstate.min_pstate)
269 cpu->pstate.max_pstate = max_pss_ctl;
270
271 /* Check if min perf > Intel P state min */
272 if (min_pss_ctl > cpu->pstate.min_pstate &&
273 min_pss_ctl < cpu->pstate.max_pstate) {
274 cpu->pstate.min_pstate = min_pss_ctl;
275 policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
276 }
277
278 if (turbo_absent)
279 policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
280 cpu->pstate.scaling;
281 else {
282 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
283 cpu->pstate.scaling;
284 /*
285 * The _PSS table doesn't contain the whole turbo frequency range.
286 * It just contains +1 MHz above the max non turbo frequency,
287 * with a control value corresponding to the max turbo ratio. But
288 * when cpufreq set_policy is called, it will be called with this
289 * max frequency, which will cause reduced performance, since
290 * this driver uses the real max turbo frequency as the max
291 * frequency. So correct this frequency in the _PSS table to the
292 * real max turbo frequency based on the turbo ratio.
293 * Also convert to MHz, as the _PSS frequency is in MHz.
294 */
295 cpu->acpi_perf_data.states[0].core_frequency =
296 turbo_pss_ctl * 100;
297 }
298
299 pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
300 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
301 cpu->pstate.turbo_pstate);
302 pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
303 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
304
305 return 0;
306 }
307
308 static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
309 {
310 struct cpudata *cpu;
311
312 if (no_acpi_perf)
313 return 0;
314
315 cpu = all_cpu_data[policy->cpu];
316 acpi_processor_unregister_performance(policy->cpu);
317 return 0;
318 }
319
320 #else
321 static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
322 {
323 return 0;
324 }
325
326 static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
327 {
328 return 0;
329 }
330 #endif
331
332 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
333 int deadband, int integral) {
334 pid->setpoint = setpoint;
335 pid->deadband = deadband;
336 pid->integral = int_tofp(integral);
337 pid->last_err = int_tofp(setpoint) - int_tofp(busy);
338 }
339
340 static inline void pid_p_gain_set(struct _pid *pid, int percent)
341 {
342 pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
343 }
344
345 static inline void pid_i_gain_set(struct _pid *pid, int percent)
346 {
347 pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
348 }
349
350 static inline void pid_d_gain_set(struct _pid *pid, int percent)
351 {
352 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
353 }
354
355 static signed int pid_calc(struct _pid *pid, int32_t busy)
356 {
357 signed int result;
358 int32_t pterm, dterm, fp_error;
359 int32_t integral_limit;
360
361 fp_error = int_tofp(pid->setpoint) - busy;
362
363 if (abs(fp_error) <= int_tofp(pid->deadband))
364 return 0;
365
366 pterm = mul_fp(pid->p_gain, fp_error);
367
368 pid->integral += fp_error;
369
370 /*
371 * We limit the integral here so that it will never
372 * get higher than 30. This prevents it from becoming
373 * too large an input over long periods of time and allows
374 * it to get factored out sooner.
375 *
376 * The value of 30 was chosen through experimentation.
377 */
378 integral_limit = int_tofp(30);
379 if (pid->integral > integral_limit)
380 pid->integral = integral_limit;
381 if (pid->integral < -integral_limit)
382 pid->integral = -integral_limit;
383
384 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
385 pid->last_err = fp_error;
386
387 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
388 result = result + (1 << (FRAC_BITS-1));
389 return (signed int)fp_toint(result);
390 }
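/*
 * Rough worked example using the core defaults further below (setpoint 97,
 * deadband 0, p_gain_pct 20, i_gain_pct and d_gain_pct 0; the numbers are
 * illustrative): with busy == int_tofp(100), fp_error is int_tofp(-3),
 * pterm is about -0.6 in fixed point, and after rounding pid_calc()
 * returns -1, so the caller bumps the requested pstate up by one.
 */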
391
392 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
393 {
394 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
395 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
396 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
397
398 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
399 }
400
401 static inline void intel_pstate_reset_all_pid(void)
402 {
403 unsigned int cpu;
404
405 for_each_online_cpu(cpu) {
406 if (all_cpu_data[cpu])
407 intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
408 }
409 }
410
411 static inline void update_turbo_state(void)
412 {
413 u64 misc_en;
414 struct cpudata *cpu;
415
416 cpu = all_cpu_data[0];
417 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
418 limits.turbo_disabled =
419 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
420 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
421 }
422
423 static void intel_pstate_hwp_set(void)
424 {
425 int min, hw_min, max, hw_max, cpu, range, adj_range;
426 u64 value, cap;
427
428 rdmsrl(MSR_HWP_CAPABILITIES, cap);
429 hw_min = HWP_LOWEST_PERF(cap);
430 hw_max = HWP_HIGHEST_PERF(cap);
431 range = hw_max - hw_min;
432
433 get_online_cpus();
434
435 for_each_online_cpu(cpu) {
436 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
437 adj_range = limits.min_perf_pct * range / 100;
438 min = hw_min + adj_range;
439 value &= ~HWP_MIN_PERF(~0L);
440 value |= HWP_MIN_PERF(min);
441
442 adj_range = limits.max_perf_pct * range / 100;
443 max = hw_min + adj_range;
444 if (limits.no_turbo) {
445 hw_max = HWP_GUARANTEED_PERF(cap);
446 if (hw_max < max)
447 max = hw_max;
448 }
449
450 value &= ~HWP_MAX_PERF(~0L);
451 value |= HWP_MAX_PERF(max);
452 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
453 }
454
455 put_online_cpus();
456 }
457
458 /************************** debugfs begin ************************/
459 static int pid_param_set(void *data, u64 val)
460 {
461 *(u32 *)data = val;
462 intel_pstate_reset_all_pid();
463 return 0;
464 }
465
466 static int pid_param_get(void *data, u64 *val)
467 {
468 *val = *(u32 *)data;
469 return 0;
470 }
471 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
472
473 struct pid_param {
474 char *name;
475 void *value;
476 };
477
478 static struct pid_param pid_files[] = {
479 {"sample_rate_ms", &pid_params.sample_rate_ms},
480 {"d_gain_pct", &pid_params.d_gain_pct},
481 {"i_gain_pct", &pid_params.i_gain_pct},
482 {"deadband", &pid_params.deadband},
483 {"setpoint", &pid_params.setpoint},
484 {"p_gain_pct", &pid_params.p_gain_pct},
485 {NULL, NULL}
486 };
487
488 static void __init intel_pstate_debug_expose_params(void)
489 {
490 struct dentry *debugfs_parent;
491 int i = 0;
492
493 if (hwp_active)
494 return;
495 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
496 if (IS_ERR_OR_NULL(debugfs_parent))
497 return;
498 while (pid_files[i].name) {
499 debugfs_create_file(pid_files[i].name, 0660,
500 debugfs_parent, pid_files[i].value,
501 &fops_pid_param);
502 i++;
503 }
504 }
505
506 /************************** debugfs end ************************/
507
508 /************************** sysfs begin ************************/
509 #define show_one(file_name, object) \
510 static ssize_t show_##file_name \
511 (struct kobject *kobj, struct attribute *attr, char *buf) \
512 { \
513 return sprintf(buf, "%u\n", limits.object); \
514 }
515
516 static ssize_t show_turbo_pct(struct kobject *kobj,
517 struct attribute *attr, char *buf)
518 {
519 struct cpudata *cpu;
520 int total, no_turbo, turbo_pct;
521 uint32_t turbo_fp;
522
523 cpu = all_cpu_data[0];
524
525 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
526 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
527 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
528 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
529 return sprintf(buf, "%u\n", turbo_pct);
530 }
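/*
 * Example (illustrative pstates): with min_pstate 8, max_pstate 24 and
 * turbo_pstate 32, total = 25 and no_turbo = 17, so turbo_pct reports
 * 100 - 67 = 33, i.e. about a third of the P state range is turbo only.
 */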
531
532 static ssize_t show_num_pstates(struct kobject *kobj,
533 struct attribute *attr, char *buf)
534 {
535 struct cpudata *cpu;
536 int total;
537
538 cpu = all_cpu_data[0];
539 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
540 return sprintf(buf, "%u\n", total);
541 }
542
543 static ssize_t show_no_turbo(struct kobject *kobj,
544 struct attribute *attr, char *buf)
545 {
546 ssize_t ret;
547
548 update_turbo_state();
549 if (limits.turbo_disabled)
550 ret = sprintf(buf, "%u\n", limits.turbo_disabled);
551 else
552 ret = sprintf(buf, "%u\n", limits.no_turbo);
553
554 return ret;
555 }
556
557 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
558 const char *buf, size_t count)
559 {
560 unsigned int input;
561 int ret;
562
563 ret = sscanf(buf, "%u", &input);
564 if (ret != 1)
565 return -EINVAL;
566
567 update_turbo_state();
568 if (limits.turbo_disabled) {
569 pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
570 return -EPERM;
571 }
572
573 limits.no_turbo = clamp_t(int, input, 0, 1);
574
575 if (hwp_active)
576 intel_pstate_hwp_set();
577
578 return count;
579 }
580
581 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
582 const char *buf, size_t count)
583 {
584 unsigned int input;
585 int ret;
586
587 ret = sscanf(buf, "%u", &input);
588 if (ret != 1)
589 return -EINVAL;
590
591 limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
592 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
593 limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
594 limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
595 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
596
597 if (hwp_active)
598 intel_pstate_hwp_set();
599 return count;
600 }
601
602 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
603 const char *buf, size_t count)
604 {
605 unsigned int input;
606 int ret;
607
608 ret = sscanf(buf, "%u", &input);
609 if (ret != 1)
610 return -EINVAL;
611
612 limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
613 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
614 limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
615 limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
616 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
617
618 if (hwp_active)
619 intel_pstate_hwp_set();
620 return count;
621 }
622
623 show_one(max_perf_pct, max_perf_pct);
624 show_one(min_perf_pct, min_perf_pct);
625
626 define_one_global_rw(no_turbo);
627 define_one_global_rw(max_perf_pct);
628 define_one_global_rw(min_perf_pct);
629 define_one_global_ro(turbo_pct);
630 define_one_global_ro(num_pstates);
631
632 static struct attribute *intel_pstate_attributes[] = {
633 &no_turbo.attr,
634 &max_perf_pct.attr,
635 &min_perf_pct.attr,
636 &turbo_pct.attr,
637 &num_pstates.attr,
638 NULL
639 };
640
641 static struct attribute_group intel_pstate_attr_group = {
642 .attrs = intel_pstate_attributes,
643 };
644
645 static void __init intel_pstate_sysfs_expose_params(void)
646 {
647 struct kobject *intel_pstate_kobject;
648 int rc;
649
650 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
651 &cpu_subsys.dev_root->kobj);
652 BUG_ON(!intel_pstate_kobject);
653 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
654 BUG_ON(rc);
655 }
656 /************************** sysfs end ************************/
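/*
 * Typical usage of the sysfs attributes created above (assuming the driver
 * is active; the attributes live under /sys/devices/system/cpu/intel_pstate):
 *
 *   # cat /sys/devices/system/cpu/intel_pstate/turbo_pct
 *   # echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct
 *   # echo 1 > /sys/devices/system/cpu/intel_pstate/no_turbo
 */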
657
658 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
659 {
660 pr_info("intel_pstate: HWP enabled\n");
661
662 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
663 }
664
665 static int byt_get_min_pstate(void)
666 {
667 u64 value;
668
669 rdmsrl(BYT_RATIOS, value);
670 return (value >> 8) & 0x7F;
671 }
672
673 static int byt_get_max_pstate(void)
674 {
675 u64 value;
676
677 rdmsrl(BYT_RATIOS, value);
678 return (value >> 16) & 0x7F;
679 }
680
681 static int byt_get_turbo_pstate(void)
682 {
683 u64 value;
684
685 rdmsrl(BYT_TURBO_RATIOS, value);
686 return value & 0x7F;
687 }
688
689 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
690 {
691 u64 val;
692 int32_t vid_fp;
693 u32 vid;
694
695 val = (u64)pstate << 8;
696 if (limits.no_turbo && !limits.turbo_disabled)
697 val |= (u64)1 << 32;
698
699 vid_fp = cpudata->vid.min + mul_fp(
700 int_tofp(pstate - cpudata->pstate.min_pstate),
701 cpudata->vid.ratio);
702
703 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
704 vid = ceiling_fp(vid_fp);
705
706 if (pstate > cpudata->pstate.max_pstate)
707 vid = cpudata->vid.turbo;
708
709 val |= vid;
710
711 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
712 }
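/*
 * Example PERF_CTL encoding produced by byt_set_pstate() (illustrative
 * values): pstate 0x17 with a computed vid of 0x35 yields val 0x1735;
 * if turbo is disabled via sysfs (and not already by the BIOS), bit 32,
 * the turbo disengage bit, is set as well.
 */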
713
714 #define BYT_BCLK_FREQS 5
715 static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
716
717 static int byt_get_scaling(void)
718 {
719 u64 value;
720 int i;
721
722 rdmsrl(MSR_FSB_FREQ, value);
723 i = value & 0x3;
724
725 BUG_ON(i > BYT_BCLK_FREQS);
726
727 return byt_freq_table[i] * 100;
728 }
729
730 static void byt_get_vid(struct cpudata *cpudata)
731 {
732 u64 value;
733
734 rdmsrl(BYT_VIDS, value);
735 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
736 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
737 cpudata->vid.ratio = div_fp(
738 cpudata->vid.max - cpudata->vid.min,
739 int_tofp(cpudata->pstate.max_pstate -
740 cpudata->pstate.min_pstate));
741
742 rdmsrl(BYT_TURBO_VIDS, value);
743 cpudata->vid.turbo = value & 0x7f;
744 }
745
746 static int core_get_min_pstate(void)
747 {
748 u64 value;
749
750 rdmsrl(MSR_PLATFORM_INFO, value);
751 return (value >> 40) & 0xFF;
752 }
753
754 static int core_get_max_pstate_physical(void)
755 {
756 u64 value;
757
758 rdmsrl(MSR_PLATFORM_INFO, value);
759 return (value >> 8) & 0xFF;
760 }
761
762 static int core_get_max_pstate(void)
763 {
764 u64 tar;
765 u64 plat_info;
766 int max_pstate;
767 int err;
768
769 rdmsrl(MSR_PLATFORM_INFO, plat_info);
770 max_pstate = (plat_info >> 8) & 0xFF;
771
772 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
773 if (!err) {
774 /* Do some sanity checking for safety */
775 if (plat_info & 0x600000000) {
776 u64 tdp_ctrl;
777 u64 tdp_ratio;
778 int tdp_msr;
779
780 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
781 if (err)
782 goto skip_tar;
783
784 tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
785 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
786 if (err)
787 goto skip_tar;
788
789 if (tdp_ratio - 1 == tar) {
790 max_pstate = tar;
791 pr_debug("max_pstate=TAC %x\n", max_pstate);
792 } else {
793 goto skip_tar;
794 }
795 }
796 }
797
798 skip_tar:
799 return max_pstate;
800 }
801
802 static int core_get_turbo_pstate(void)
803 {
804 u64 value;
805 int nont, ret;
806
807 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
808 nont = core_get_max_pstate();
809 ret = (value) & 255;
810 if (ret <= nont)
811 ret = nont;
812 return ret;
813 }
814
815 static inline int core_get_scaling(void)
816 {
817 return 100000;
818 }
819
820 static void core_set_pstate(struct cpudata *cpudata, int pstate)
821 {
822 u64 val;
823
824 val = (u64)pstate << 8;
825 if (limits.no_turbo && !limits.turbo_disabled)
826 val |= (u64)1 << 32;
827
828 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
829 }
830
831 static int knl_get_turbo_pstate(void)
832 {
833 u64 value;
834 int nont, ret;
835
836 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
837 nont = core_get_max_pstate();
838 ret = (((value) >> 8) & 0xFF);
839 if (ret <= nont)
840 ret = nont;
841 return ret;
842 }
843
844 static struct cpu_defaults core_params = {
845 .pid_policy = {
846 .sample_rate_ms = 10,
847 .deadband = 0,
848 .setpoint = 97,
849 .p_gain_pct = 20,
850 .d_gain_pct = 0,
851 .i_gain_pct = 0,
852 },
853 .funcs = {
854 .get_max = core_get_max_pstate,
855 .get_max_physical = core_get_max_pstate_physical,
856 .get_min = core_get_min_pstate,
857 .get_turbo = core_get_turbo_pstate,
858 .get_scaling = core_get_scaling,
859 .set = core_set_pstate,
860 },
861 };
862
863 static struct cpu_defaults byt_params = {
864 .pid_policy = {
865 .sample_rate_ms = 10,
866 .deadband = 0,
867 .setpoint = 60,
868 .p_gain_pct = 14,
869 .d_gain_pct = 0,
870 .i_gain_pct = 4,
871 },
872 .funcs = {
873 .get_max = byt_get_max_pstate,
874 .get_max_physical = byt_get_max_pstate,
875 .get_min = byt_get_min_pstate,
876 .get_turbo = byt_get_turbo_pstate,
877 .set = byt_set_pstate,
878 .get_scaling = byt_get_scaling,
879 .get_vid = byt_get_vid,
880 },
881 };
882
883 static struct cpu_defaults knl_params = {
884 .pid_policy = {
885 .sample_rate_ms = 10,
886 .deadband = 0,
887 .setpoint = 97,
888 .p_gain_pct = 20,
889 .d_gain_pct = 0,
890 .i_gain_pct = 0,
891 },
892 .funcs = {
893 .get_max = core_get_max_pstate,
894 .get_max_physical = core_get_max_pstate_physical,
895 .get_min = core_get_min_pstate,
896 .get_turbo = knl_get_turbo_pstate,
897 .get_scaling = core_get_scaling,
898 .set = core_set_pstate,
899 },
900 };
901
902 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
903 {
904 int max_perf = cpu->pstate.turbo_pstate;
905 int max_perf_adj;
906 int min_perf;
907
908 if (limits.no_turbo || limits.turbo_disabled)
909 max_perf = cpu->pstate.max_pstate;
910
911 /*
912 * performance can be limited by user through sysfs, by cpufreq
913 * policy, or by cpu specific default values determined through
914 * experimentation.
915 */
916 if (limits.max_perf_ctl && limits.max_sysfs_pct >=
917 limits.max_policy_pct) {
918 *max = limits.max_perf_ctl;
919 } else {
920 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
921 limits.max_perf));
922 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
923 cpu->pstate.turbo_pstate);
924 }
925
926 if (limits.min_perf_ctl) {
927 *min = limits.min_perf_ctl;
928 } else {
929 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
930 limits.min_perf));
931 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
932 }
933 }
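/*
 * Example (illustrative): with turbo_pstate 32, max_perf_pct 80 and
 * min_perf_pct 20, max_perf is 0.8 and min_perf 0.2 in fixed point, so
 * *max becomes fp_toint(32 * 0.8) = 25 and *min fp_toint(32 * 0.2) = 6,
 * both clamped to the [min_pstate, turbo_pstate] range.
 */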
934
935 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
936 {
937 int max_perf, min_perf;
938
939 if (force) {
940 update_turbo_state();
941
942 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
943
944 pstate = clamp_t(int, pstate, min_perf, max_perf);
945
946 if (pstate == cpu->pstate.current_pstate)
947 return;
948 }
949 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
950
951 cpu->pstate.current_pstate = pstate;
952
953 pstate_funcs.set(cpu, pstate);
954 }
955
956 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
957 {
958 cpu->pstate.min_pstate = pstate_funcs.get_min();
959 cpu->pstate.max_pstate = pstate_funcs.get_max();
960 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
961 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
962 cpu->pstate.scaling = pstate_funcs.get_scaling();
963
964 if (pstate_funcs.get_vid)
965 pstate_funcs.get_vid(cpu);
966 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
967 }
968
969 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
970 {
971 struct sample *sample = &cpu->sample;
972 int64_t core_pct;
973
974 core_pct = int_tofp(sample->aperf) * int_tofp(100);
975 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
976
977 sample->freq = fp_toint(
978 mul_fp(int_tofp(
979 cpu->pstate.max_pstate_physical *
980 cpu->pstate.scaling / 100),
981 core_pct));
982
983 sample->core_pct_busy = (int32_t)core_pct;
984 }
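/*
 * Example (illustrative deltas): with an aperf delta of 18M, an mperf
 * delta of 20M, a physical max pstate of 32 and core scaling of 100000
 * kHz, core_pct comes out as int_tofp(90) and sample->freq as 90% of
 * 3200000 kHz, i.e. about 2880000 kHz (2.88 GHz).
 */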
985
986 static inline void intel_pstate_sample(struct cpudata *cpu)
987 {
988 u64 aperf, mperf;
989 unsigned long flags;
990 u64 tsc;
991
992 local_irq_save(flags);
993 rdmsrl(MSR_IA32_APERF, aperf);
994 rdmsrl(MSR_IA32_MPERF, mperf);
995 tsc = rdtsc();
996 local_irq_restore(flags);
997
998 cpu->last_sample_time = cpu->sample.time;
999 cpu->sample.time = ktime_get();
1000 cpu->sample.aperf = aperf;
1001 cpu->sample.mperf = mperf;
1002 cpu->sample.tsc = tsc;
1003 cpu->sample.aperf -= cpu->prev_aperf;
1004 cpu->sample.mperf -= cpu->prev_mperf;
1005 cpu->sample.tsc -= cpu->prev_tsc;
1006
1007 intel_pstate_calc_busy(cpu);
1008
1009 cpu->prev_aperf = aperf;
1010 cpu->prev_mperf = mperf;
1011 cpu->prev_tsc = tsc;
1012 }
1013
1014 static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
1015 {
1016 int delay;
1017
1018 delay = msecs_to_jiffies(50);
1019 mod_timer_pinned(&cpu->timer, jiffies + delay);
1020 }
1021
1022 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
1023 {
1024 int delay;
1025
1026 delay = msecs_to_jiffies(pid_params.sample_rate_ms);
1027 mod_timer_pinned(&cpu->timer, jiffies + delay);
1028 }
1029
1030 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
1031 {
1032 int32_t core_busy, max_pstate, current_pstate, sample_ratio;
1033 s64 duration_us;
1034 u32 sample_time;
1035
1036 /*
1037 * core_busy is the ratio of actual performance to max
1038 * max_pstate is the max non turbo pstate available
1039 * current_pstate was the pstate that was requested during
1040 * the last sample period.
1041 *
1042 * We normalize core_busy, which was our actual percent
1043 * performance to what we requested during the last sample
1044 * period. The result will be a percentage of busy at a
1045 * specified pstate.
1046 */
1047 core_busy = cpu->sample.core_pct_busy;
1048 max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
1049 current_pstate = int_tofp(cpu->pstate.current_pstate);
1050 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
1051
1052 /*
1053 * Since we have a deferred timer, it will not fire unless
1054 * we are in C0. So, determine if the actual elapsed time
1055 * is significantly greater (3x) than our sample interval. If it
1056 * is, then we were idle for a long enough period of time
1057 * to adjust our busyness.
1058 */
1059 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
1060 duration_us = ktime_us_delta(cpu->sample.time,
1061 cpu->last_sample_time);
1062 if (duration_us > sample_time * 3) {
1063 sample_ratio = div_fp(int_tofp(sample_time),
1064 int_tofp(duration_us));
1065 core_busy = mul_fp(core_busy, sample_ratio);
1066 }
1067
1068 return core_busy;
1069 }
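/*
 * Example (illustrative): with core_pct_busy == int_tofp(90), a physical
 * max pstate of 32 and a current pstate of 20, core_busy scales to
 * roughly int_tofp(144): the load needed about 144% of the requested
 * pstate, so the PID above it will push the pstate upwards.
 */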
1070
1071 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
1072 {
1073 int32_t busy_scaled;
1074 struct _pid *pid;
1075 signed int ctl;
1076 int from;
1077 struct sample *sample;
1078
1079 from = cpu->pstate.current_pstate;
1080
1081 pid = &cpu->pid;
1082 busy_scaled = intel_pstate_get_scaled_busy(cpu);
1083
1084 ctl = pid_calc(pid, busy_scaled);
1085
1086 /* Negative values of ctl increase the pstate and vice versa */
1087 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);
1088
1089 sample = &cpu->sample;
1090 trace_pstate_sample(fp_toint(sample->core_pct_busy),
1091 fp_toint(busy_scaled),
1092 from,
1093 cpu->pstate.current_pstate,
1094 sample->mperf,
1095 sample->aperf,
1096 sample->tsc,
1097 sample->freq);
1098 }
1099
1100 static void intel_hwp_timer_func(unsigned long __data)
1101 {
1102 struct cpudata *cpu = (struct cpudata *) __data;
1103
1104 intel_pstate_sample(cpu);
1105 intel_hwp_set_sample_time(cpu);
1106 }
1107
1108 static void intel_pstate_timer_func(unsigned long __data)
1109 {
1110 struct cpudata *cpu = (struct cpudata *) __data;
1111
1112 intel_pstate_sample(cpu);
1113
1114 intel_pstate_adjust_busy_pstate(cpu);
1115
1116 intel_pstate_set_sample_time(cpu);
1117 }
1118
1119 #define ICPU(model, policy) \
1120 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
1121 (unsigned long)&policy }
1122
1123 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1124 ICPU(0x2a, core_params),
1125 ICPU(0x2d, core_params),
1126 ICPU(0x37, byt_params),
1127 ICPU(0x3a, core_params),
1128 ICPU(0x3c, core_params),
1129 ICPU(0x3d, core_params),
1130 ICPU(0x3e, core_params),
1131 ICPU(0x3f, core_params),
1132 ICPU(0x45, core_params),
1133 ICPU(0x46, core_params),
1134 ICPU(0x47, core_params),
1135 ICPU(0x4c, byt_params),
1136 ICPU(0x4e, core_params),
1137 ICPU(0x4f, core_params),
1138 ICPU(0x5e, core_params),
1139 ICPU(0x56, core_params),
1140 ICPU(0x57, knl_params),
1141 {}
1142 };
1143 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
1144
1145 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
1146 ICPU(0x56, core_params),
1147 {}
1148 };
1149
1150 static int intel_pstate_init_cpu(unsigned int cpunum)
1151 {
1152 struct cpudata *cpu;
1153
1154 if (!all_cpu_data[cpunum])
1155 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
1156 GFP_KERNEL);
1157 if (!all_cpu_data[cpunum])
1158 return -ENOMEM;
1159
1160 cpu = all_cpu_data[cpunum];
1161
1162 cpu->cpu = cpunum;
1163
1164 if (hwp_active)
1165 intel_pstate_hwp_enable(cpu);
1166
1167 intel_pstate_get_cpu_pstates(cpu);
1168
1169 init_timer_deferrable(&cpu->timer);
1170 cpu->timer.data = (unsigned long)cpu;
1171 cpu->timer.expires = jiffies + HZ/100;
1172
1173 if (!hwp_active)
1174 cpu->timer.function = intel_pstate_timer_func;
1175 else
1176 cpu->timer.function = intel_hwp_timer_func;
1177
1178 intel_pstate_busy_pid_reset(cpu);
1179 intel_pstate_sample(cpu);
1180
1181 add_timer_on(&cpu->timer, cpunum);
1182
1183 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
1184
1185 return 0;
1186 }
1187
1188 static unsigned int intel_pstate_get(unsigned int cpu_num)
1189 {
1190 struct sample *sample;
1191 struct cpudata *cpu;
1192
1193 cpu = all_cpu_data[cpu_num];
1194 if (!cpu)
1195 return 0;
1196 sample = &cpu->sample;
1197 return sample->freq;
1198 }
1199
1200 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1201 {
1202 #if IS_ENABLED(CONFIG_ACPI)
1203 struct cpudata *cpu;
1204 int i;
1205 #endif
1206 pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
1207 policy->cpuinfo.max_freq, policy->max);
1208 if (!policy->cpuinfo.max_freq)
1209 return -ENODEV;
1210
1211 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
1212 policy->max >= policy->cpuinfo.max_freq) {
1213 limits.min_policy_pct = 100;
1214 limits.min_perf_pct = 100;
1215 limits.min_perf = int_tofp(1);
1216 limits.max_policy_pct = 100;
1217 limits.max_perf_pct = 100;
1218 limits.max_perf = int_tofp(1);
1219 limits.no_turbo = 0;
1220 limits.max_perf_ctl = 0;
1221 limits.min_perf_ctl = 0;
1222 return 0;
1223 }
1224
1225 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1226 limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
1227 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
1228 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
1229
1230 /* Normalize user input to [min_policy_pct, max_policy_pct] */
1231 limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
1232 limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
1233 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1234 limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
1235
1236 /* Make sure min_perf_pct <= max_perf_pct */
1237 limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
1238
1239 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
1240 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
1241
1242 #if IS_ENABLED(CONFIG_ACPI)
1243 cpu = all_cpu_data[policy->cpu];
1244 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
1245 int control;
1246
1247 control = convert_to_native_pstate_format(cpu, i);
1248 if (control * cpu->pstate.scaling == policy->max)
1249 limits.max_perf_ctl = control;
1250 if (control * cpu->pstate.scaling == policy->min)
1251 limits.min_perf_ctl = control;
1252 }
1253
1254 pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
1255 policy->cpuinfo.max_freq, policy->max, limits.min_perf_ctl,
1256 limits.max_perf_ctl);
1257 #endif
1258
1259 if (hwp_active)
1260 intel_pstate_hwp_set();
1261
1262 return 0;
1263 }
1264
1265 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
1266 {
1267 cpufreq_verify_within_cpu_limits(policy);
1268
1269 if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
1270 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
1271 return -EINVAL;
1272
1273 return 0;
1274 }
1275
1276 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1277 {
1278 int cpu_num = policy->cpu;
1279 struct cpudata *cpu = all_cpu_data[cpu_num];
1280
1281 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
1282
1283 del_timer_sync(&all_cpu_data[cpu_num]->timer);
1284 if (hwp_active)
1285 return;
1286
1287 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
1288 }
1289
1290 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1291 {
1292 struct cpudata *cpu;
1293 int rc;
1294
1295 rc = intel_pstate_init_cpu(policy->cpu);
1296 if (rc)
1297 return rc;
1298
1299 cpu = all_cpu_data[policy->cpu];
1300
1301 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
1302 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1303 else
1304 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1305
1306 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
1307 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1308
1309 /* cpuinfo and default policy values */
1310 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1311 policy->cpuinfo.max_freq =
1312 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1313 if (!no_acpi_perf)
1314 intel_pstate_init_perf_limits(policy);
1315 /*
1316 * If there is no ACPI perf data, or an error occurred, ignore it and
1317 * use the Intel P state calculated limits; this is not a fatal error.
1318 */
1319 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1320 cpumask_set_cpu(policy->cpu, policy->cpus);
1321
1322 return 0;
1323 }
1324
1325 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1326 {
1327 return intel_pstate_exit_perf_limits(policy);
1328 }
1329
1330 static struct cpufreq_driver intel_pstate_driver = {
1331 .flags = CPUFREQ_CONST_LOOPS,
1332 .verify = intel_pstate_verify_policy,
1333 .setpolicy = intel_pstate_set_policy,
1334 .get = intel_pstate_get,
1335 .init = intel_pstate_cpu_init,
1336 .exit = intel_pstate_cpu_exit,
1337 .stop_cpu = intel_pstate_stop_cpu,
1338 .name = "intel_pstate",
1339 };
1340
1341 static int __initdata no_load;
1342 static int __initdata no_hwp;
1343 static int __initdata hwp_only;
1344 static unsigned int force_load;
1345
1346 static int intel_pstate_msrs_not_valid(void)
1347 {
1348 if (!pstate_funcs.get_max() ||
1349 !pstate_funcs.get_min() ||
1350 !pstate_funcs.get_turbo())
1351 return -ENODEV;
1352
1353 return 0;
1354 }
1355
1356 static void copy_pid_params(struct pstate_adjust_policy *policy)
1357 {
1358 pid_params.sample_rate_ms = policy->sample_rate_ms;
1359 pid_params.p_gain_pct = policy->p_gain_pct;
1360 pid_params.i_gain_pct = policy->i_gain_pct;
1361 pid_params.d_gain_pct = policy->d_gain_pct;
1362 pid_params.deadband = policy->deadband;
1363 pid_params.setpoint = policy->setpoint;
1364 }
1365
1366 static void copy_cpu_funcs(struct pstate_funcs *funcs)
1367 {
1368 pstate_funcs.get_max = funcs->get_max;
1369 pstate_funcs.get_max_physical = funcs->get_max_physical;
1370 pstate_funcs.get_min = funcs->get_min;
1371 pstate_funcs.get_turbo = funcs->get_turbo;
1372 pstate_funcs.get_scaling = funcs->get_scaling;
1373 pstate_funcs.set = funcs->set;
1374 pstate_funcs.get_vid = funcs->get_vid;
1375 }
1376
1377 #if IS_ENABLED(CONFIG_ACPI)
1378
1379 static bool intel_pstate_no_acpi_pss(void)
1380 {
1381 int i;
1382
1383 for_each_possible_cpu(i) {
1384 acpi_status status;
1385 union acpi_object *pss;
1386 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1387 struct acpi_processor *pr = per_cpu(processors, i);
1388
1389 if (!pr)
1390 continue;
1391
1392 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
1393 if (ACPI_FAILURE(status))
1394 continue;
1395
1396 pss = buffer.pointer;
1397 if (pss && pss->type == ACPI_TYPE_PACKAGE) {
1398 kfree(pss);
1399 return false;
1400 }
1401
1402 kfree(pss);
1403 }
1404
1405 return true;
1406 }
1407
1408 static bool intel_pstate_has_acpi_ppc(void)
1409 {
1410 int i;
1411
1412 for_each_possible_cpu(i) {
1413 struct acpi_processor *pr = per_cpu(processors, i);
1414
1415 if (!pr)
1416 continue;
1417 if (acpi_has_method(pr->handle, "_PPC"))
1418 return true;
1419 }
1420 return false;
1421 }
1422
1423 enum {
1424 PSS,
1425 PPC,
1426 };
1427
1428 struct hw_vendor_info {
1429 u16 valid;
1430 char oem_id[ACPI_OEM_ID_SIZE];
1431 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
1432 int oem_pwr_table;
1433 };
1434
1435 /* Hardware vendor-specific info that has its own power management modes */
1436 static struct hw_vendor_info vendor_info[] = {
1437 {1, "HP ", "ProLiant", PSS},
1438 {1, "ORACLE", "X4-2 ", PPC},
1439 {1, "ORACLE", "X4-2L ", PPC},
1440 {1, "ORACLE", "X4-2B ", PPC},
1441 {1, "ORACLE", "X3-2 ", PPC},
1442 {1, "ORACLE", "X3-2L ", PPC},
1443 {1, "ORACLE", "X3-2B ", PPC},
1444 {1, "ORACLE", "X4470M2 ", PPC},
1445 {1, "ORACLE", "X4270M3 ", PPC},
1446 {1, "ORACLE", "X4270M2 ", PPC},
1447 {1, "ORACLE", "X4170M2 ", PPC},
1448 {1, "ORACLE", "X4170 M3", PPC},
1449 {1, "ORACLE", "X4275 M3", PPC},
1450 {1, "ORACLE", "X6-2 ", PPC},
1451 {1, "ORACLE", "Sudbury ", PPC},
1452 {0, "", ""},
1453 };
1454
1455 static bool intel_pstate_platform_pwr_mgmt_exists(void)
1456 {
1457 struct acpi_table_header hdr;
1458 struct hw_vendor_info *v_info;
1459 const struct x86_cpu_id *id;
1460 u64 misc_pwr;
1461
1462 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1463 if (id) {
1464 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1465 if (misc_pwr & (1 << 8))
1466 return true;
1467 }
1468
1469 if (acpi_disabled ||
1470 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
1471 return false;
1472
1473 for (v_info = vendor_info; v_info->valid; v_info++) {
1474 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
1475 !strncmp(hdr.oem_table_id, v_info->oem_table_id,
1476 ACPI_OEM_TABLE_ID_SIZE))
1477 switch (v_info->oem_pwr_table) {
1478 case PSS:
1479 return intel_pstate_no_acpi_pss();
1480 case PPC:
1481 return intel_pstate_has_acpi_ppc() &&
1482 (!force_load);
1483 }
1484 }
1485
1486 return false;
1487 }
1488 #else /* CONFIG_ACPI not enabled */
1489 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
1490 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
1491 #endif /* CONFIG_ACPI */
1492
1493 static int __init intel_pstate_init(void)
1494 {
1495 int cpu, rc = 0;
1496 const struct x86_cpu_id *id;
1497 struct cpu_defaults *cpu_def;
1498
1499 if (no_load)
1500 return -ENODEV;
1501
1502 id = x86_match_cpu(intel_pstate_cpu_ids);
1503 if (!id)
1504 return -ENODEV;
1505
1506 /*
1507 * The Intel pstate driver will be ignored if the platform
1508 * firmware has its own power management modes.
1509 */
1510 if (intel_pstate_platform_pwr_mgmt_exists())
1511 return -ENODEV;
1512
1513 cpu_def = (struct cpu_defaults *)id->driver_data;
1514
1515 copy_pid_params(&cpu_def->pid_policy);
1516 copy_cpu_funcs(&cpu_def->funcs);
1517
1518 if (intel_pstate_msrs_not_valid())
1519 return -ENODEV;
1520
1521 pr_info("Intel P-state driver initializing.\n");
1522
1523 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
1524 if (!all_cpu_data)
1525 return -ENOMEM;
1526
1527 if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
1528 hwp_active++;
1529
1530 if (!hwp_active && hwp_only)
1531 goto out;
1532
1533 rc = cpufreq_register_driver(&intel_pstate_driver);
1534 if (rc)
1535 goto out;
1536
1537 intel_pstate_debug_expose_params();
1538 intel_pstate_sysfs_expose_params();
1539
1540 return rc;
1541 out:
1542 get_online_cpus();
1543 for_each_online_cpu(cpu) {
1544 if (all_cpu_data[cpu]) {
1545 del_timer_sync(&all_cpu_data[cpu]->timer);
1546 kfree(all_cpu_data[cpu]);
1547 }
1548 }
1549
1550 put_online_cpus();
1551 vfree(all_cpu_data);
1552 return -ENODEV;
1553 }
1554 device_initcall(intel_pstate_init);
1555
1556 static int __init intel_pstate_setup(char *str)
1557 {
1558 if (!str)
1559 return -EINVAL;
1560
1561 if (!strcmp(str, "disable"))
1562 no_load = 1;
1563 if (!strcmp(str, "no_hwp"))
1564 no_hwp = 1;
1565 if (!strcmp(str, "force"))
1566 force_load = 1;
1567 if (!strcmp(str, "hwp_only"))
1568 hwp_only = 1;
1569 if (!strcmp(str, "no_acpi"))
1570 no_acpi_perf = 1;
1571
1572 return 0;
1573 }
1574 early_param("intel_pstate", intel_pstate_setup);
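/*
 * The options parsed above map to kernel command line parameters, e.g.:
 *
 *   intel_pstate=disable    do not load the driver
 *   intel_pstate=no_hwp     do not enable hardware P states (HWP)
 *   intel_pstate=force      load even if the ACPI _PPC check above would
 *                           defer to firmware power management
 *   intel_pstate=hwp_only   load only on HWP capable processors
 *   intel_pstate=no_acpi    ignore ACPI _PSS performance limits
 */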
1575
1576 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
1577 MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
1578 MODULE_LICENSE("GPL");