1 /*
2 * intel_pstate.c: Native P state management for Intel processors
3 *
4 * (C) Copyright 2012 Intel Corporation
5 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2
10 * of the License.
11 */
12
13 #include <linux/kernel.h>
14 #include <linux/kernel_stat.h>
15 #include <linux/module.h>
16 #include <linux/ktime.h>
17 #include <linux/hrtimer.h>
18 #include <linux/tick.h>
19 #include <linux/slab.h>
20 #include <linux/sched.h>
21 #include <linux/list.h>
22 #include <linux/cpu.h>
23 #include <linux/cpufreq.h>
24 #include <linux/sysfs.h>
25 #include <linux/types.h>
26 #include <linux/fs.h>
27 #include <linux/debugfs.h>
28 #include <linux/acpi.h>
29 #include <linux/vmalloc.h>
30 #include <trace/events/power.h>
31
32 #include <asm/div64.h>
33 #include <asm/msr.h>
34 #include <asm/cpu_device_id.h>
35 #include <asm/cpufeature.h>
36
37 #if IS_ENABLED(CONFIG_ACPI)
38 #include <acpi/processor.h>
39 #endif
40
41 #define BYT_RATIOS 0x66a
42 #define BYT_VIDS 0x66b
43 #define BYT_TURBO_RATIOS 0x66c
44 #define BYT_TURBO_VIDS 0x66d
45
46 #define FRAC_BITS 8
47 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
48 #define fp_toint(X) ((X) >> FRAC_BITS)
49
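/*
 * The helpers below operate on FRAC_BITS (24.8) fixed point values:
 * numbers are stored as value << 8, so int_tofp(3) == 768 and
 * fp_toint(768) == 3. mul_fp() and div_fp() keep results in the same
 * format, e.g. mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6), and
 * ceiling_fp() rounds a fixed point value up to the next integer.
 */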
50 static inline int32_t mul_fp(int32_t x, int32_t y)
51 {
52 return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
53 }
54
55 static inline int32_t div_fp(s64 x, s64 y)
56 {
57 return div64_s64((int64_t)x << FRAC_BITS, y);
58 }
59
60 static inline int ceiling_fp(int32_t x)
61 {
62 int mask, ret;
63
64 ret = fp_toint(x);
65 mask = (1 << FRAC_BITS) - 1;
66 if (x & mask)
67 ret += 1;
68 return ret;
69 }
70
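/*
 * One sample of the CPU's activity: aperf, mperf and tsc hold the deltas
 * of the respective counters since the previous sample, core_pct_busy is
 * the fixed point APERF/MPERF ratio in percent, freq is the effective
 * frequency derived from that ratio and time is the timestamp at which
 * the sample was taken.
 */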
71 struct sample {
72 int32_t core_pct_busy;
73 u64 aperf;
74 u64 mperf;
75 u64 tsc;
76 int freq;
77 ktime_t time;
78 };
79
80 struct pstate_data {
81 int current_pstate;
82 int min_pstate;
83 int max_pstate;
84 int max_pstate_physical;
85 int scaling;
86 int turbo_pstate;
87 };
88
89 struct vid_data {
90 int min;
91 int max;
92 int turbo;
93 int32_t ratio;
94 };
95
96 struct _pid {
97 int setpoint;
98 int32_t integral;
99 int32_t p_gain;
100 int32_t i_gain;
101 int32_t d_gain;
102 int deadband;
103 int32_t last_err;
104 };
105
106 struct cpudata {
107 int cpu;
108
109 struct timer_list timer;
110
111 struct pstate_data pstate;
112 struct vid_data vid;
113 struct _pid pid;
114
115 ktime_t last_sample_time;
116 u64 prev_aperf;
117 u64 prev_mperf;
118 u64 prev_tsc;
119 struct sample sample;
120 #if IS_ENABLED(CONFIG_ACPI)
121 struct acpi_processor_performance acpi_perf_data;
122 #endif
123 };
124
125 static struct cpudata **all_cpu_data;
126 struct pstate_adjust_policy {
127 int sample_rate_ms;
128 int deadband;
129 int setpoint;
130 int p_gain_pct;
131 int d_gain_pct;
132 int i_gain_pct;
133 };
134
135 struct pstate_funcs {
136 int (*get_max)(void);
137 int (*get_max_physical)(void);
138 int (*get_min)(void);
139 int (*get_turbo)(void);
140 int (*get_scaling)(void);
141 void (*set)(struct cpudata*, int pstate);
142 void (*get_vid)(struct cpudata *);
143 };
144
145 struct cpu_defaults {
146 struct pstate_adjust_policy pid_policy;
147 struct pstate_funcs funcs;
148 };
149
150 static struct pstate_adjust_policy pid_params;
151 static struct pstate_funcs pstate_funcs;
152 static int hwp_active;
153 static int no_acpi_perf;
154
155 struct perf_limits {
156 int no_turbo;
157 int turbo_disabled;
158 int max_perf_pct;
159 int min_perf_pct;
160 int32_t max_perf;
161 int32_t min_perf;
162 int max_policy_pct;
163 int max_sysfs_pct;
164 int min_policy_pct;
165 int min_sysfs_pct;
166 int max_perf_ctl;
167 int min_perf_ctl;
168 };
169
170 static struct perf_limits performance_limits = {
171 .no_turbo = 0,
172 .turbo_disabled = 0,
173 .max_perf_pct = 100,
174 .max_perf = int_tofp(1),
175 .min_perf_pct = 100,
176 .min_perf = int_tofp(1),
177 .max_policy_pct = 100,
178 .max_sysfs_pct = 100,
179 .min_policy_pct = 0,
180 .min_sysfs_pct = 0,
181 };
182
183 static struct perf_limits powersave_limits = {
184 .no_turbo = 0,
185 .turbo_disabled = 0,
186 .max_perf_pct = 100,
187 .max_perf = int_tofp(1),
188 .min_perf_pct = 0,
189 .min_perf = 0,
190 .max_policy_pct = 100,
191 .max_sysfs_pct = 100,
192 .min_policy_pct = 0,
193 .min_sysfs_pct = 0,
194 .max_perf_ctl = 0,
195 .min_perf_ctl = 0,
196 };
197
198 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
199 static struct perf_limits *limits = &performance_limits;
200 #else
201 static struct perf_limits *limits = &powersave_limits;
202 #endif
203
204 #if IS_ENABLED(CONFIG_ACPI)
205 /*
206 * The max target pstate ratio is an 8 bit value in both the PLATFORM_INFO MSR
207 * and the TURBO_RATIO_LIMIT MSR, which the pstate driver stores in the
208 * max_pstate and turbo_pstate fields. The PERF_CTL MSR contains a 16 bit value
209 * for the P state ratio, of which only the high 8 bits are used; for example,
210 * 0x1700 sets target ratio 0x17. The _PSS control value is stored in a format
211 * that can be written directly to the PERF_CTL MSR, but in the intel_pstate
212 * driver this shift happens during the write to PERF_CTL (e.g. in
213 * core_set_pstate() for core processors). This function converts the _PSS
214 * control value to the intel_pstate driver format for comparison and assignment.
215 */
216 static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
217 {
218 return cpu->acpi_perf_data.states[index].control >> 8;
219 }
220
221 static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
222 {
223 struct cpudata *cpu;
224 int ret;
225 bool turbo_absent = false;
226 int max_pstate_index;
227 int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
228 int i;
229
230 cpu = all_cpu_data[policy->cpu];
231
232 pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
233 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
234 cpu->pstate.turbo_pstate);
235
236 if (!cpu->acpi_perf_data.shared_cpu_map &&
237 zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
238 GFP_KERNEL, cpu_to_node(policy->cpu))) {
239 return -ENOMEM;
240 }
241
242 ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
243 policy->cpu);
244 if (ret)
245 return ret;
246
247 /*
248 * Check if the control value in _PSS is for PERF_CTL MSR, which should
249 * guarantee that the states returned by it map to the states in our
250 * list directly.
251 */
252 if (cpu->acpi_perf_data.control_register.space_id !=
253 ACPI_ADR_SPACE_FIXED_HARDWARE)
254 return -EIO;
255
256 pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
257 for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
258 pr_debug(" %cP%d: %u MHz, %u mW, 0x%x\n",
259 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
260 (u32) cpu->acpi_perf_data.states[i].core_frequency,
261 (u32) cpu->acpi_perf_data.states[i].power,
262 (u32) cpu->acpi_perf_data.states[i].control);
263
264 /*
265 * If _PSS contains only one entry, ignore it and continue as
266 * usual without taking _PSS into account.
267 */
268 if (cpu->acpi_perf_data.state_count < 2)
269 return 0;
270
271 turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
272 min_pss_ctl = convert_to_native_pstate_format(cpu,
273 cpu->acpi_perf_data.state_count - 1);
274 /* If the highest _PSS state is not a turbo P state, there is no turbo range */
275 if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
276 turbo_pss_ctl > cpu->pstate.min_pstate) {
277 pr_debug("intel_pstate: no turbo range exists in _PSS\n");
278 limits->no_turbo = limits->turbo_disabled = 1;
279 cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
280 turbo_absent = true;
281 }
282
283 /* Check if the max non turbo p state < Intel P state max */
284 max_pstate_index = turbo_absent ? 0 : 1;
285 max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
286 if (max_pss_ctl < cpu->pstate.max_pstate &&
287 max_pss_ctl > cpu->pstate.min_pstate)
288 cpu->pstate.max_pstate = max_pss_ctl;
289
290 /* Check if the _PSS min perf > Intel P state min */
291 if (min_pss_ctl > cpu->pstate.min_pstate &&
292 min_pss_ctl < cpu->pstate.max_pstate) {
293 cpu->pstate.min_pstate = min_pss_ctl;
294 policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
295 }
296
297 if (turbo_absent)
298 policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
299 cpu->pstate.scaling;
300 else {
301 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
302 cpu->pstate.scaling;
303 /*
304 * The _PSS table doesn't contain the whole turbo frequency range.
305 * It only lists a frequency 1 MHz above the max non turbo
306 * frequency, with a control value corresponding to the max turbo
307 * ratio. When cpufreq set_policy is called with this max
308 * frequency, performance would be reduced, because this driver
309 * uses the real max turbo frequency as the maximum. So correct
310 * the frequency in the _PSS table to the real max turbo frequency
311 * based on the turbo ratio. Also convert to MHz, since _PSS
312 * frequencies are in MHz.
313 */
314 cpu->acpi_perf_data.states[0].core_frequency =
315 turbo_pss_ctl * 100;
316 }
317
318 pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
319 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
320 cpu->pstate.turbo_pstate);
321 pr_debug("intel_pstate: policy max_freq=%d kHz min_freq=%d kHz\n",
322 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
323
324 return 0;
325 }
326
327 static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
328 {
329 struct cpudata *cpu;
330
331 if (no_acpi_perf)
332 return 0;
333
334 cpu = all_cpu_data[policy->cpu];
335 acpi_processor_unregister_performance(policy->cpu);
336 return 0;
337 }
338
339 #else
340 static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
341 {
342 return 0;
343 }
344
345 static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
346 {
347 return 0;
348 }
349 #endif
350
351 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
352 int deadband, int integral) {
353 pid->setpoint = setpoint;
354 pid->deadband = deadband;
355 pid->integral = int_tofp(integral);
356 pid->last_err = int_tofp(setpoint) - int_tofp(busy);
357 }
358
359 static inline void pid_p_gain_set(struct _pid *pid, int percent)
360 {
361 pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
362 }
363
364 static inline void pid_i_gain_set(struct _pid *pid, int percent)
365 {
366 pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
367 }
368
369 static inline void pid_d_gain_set(struct _pid *pid, int percent)
370 {
371 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
372 }
373
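/*
 * Standard discrete PID step on the error between the setpoint and the
 * measured busy value: p_gain * error + i_gain * integral + d_gain *
 * (error - last_error), with the integral clamped and the fixed point
 * result rounded to an integer pstate adjustment. A zero result is
 * returned while the error stays within the deadband.
 */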
374 static signed int pid_calc(struct _pid *pid, int32_t busy)
375 {
376 signed int result;
377 int32_t pterm, dterm, fp_error;
378 int32_t integral_limit;
379
380 fp_error = int_tofp(pid->setpoint) - busy;
381
382 if (abs(fp_error) <= int_tofp(pid->deadband))
383 return 0;
384
385 pterm = mul_fp(pid->p_gain, fp_error);
386
387 pid->integral += fp_error;
388
389 /*
390 * We limit the integral here so that it will never
391 * get higher than 30. This prevents it from becoming
392 * too large an input over long periods of time and allows
393 * it to get factored out sooner.
394 *
395 * The value of 30 was chosen through experimentation.
396 */
397 integral_limit = int_tofp(30);
398 if (pid->integral > integral_limit)
399 pid->integral = integral_limit;
400 if (pid->integral < -integral_limit)
401 pid->integral = -integral_limit;
402
403 dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
404 pid->last_err = fp_error;
405
406 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
407 result = result + (1 << (FRAC_BITS-1));
408 return (signed int)fp_toint(result);
409 }
410
411 static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
412 {
413 pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
414 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
415 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
416
417 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
418 }
419
420 static inline void intel_pstate_reset_all_pid(void)
421 {
422 unsigned int cpu;
423
424 for_each_online_cpu(cpu) {
425 if (all_cpu_data[cpu])
426 intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
427 }
428 }
429
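/*
 * Turbo is considered unavailable when the BIOS has set the turbo
 * disable bit in MSR_IA32_MISC_ENABLE or when the max non turbo
 * pstate already equals the turbo pstate.
 */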
430 static inline void update_turbo_state(void)
431 {
432 u64 misc_en;
433 struct cpudata *cpu;
434
435 cpu = all_cpu_data[0];
436 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
437 limits->turbo_disabled =
438 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
439 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
440 }
441
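/*
 * Map the min/max performance percentage limits onto the HWP capability
 * range [lowest, highest] and program HWP_REQUEST on every online CPU;
 * when turbo is disabled, the maximum is capped at the guaranteed
 * performance level.
 */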
442 static void intel_pstate_hwp_set(void)
443 {
444 int min, hw_min, max, hw_max, cpu, range, adj_range;
445 u64 value, cap;
446
447 rdmsrl(MSR_HWP_CAPABILITIES, cap);
448 hw_min = HWP_LOWEST_PERF(cap);
449 hw_max = HWP_HIGHEST_PERF(cap);
450 range = hw_max - hw_min;
451
452 get_online_cpus();
453
454 for_each_online_cpu(cpu) {
455 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
456 adj_range = limits->min_perf_pct * range / 100;
457 min = hw_min + adj_range;
458 value &= ~HWP_MIN_PERF(~0L);
459 value |= HWP_MIN_PERF(min);
460
461 adj_range = limits->max_perf_pct * range / 100;
462 max = hw_min + adj_range;
463 if (limits->no_turbo) {
464 hw_max = HWP_GUARANTEED_PERF(cap);
465 if (hw_max < max)
466 max = hw_max;
467 }
468
469 value &= ~HWP_MAX_PERF(~0L);
470 value |= HWP_MAX_PERF(max);
471 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
472 }
473
474 put_online_cpus();
475 }
476
477 /************************** debugfs begin ************************/
478 static int pid_param_set(void *data, u64 val)
479 {
480 *(u32 *)data = val;
481 intel_pstate_reset_all_pid();
482 return 0;
483 }
484
485 static int pid_param_get(void *data, u64 *val)
486 {
487 *val = *(u32 *)data;
488 return 0;
489 }
490 DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
491
492 struct pid_param {
493 char *name;
494 void *value;
495 };
496
497 static struct pid_param pid_files[] = {
498 {"sample_rate_ms", &pid_params.sample_rate_ms},
499 {"d_gain_pct", &pid_params.d_gain_pct},
500 {"i_gain_pct", &pid_params.i_gain_pct},
501 {"deadband", &pid_params.deadband},
502 {"setpoint", &pid_params.setpoint},
503 {"p_gain_pct", &pid_params.p_gain_pct},
504 {NULL, NULL}
505 };
506
507 static void __init intel_pstate_debug_expose_params(void)
508 {
509 struct dentry *debugfs_parent;
510 int i = 0;
511
512 if (hwp_active)
513 return;
514 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
515 if (IS_ERR_OR_NULL(debugfs_parent))
516 return;
517 while (pid_files[i].name) {
518 debugfs_create_file(pid_files[i].name, 0660,
519 debugfs_parent, pid_files[i].value,
520 &fops_pid_param);
521 i++;
522 }
523 }
524
525 /************************** debugfs end ************************/
526
527 /************************** sysfs begin ************************/
528 #define show_one(file_name, object) \
529 static ssize_t show_##file_name \
530 (struct kobject *kobj, struct attribute *attr, char *buf) \
531 { \
532 return sprintf(buf, "%u\n", limits->object); \
533 }
534
535 static ssize_t show_turbo_pct(struct kobject *kobj,
536 struct attribute *attr, char *buf)
537 {
538 struct cpudata *cpu;
539 int total, no_turbo, turbo_pct;
540 uint32_t turbo_fp;
541
542 cpu = all_cpu_data[0];
543
544 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
545 no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
546 turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
547 turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
548 return sprintf(buf, "%u\n", turbo_pct);
549 }
550
551 static ssize_t show_num_pstates(struct kobject *kobj,
552 struct attribute *attr, char *buf)
553 {
554 struct cpudata *cpu;
555 int total;
556
557 cpu = all_cpu_data[0];
558 total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
559 return sprintf(buf, "%u\n", total);
560 }
561
562 static ssize_t show_no_turbo(struct kobject *kobj,
563 struct attribute *attr, char *buf)
564 {
565 ssize_t ret;
566
567 update_turbo_state();
568 if (limits->turbo_disabled)
569 ret = sprintf(buf, "%u\n", limits->turbo_disabled);
570 else
571 ret = sprintf(buf, "%u\n", limits->no_turbo);
572
573 return ret;
574 }
575
576 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
577 const char *buf, size_t count)
578 {
579 unsigned int input;
580 int ret;
581
582 ret = sscanf(buf, "%u", &input);
583 if (ret != 1)
584 return -EINVAL;
585
586 update_turbo_state();
587 if (limits->turbo_disabled) {
588 pr_warn("intel_pstate: Turbo disabled by BIOS or unavailable on processor\n");
589 return -EPERM;
590 }
591
592 limits->no_turbo = clamp_t(int, input, 0, 1);
593
594 if (hwp_active)
595 intel_pstate_hwp_set();
596
597 return count;
598 }
599
600 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
601 const char *buf, size_t count)
602 {
603 unsigned int input;
604 int ret;
605
606 ret = sscanf(buf, "%u", &input);
607 if (ret != 1)
608 return -EINVAL;
609
610 limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
611 limits->max_perf_pct = min(limits->max_policy_pct,
612 limits->max_sysfs_pct);
613 limits->max_perf_pct = max(limits->min_policy_pct,
614 limits->max_perf_pct);
615 limits->max_perf_pct = max(limits->min_perf_pct,
616 limits->max_perf_pct);
617 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
618 int_tofp(100));
619
620 if (hwp_active)
621 intel_pstate_hwp_set();
622 return count;
623 }
624
625 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
626 const char *buf, size_t count)
627 {
628 unsigned int input;
629 int ret;
630
631 ret = sscanf(buf, "%u", &input);
632 if (ret != 1)
633 return -EINVAL;
634
635 limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
636 limits->min_perf_pct = max(limits->min_policy_pct,
637 limits->min_sysfs_pct);
638 limits->min_perf_pct = min(limits->max_policy_pct,
639 limits->min_perf_pct);
640 limits->min_perf_pct = min(limits->max_perf_pct,
641 limits->min_perf_pct);
642 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
643 int_tofp(100));
644
645 if (hwp_active)
646 intel_pstate_hwp_set();
647 return count;
648 }
649
650 show_one(max_perf_pct, max_perf_pct);
651 show_one(min_perf_pct, min_perf_pct);
652
653 define_one_global_rw(no_turbo);
654 define_one_global_rw(max_perf_pct);
655 define_one_global_rw(min_perf_pct);
656 define_one_global_ro(turbo_pct);
657 define_one_global_ro(num_pstates);
658
659 static struct attribute *intel_pstate_attributes[] = {
660 &no_turbo.attr,
661 &max_perf_pct.attr,
662 &min_perf_pct.attr,
663 &turbo_pct.attr,
664 &num_pstates.attr,
665 NULL
666 };
667
668 static struct attribute_group intel_pstate_attr_group = {
669 .attrs = intel_pstate_attributes,
670 };
671
672 static void __init intel_pstate_sysfs_expose_params(void)
673 {
674 struct kobject *intel_pstate_kobject;
675 int rc;
676
677 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
678 &cpu_subsys.dev_root->kobj);
679 BUG_ON(!intel_pstate_kobject);
680 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
681 BUG_ON(rc);
682 }
683 /************************** sysfs end ************************/
684
685 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
686 {
687 pr_info("intel_pstate: HWP enabled\n");
688
689 wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
690 }
691
692 static int byt_get_min_pstate(void)
693 {
694 u64 value;
695
696 rdmsrl(BYT_RATIOS, value);
697 return (value >> 8) & 0x7F;
698 }
699
700 static int byt_get_max_pstate(void)
701 {
702 u64 value;
703
704 rdmsrl(BYT_RATIOS, value);
705 return (value >> 16) & 0x7F;
706 }
707
708 static int byt_get_turbo_pstate(void)
709 {
710 u64 value;
711
712 rdmsrl(BYT_TURBO_RATIOS, value);
713 return value & 0x7F;
714 }
715
716 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
717 {
718 u64 val;
719 int32_t vid_fp;
720 u32 vid;
721
722 val = (u64)pstate << 8;
723 if (limits->no_turbo && !limits->turbo_disabled)
724 val |= (u64)1 << 32;
725
726 vid_fp = cpudata->vid.min + mul_fp(
727 int_tofp(pstate - cpudata->pstate.min_pstate),
728 cpudata->vid.ratio);
729
730 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
731 vid = ceiling_fp(vid_fp);
732
733 if (pstate > cpudata->pstate.max_pstate)
734 vid = cpudata->vid.turbo;
735
736 val |= vid;
737
738 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
739 }
740
741 #define BYT_BCLK_FREQS 5
742 static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
743
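/*
 * The bus clock is read from MSR_FSB_FREQ; the low bits of that MSR
 * index the table above. The table entries are in units of 100 kHz,
 * so the returned scaling is in kHz per pstate ratio step.
 */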
744 static int byt_get_scaling(void)
745 {
746 u64 value;
747 int i;
748
749 rdmsrl(MSR_FSB_FREQ, value);
750 i = value & 0x3;
751
752 BUG_ON(i >= BYT_BCLK_FREQS);
753
754 return byt_freq_table[i] * 100;
755 }
756
757 static void byt_get_vid(struct cpudata *cpudata)
758 {
759 u64 value;
760
761 rdmsrl(BYT_VIDS, value);
762 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
763 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
764 cpudata->vid.ratio = div_fp(
765 cpudata->vid.max - cpudata->vid.min,
766 int_tofp(cpudata->pstate.max_pstate -
767 cpudata->pstate.min_pstate));
768
769 rdmsrl(BYT_TURBO_VIDS, value);
770 cpudata->vid.turbo = value & 0x7f;
771 }
772
773 static int core_get_min_pstate(void)
774 {
775 u64 value;
776
777 rdmsrl(MSR_PLATFORM_INFO, value);
778 return (value >> 40) & 0xFF;
779 }
780
781 static int core_get_max_pstate_physical(void)
782 {
783 u64 value;
784
785 rdmsrl(MSR_PLATFORM_INFO, value);
786 return (value >> 8) & 0xFF;
787 }
788
789 static int core_get_max_pstate(void)
790 {
791 u64 tar;
792 u64 plat_info;
793 int max_pstate;
794 int err;
795
796 rdmsrl(MSR_PLATFORM_INFO, plat_info);
797 max_pstate = (plat_info >> 8) & 0xFF;
798
799 err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
800 if (!err) {
801 /* Do some sanity checking for safety */
802 if (plat_info & 0x600000000) {
803 u64 tdp_ctrl;
804 u64 tdp_ratio;
805 int tdp_msr;
806
807 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
808 if (err)
809 goto skip_tar;
810
811 tdp_msr = MSR_CONFIG_TDP_NOMINAL + tdp_ctrl;
812 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
813 if (err)
814 goto skip_tar;
815
816 if (tdp_ratio - 1 == tar) {
817 max_pstate = tar;
818 pr_debug("max_pstate=TAR %x\n", max_pstate);
819 } else {
820 goto skip_tar;
821 }
822 }
823 }
824
825 skip_tar:
826 return max_pstate;
827 }
828
829 static int core_get_turbo_pstate(void)
830 {
831 u64 value;
832 int nont, ret;
833
834 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
835 nont = core_get_max_pstate();
836 ret = (value) & 255;
837 if (ret <= nont)
838 ret = nont;
839 return ret;
840 }
841
842 static inline int core_get_scaling(void)
843 {
844 return 100000;
845 }
846
847 static void core_set_pstate(struct cpudata *cpudata, int pstate)
848 {
849 u64 val;
850
851 val = (u64)pstate << 8;
852 if (limits->no_turbo && !limits->turbo_disabled)
853 val |= (u64)1 << 32;
854
855 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
856 }
857
858 static int knl_get_turbo_pstate(void)
859 {
860 u64 value;
861 int nont, ret;
862
863 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
864 nont = core_get_max_pstate();
865 ret = (((value) >> 8) & 0xFF);
866 if (ret <= nont)
867 ret = nont;
868 return ret;
869 }
870
871 static struct cpu_defaults core_params = {
872 .pid_policy = {
873 .sample_rate_ms = 10,
874 .deadband = 0,
875 .setpoint = 97,
876 .p_gain_pct = 20,
877 .d_gain_pct = 0,
878 .i_gain_pct = 0,
879 },
880 .funcs = {
881 .get_max = core_get_max_pstate,
882 .get_max_physical = core_get_max_pstate_physical,
883 .get_min = core_get_min_pstate,
884 .get_turbo = core_get_turbo_pstate,
885 .get_scaling = core_get_scaling,
886 .set = core_set_pstate,
887 },
888 };
889
890 static struct cpu_defaults byt_params = {
891 .pid_policy = {
892 .sample_rate_ms = 10,
893 .deadband = 0,
894 .setpoint = 60,
895 .p_gain_pct = 14,
896 .d_gain_pct = 0,
897 .i_gain_pct = 4,
898 },
899 .funcs = {
900 .get_max = byt_get_max_pstate,
901 .get_max_physical = byt_get_max_pstate,
902 .get_min = byt_get_min_pstate,
903 .get_turbo = byt_get_turbo_pstate,
904 .set = byt_set_pstate,
905 .get_scaling = byt_get_scaling,
906 .get_vid = byt_get_vid,
907 },
908 };
909
910 static struct cpu_defaults knl_params = {
911 .pid_policy = {
912 .sample_rate_ms = 10,
913 .deadband = 0,
914 .setpoint = 97,
915 .p_gain_pct = 20,
916 .d_gain_pct = 0,
917 .i_gain_pct = 0,
918 },
919 .funcs = {
920 .get_max = core_get_max_pstate,
921 .get_max_physical = core_get_max_pstate_physical,
922 .get_min = core_get_min_pstate,
923 .get_turbo = knl_get_turbo_pstate,
924 .get_scaling = core_get_scaling,
925 .set = core_set_pstate,
926 },
927 };
928
929 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
930 {
931 int max_perf = cpu->pstate.turbo_pstate;
932 int max_perf_adj;
933 int min_perf;
934
935 if (limits->no_turbo || limits->turbo_disabled)
936 max_perf = cpu->pstate.max_pstate;
937
938 /*
939 * Performance can be limited by the user through sysfs, by the
940 * cpufreq policy, or by CPU specific default values determined
941 * through experimentation.
942 */
943 if (limits->max_perf_ctl && limits->max_sysfs_pct >=
944 limits->max_policy_pct) {
945 *max = limits->max_perf_ctl;
946 } else {
947 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
948 limits->max_perf));
949 *max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
950 cpu->pstate.turbo_pstate);
951 }
952
953 if (limits->min_perf_ctl) {
954 *min = limits->min_perf_ctl;
955 } else {
956 min_perf = fp_toint(mul_fp(int_tofp(max_perf),
957 limits->min_perf));
958 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
959 }
960 }
961
962 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
963 {
964 int max_perf, min_perf;
965
966 if (force) {
967 update_turbo_state();
968
969 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
970
971 pstate = clamp_t(int, pstate, min_perf, max_perf);
972
973 if (pstate == cpu->pstate.current_pstate)
974 return;
975 }
976 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
977
978 cpu->pstate.current_pstate = pstate;
979
980 pstate_funcs.set(cpu, pstate);
981 }
982
983 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
984 {
985 cpu->pstate.min_pstate = pstate_funcs.get_min();
986 cpu->pstate.max_pstate = pstate_funcs.get_max();
987 cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
988 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
989 cpu->pstate.scaling = pstate_funcs.get_scaling();
990
991 if (pstate_funcs.get_vid)
992 pstate_funcs.get_vid(cpu);
993 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
994 }
995
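/*
 * core_pct_busy is 100 * delta_aperf / delta_mperf in fixed point, i.e.
 * the average performance during the sample relative to the maximum
 * non turbo performance; freq scales the physical maximum frequency by
 * that percentage to give the effective frequency over the sample.
 */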
996 static inline void intel_pstate_calc_busy(struct cpudata *cpu)
997 {
998 struct sample *sample = &cpu->sample;
999 int64_t core_pct;
1000
1001 core_pct = int_tofp(sample->aperf) * int_tofp(100);
1002 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
1003
1004 sample->freq = fp_toint(
1005 mul_fp(int_tofp(
1006 cpu->pstate.max_pstate_physical *
1007 cpu->pstate.scaling / 100),
1008 core_pct));
1009
1010 sample->core_pct_busy = (int32_t)core_pct;
1011 }
1012
1013 static inline void intel_pstate_sample(struct cpudata *cpu)
1014 {
1015 u64 aperf, mperf;
1016 unsigned long flags;
1017 u64 tsc;
1018
1019 local_irq_save(flags);
1020 rdmsrl(MSR_IA32_APERF, aperf);
1021 rdmsrl(MSR_IA32_MPERF, mperf);
1022 if (cpu->prev_mperf == mperf) {
1023 local_irq_restore(flags);
1024 return;
1025 }
1026
1027 tsc = rdtsc();
1028 local_irq_restore(flags);
1029
1030 cpu->last_sample_time = cpu->sample.time;
1031 cpu->sample.time = ktime_get();
1032 cpu->sample.aperf = aperf;
1033 cpu->sample.mperf = mperf;
1034 cpu->sample.tsc = tsc;
1035 cpu->sample.aperf -= cpu->prev_aperf;
1036 cpu->sample.mperf -= cpu->prev_mperf;
1037 cpu->sample.tsc -= cpu->prev_tsc;
1038
1039 intel_pstate_calc_busy(cpu);
1040
1041 cpu->prev_aperf = aperf;
1042 cpu->prev_mperf = mperf;
1043 cpu->prev_tsc = tsc;
1044 }
1045
1046 static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
1047 {
1048 int delay;
1049
1050 delay = msecs_to_jiffies(50);
1051 mod_timer_pinned(&cpu->timer, jiffies + delay);
1052 }
1053
1054 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
1055 {
1056 int delay;
1057
1058 delay = msecs_to_jiffies(pid_params.sample_rate_ms);
1059 mod_timer_pinned(&cpu->timer, jiffies + delay);
1060 }
1061
1062 static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
1063 {
1064 int32_t core_busy, max_pstate, current_pstate, sample_ratio;
1065 s64 duration_us;
1066 u32 sample_time;
1067
1068 /*
1069 * core_busy is the ratio of actual performance to max.
1070 * max_pstate is the max non turbo pstate available
1071 * (max_pstate_physical). current_pstate is the pstate that
1072 * was requested during the last sample period.
1073 *
1074 * We normalize core_busy, our actual percent performance, to
1075 * what we requested during the last sample period. The result
1076 * is the percentage of busy at the requested pstate; e.g. 50%
1077 * busy while running at half of max_pstate scales to 100%.
1078 */
1079 core_busy = cpu->sample.core_pct_busy;
1080 max_pstate = int_tofp(cpu->pstate.max_pstate_physical);
1081 current_pstate = int_tofp(cpu->pstate.current_pstate);
1082 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
1083
1084 /*
1085 * Since we have a deferred timer, it will not fire unless
1086 * we are in C0. So, determine if the actual elapsed time
1087 * is significantly greater (3x) than our sample interval. If it
1088 * is, then we were idle for a long enough period of time
1089 * to adjust our busyness.
1090 */
1091 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
1092 duration_us = ktime_us_delta(cpu->sample.time,
1093 cpu->last_sample_time);
1094 if (duration_us > sample_time * 3) {
1095 sample_ratio = div_fp(int_tofp(sample_time),
1096 int_tofp(duration_us));
1097 core_busy = mul_fp(core_busy, sample_ratio);
1098 }
1099
1100 return core_busy;
1101 }
1102
1103 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
1104 {
1105 int32_t busy_scaled;
1106 struct _pid *pid;
1107 signed int ctl;
1108 int from;
1109 struct sample *sample;
1110
1111 from = cpu->pstate.current_pstate;
1112
1113 pid = &cpu->pid;
1114 busy_scaled = intel_pstate_get_scaled_busy(cpu);
1115
1116 ctl = pid_calc(pid, busy_scaled);
1117
1118 /* Negative values of ctl increase the pstate and vice versa */
1119 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl, true);
1120
1121 sample = &cpu->sample;
1122 trace_pstate_sample(fp_toint(sample->core_pct_busy),
1123 fp_toint(busy_scaled),
1124 from,
1125 cpu->pstate.current_pstate,
1126 sample->mperf,
1127 sample->aperf,
1128 sample->tsc,
1129 sample->freq);
1130 }
1131
1132 static void intel_hwp_timer_func(unsigned long __data)
1133 {
1134 struct cpudata *cpu = (struct cpudata *) __data;
1135
1136 intel_pstate_sample(cpu);
1137 intel_hwp_set_sample_time(cpu);
1138 }
1139
1140 static void intel_pstate_timer_func(unsigned long __data)
1141 {
1142 struct cpudata *cpu = (struct cpudata *) __data;
1143
1144 intel_pstate_sample(cpu);
1145
1146 intel_pstate_adjust_busy_pstate(cpu);
1147
1148 intel_pstate_set_sample_time(cpu);
1149 }
1150
1151 #define ICPU(model, policy) \
1152 { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
1153 (unsigned long)&policy }
1154
1155 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
1156 ICPU(0x2a, core_params),
1157 ICPU(0x2d, core_params),
1158 ICPU(0x37, byt_params),
1159 ICPU(0x3a, core_params),
1160 ICPU(0x3c, core_params),
1161 ICPU(0x3d, core_params),
1162 ICPU(0x3e, core_params),
1163 ICPU(0x3f, core_params),
1164 ICPU(0x45, core_params),
1165 ICPU(0x46, core_params),
1166 ICPU(0x47, core_params),
1167 ICPU(0x4c, byt_params),
1168 ICPU(0x4e, core_params),
1169 ICPU(0x4f, core_params),
1170 ICPU(0x5e, core_params),
1171 ICPU(0x56, core_params),
1172 ICPU(0x57, knl_params),
1173 {}
1174 };
1175 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
1176
1177 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
1178 ICPU(0x56, core_params),
1179 {}
1180 };
1181
1182 static int intel_pstate_init_cpu(unsigned int cpunum)
1183 {
1184 struct cpudata *cpu;
1185
1186 if (!all_cpu_data[cpunum])
1187 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
1188 GFP_KERNEL);
1189 if (!all_cpu_data[cpunum])
1190 return -ENOMEM;
1191
1192 cpu = all_cpu_data[cpunum];
1193
1194 cpu->cpu = cpunum;
1195
1196 if (hwp_active)
1197 intel_pstate_hwp_enable(cpu);
1198
1199 intel_pstate_get_cpu_pstates(cpu);
1200
1201 init_timer_deferrable(&cpu->timer);
1202 cpu->timer.data = (unsigned long)cpu;
1203 cpu->timer.expires = jiffies + HZ/100;
1204
1205 if (!hwp_active)
1206 cpu->timer.function = intel_pstate_timer_func;
1207 else
1208 cpu->timer.function = intel_hwp_timer_func;
1209
1210 intel_pstate_busy_pid_reset(cpu);
1211 intel_pstate_sample(cpu);
1212
1213 add_timer_on(&cpu->timer, cpunum);
1214
1215 pr_debug("intel_pstate: controlling: cpu %d\n", cpunum);
1216
1217 return 0;
1218 }
1219
1220 static unsigned int intel_pstate_get(unsigned int cpu_num)
1221 {
1222 struct sample *sample;
1223 struct cpudata *cpu;
1224
1225 cpu = all_cpu_data[cpu_num];
1226 if (!cpu)
1227 return 0;
1228 sample = &cpu->sample;
1229 return sample->freq;
1230 }
1231
1232 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
1233 {
1234 #if IS_ENABLED(CONFIG_ACPI)
1235 struct cpudata *cpu;
1236 int i;
1237 #endif
1238 pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
1239 policy->cpuinfo.max_freq, policy->max);
1240 if (!policy->cpuinfo.max_freq)
1241 return -ENODEV;
1242
1243 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
1244 policy->max >= policy->cpuinfo.max_freq) {
1245 pr_debug("intel_pstate: set performance\n");
1246 limits = &performance_limits;
1247 return 0;
1248 }
1249
1250 pr_debug("intel_pstate: set powersave\n");
1251 limits = &powersave_limits;
1252 limits->min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
1253 limits->min_policy_pct = clamp_t(int, limits->min_policy_pct, 0 , 100);
1254 limits->max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
1255 limits->max_policy_pct = clamp_t(int, limits->max_policy_pct, 0 , 100);
1256
1257 /* Normalize user input to [min_policy_pct, max_policy_pct] */
1258 limits->min_perf_pct = max(limits->min_policy_pct,
1259 limits->min_sysfs_pct);
1260 limits->min_perf_pct = min(limits->max_policy_pct,
1261 limits->min_perf_pct);
1262 limits->max_perf_pct = min(limits->max_policy_pct,
1263 limits->max_sysfs_pct);
1264 limits->max_perf_pct = max(limits->min_policy_pct,
1265 limits->max_perf_pct);
1266
1267 /* Make sure min_perf_pct <= max_perf_pct */
1268 limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
1269
1270 limits->min_perf = div_fp(int_tofp(limits->min_perf_pct),
1271 int_tofp(100));
1272 limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
1273 int_tofp(100));
1274
1275 #if IS_ENABLED(CONFIG_ACPI)
1276 cpu = all_cpu_data[policy->cpu];
1277 for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
1278 int control;
1279
1280 control = convert_to_native_pstate_format(cpu, i);
1281 if (control * cpu->pstate.scaling == policy->max)
1282 limits->max_perf_ctl = control;
1283 if (control * cpu->pstate.scaling == policy->min)
1284 limits->min_perf_ctl = control;
1285 }
1286
1287 pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
1288 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
1289 limits->max_perf_ctl);
1290 #endif
1291
1292 if (hwp_active)
1293 intel_pstate_hwp_set();
1294
1295 return 0;
1296 }
1297
1298 static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
1299 {
1300 cpufreq_verify_within_cpu_limits(policy);
1301
1302 if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
1303 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
1304 return -EINVAL;
1305
1306 return 0;
1307 }
1308
1309 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
1310 {
1311 int cpu_num = policy->cpu;
1312 struct cpudata *cpu = all_cpu_data[cpu_num];
1313
1314 pr_debug("intel_pstate: CPU %d exiting\n", cpu_num);
1315
1316 del_timer_sync(&all_cpu_data[cpu_num]->timer);
1317 if (hwp_active)
1318 return;
1319
1320 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate, false);
1321 }
1322
1323 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
1324 {
1325 struct cpudata *cpu;
1326 int rc;
1327
1328 rc = intel_pstate_init_cpu(policy->cpu);
1329 if (rc)
1330 return rc;
1331
1332 cpu = all_cpu_data[policy->cpu];
1333
1334 if (limits->min_perf_pct == 100 && limits->max_perf_pct == 100)
1335 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
1336 else
1337 policy->policy = CPUFREQ_POLICY_POWERSAVE;
1338
1339 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
1340 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1341
1342 /* cpuinfo and default policy values */
1343 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
1344 policy->cpuinfo.max_freq =
1345 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1346 if (!no_acpi_perf)
1347 intel_pstate_init_perf_limits(policy);
1348 /*
1349 * If there is no ACPI perf data, or an error occurred, it is ignored
1350 * and the Intel P state calculated limits are used; not a fatal error.
1351 */
1352 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
1353 cpumask_set_cpu(policy->cpu, policy->cpus);
1354
1355 return 0;
1356 }
1357
1358 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
1359 {
1360 return intel_pstate_exit_perf_limits(policy);
1361 }
1362
1363 static struct cpufreq_driver intel_pstate_driver = {
1364 .flags = CPUFREQ_CONST_LOOPS,
1365 .verify = intel_pstate_verify_policy,
1366 .setpolicy = intel_pstate_set_policy,
1367 .get = intel_pstate_get,
1368 .init = intel_pstate_cpu_init,
1369 .exit = intel_pstate_cpu_exit,
1370 .stop_cpu = intel_pstate_stop_cpu,
1371 .name = "intel_pstate",
1372 };
1373
1374 static int __initdata no_load;
1375 static int __initdata no_hwp;
1376 static int __initdata hwp_only;
1377 static unsigned int force_load;
1378
1379 static int intel_pstate_msrs_not_valid(void)
1380 {
1381 if (!pstate_funcs.get_max() ||
1382 !pstate_funcs.get_min() ||
1383 !pstate_funcs.get_turbo())
1384 return -ENODEV;
1385
1386 return 0;
1387 }
1388
1389 static void copy_pid_params(struct pstate_adjust_policy *policy)
1390 {
1391 pid_params.sample_rate_ms = policy->sample_rate_ms;
1392 pid_params.p_gain_pct = policy->p_gain_pct;
1393 pid_params.i_gain_pct = policy->i_gain_pct;
1394 pid_params.d_gain_pct = policy->d_gain_pct;
1395 pid_params.deadband = policy->deadband;
1396 pid_params.setpoint = policy->setpoint;
1397 }
1398
1399 static void copy_cpu_funcs(struct pstate_funcs *funcs)
1400 {
1401 pstate_funcs.get_max = funcs->get_max;
1402 pstate_funcs.get_max_physical = funcs->get_max_physical;
1403 pstate_funcs.get_min = funcs->get_min;
1404 pstate_funcs.get_turbo = funcs->get_turbo;
1405 pstate_funcs.get_scaling = funcs->get_scaling;
1406 pstate_funcs.set = funcs->set;
1407 pstate_funcs.get_vid = funcs->get_vid;
1408 }
1409
1410 #if IS_ENABLED(CONFIG_ACPI)
1411
1412 static bool intel_pstate_no_acpi_pss(void)
1413 {
1414 int i;
1415
1416 for_each_possible_cpu(i) {
1417 acpi_status status;
1418 union acpi_object *pss;
1419 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1420 struct acpi_processor *pr = per_cpu(processors, i);
1421
1422 if (!pr)
1423 continue;
1424
1425 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
1426 if (ACPI_FAILURE(status))
1427 continue;
1428
1429 pss = buffer.pointer;
1430 if (pss && pss->type == ACPI_TYPE_PACKAGE) {
1431 kfree(pss);
1432 return false;
1433 }
1434
1435 kfree(pss);
1436 }
1437
1438 return true;
1439 }
1440
1441 static bool intel_pstate_has_acpi_ppc(void)
1442 {
1443 int i;
1444
1445 for_each_possible_cpu(i) {
1446 struct acpi_processor *pr = per_cpu(processors, i);
1447
1448 if (!pr)
1449 continue;
1450 if (acpi_has_method(pr->handle, "_PPC"))
1451 return true;
1452 }
1453 return false;
1454 }
1455
1456 enum {
1457 PSS,
1458 PPC,
1459 };
1460
1461 struct hw_vendor_info {
1462 u16 valid;
1463 char oem_id[ACPI_OEM_ID_SIZE];
1464 char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
1465 int oem_pwr_table;
1466 };
1467
1468 /* Hardware vendors whose platforms have their own power management modes */
1469 static struct hw_vendor_info vendor_info[] = {
1470 {1, "HP ", "ProLiant", PSS},
1471 {1, "ORACLE", "X4-2 ", PPC},
1472 {1, "ORACLE", "X4-2L ", PPC},
1473 {1, "ORACLE", "X4-2B ", PPC},
1474 {1, "ORACLE", "X3-2 ", PPC},
1475 {1, "ORACLE", "X3-2L ", PPC},
1476 {1, "ORACLE", "X3-2B ", PPC},
1477 {1, "ORACLE", "X4470M2 ", PPC},
1478 {1, "ORACLE", "X4270M3 ", PPC},
1479 {1, "ORACLE", "X4270M2 ", PPC},
1480 {1, "ORACLE", "X4170M2 ", PPC},
1481 {1, "ORACLE", "X4170 M3", PPC},
1482 {1, "ORACLE", "X4275 M3", PPC},
1483 {1, "ORACLE", "X6-2 ", PPC},
1484 {1, "ORACLE", "Sudbury ", PPC},
1485 {0, "", ""},
1486 };
1487
1488 static bool intel_pstate_platform_pwr_mgmt_exists(void)
1489 {
1490 struct acpi_table_header hdr;
1491 struct hw_vendor_info *v_info;
1492 const struct x86_cpu_id *id;
1493 u64 misc_pwr;
1494
1495 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1496 if (id) {
1497 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1498 if (misc_pwr & (1 << 8))
1499 return true;
1500 }
1501
1502 if (acpi_disabled ||
1503 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
1504 return false;
1505
1506 for (v_info = vendor_info; v_info->valid; v_info++) {
1507 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
1508 !strncmp(hdr.oem_table_id, v_info->oem_table_id,
1509 ACPI_OEM_TABLE_ID_SIZE))
1510 switch (v_info->oem_pwr_table) {
1511 case PSS:
1512 return intel_pstate_no_acpi_pss();
1513 case PPC:
1514 return intel_pstate_has_acpi_ppc() &&
1515 (!force_load);
1516 }
1517 }
1518
1519 return false;
1520 }
1521 #else /* CONFIG_ACPI not enabled */
1522 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
1523 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
1524 #endif /* CONFIG_ACPI */
1525
1526 static int __init intel_pstate_init(void)
1527 {
1528 int cpu, rc = 0;
1529 const struct x86_cpu_id *id;
1530 struct cpu_defaults *cpu_def;
1531
1532 if (no_load)
1533 return -ENODEV;
1534
1535 id = x86_match_cpu(intel_pstate_cpu_ids);
1536 if (!id)
1537 return -ENODEV;
1538
1539 /*
1540 * The Intel pstate driver will be ignored if the platform
1541 * firmware has its own power management modes.
1542 */
1543 if (intel_pstate_platform_pwr_mgmt_exists())
1544 return -ENODEV;
1545
1546 cpu_def = (struct cpu_defaults *)id->driver_data;
1547
1548 copy_pid_params(&cpu_def->pid_policy);
1549 copy_cpu_funcs(&cpu_def->funcs);
1550
1551 if (intel_pstate_msrs_not_valid())
1552 return -ENODEV;
1553
1554 pr_info("Intel P-state driver initializing.\n");
1555
1556 all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
1557 if (!all_cpu_data)
1558 return -ENOMEM;
1559
1560 if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
1561 hwp_active++;
1562
1563 if (!hwp_active && hwp_only)
1564 goto out;
1565
1566 rc = cpufreq_register_driver(&intel_pstate_driver);
1567 if (rc)
1568 goto out;
1569
1570 intel_pstate_debug_expose_params();
1571 intel_pstate_sysfs_expose_params();
1572
1573 return rc;
1574 out:
1575 get_online_cpus();
1576 for_each_online_cpu(cpu) {
1577 if (all_cpu_data[cpu]) {
1578 del_timer_sync(&all_cpu_data[cpu]->timer);
1579 kfree(all_cpu_data[cpu]);
1580 }
1581 }
1582
1583 put_online_cpus();
1584 vfree(all_cpu_data);
1585 return -ENODEV;
1586 }
1587 device_initcall(intel_pstate_init);
1588
1589 static int __init intel_pstate_setup(char *str)
1590 {
1591 if (!str)
1592 return -EINVAL;
1593
1594 if (!strcmp(str, "disable"))
1595 no_load = 1;
1596 if (!strcmp(str, "no_hwp"))
1597 no_hwp = 1;
1598 if (!strcmp(str, "force"))
1599 force_load = 1;
1600 if (!strcmp(str, "hwp_only"))
1601 hwp_only = 1;
1602 if (!strcmp(str, "no_acpi"))
1603 no_acpi_perf = 1;
1604
1605 return 0;
1606 }
1607 early_param("intel_pstate", intel_pstate_setup);
1608
1609 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
1610 MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
1611 MODULE_LICENSE("GPL");