/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <linux/vmalloc.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#include <asm/cpufeature.h>
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
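/*
 * All fixed-point math in this file uses a signed x.8 format: with
 * FRAC_BITS == 8, int_tofp(3) == 768, fp_toint(768) == 3, and one
 * fractional step is 1/256.  mul_fp() and div_fp() below keep results
 * in the same format, e.g. mul_fp(int_tofp(3), int_tofp(2)) == int_tofp(6).
 */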
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	struct sample sample;
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
	int min_policy_pct;
	int min_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
	.min_policy_pct = 0,
	.min_sysfs_pct = 0,
};
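/*
 * The effective limits are the intersection of the cpufreq policy and
 * the user's sysfs settings: max_perf_pct is min(max_policy_pct,
 * max_sysfs_pct) and min_perf_pct is max(min_policy_pct, min_sysfs_pct);
 * see the store_*_perf_pct() handlers and intel_pstate_set_policy() below.
 */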
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
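/*
 * pid_calc() below is a textbook discrete PID step in the driver's
 * fixed-point format:
 *
 *	output = Kp * error + Ki * sum(error) + Kd * (error - last_error)
 *
 * where error is (setpoint - busy), the integral term is clamped, and
 * the rounded integer result is later subtracted from the current
 * P state (see intel_pstate_adjust_busy_pstate()).
 */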
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	result = result + (1 << (FRAC_BITS-1));
	return (signed int)fp_toint(result);
}
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
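/*
 * With HWP (Hardware P states) the processor picks P states itself; the
 * driver only programs bounds.  MSR_HWP_REQUEST encodes the minimum and
 * maximum performance levels as 8-bit (0..255) values, so PCT_TO_HWP()
 * simply rescales a percentage onto that range, e.g. PCT_TO_HWP(50) == 127.
 */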
#define PCT_TO_HWP(x) (x * 255 / 100)
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}
static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	int32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_sysfs_pct = clamp_t(int, input, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/

static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate HWP enabled\n");

	wrmsrl(MSR_PM_ENABLE, 0x1);
}
static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}
static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}
static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}
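/*
 * BayTrail must program a voltage (VID) along with the requested ratio.
 * byt_set_pstate() below interpolates linearly between vid.min and
 * vid.max using the precomputed fixed-point slope vid.ratio, then
 * rounds up with ceiling_fp() so the voltage is never undershot; turbo
 * P states use the dedicated turbo VID instead.
 */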
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
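/*
 * On BayTrail the P state ratio multiplies a variable bus clock rather
 * than the fixed 100 MHz used by the core path, so byt_get_scaling()
 * below looks the bus frequency up from MSR_FSB_FREQ; the table entries
 * are in 100 kHz units, hence the final "* 100" to get kHz per ratio step.
 */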
static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
				cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}
static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static inline int core_get_scaling(void)
{
	return 100000;
}
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = (u64)pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static int knl_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = (((value) >> 8) & 0xFF);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 60,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};
static struct cpu_defaults knl_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = knl_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
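/*
 * intel_pstate_sample() below snapshots APERF and MPERF with interrupts
 * off so the pair is read back to back.  MPERF ticks at the guaranteed
 * (max non-turbo) frequency while the CPU is unhalted and APERF at the
 * actual frequency, so the delta ratio computed in
 * intel_pstate_calc_busy() above is the percent of guaranteed
 * performance actually delivered over the sample window.
 */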
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}
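/*
 * The per-CPU sampling timer is deferrable (see intel_pstate_init_cpu()),
 * so it only fires while the CPU is busy.  HWP mode merely refreshes the
 * statistics every 50 ms; PID mode resamples at the tunable
 * pid_params.sample_rate_ms, 10 ms in all the default parameter sets.
 */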
static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 * 	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	ICPU(0x57, knl_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE &&
	    policy->max >= policy->cpuinfo.max_freq) {
		limits.min_policy_pct = 100;
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0, 100);
	limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0, 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int __initdata no_hwp;
static int __initdata hwp_only;
static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}
enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_def;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_def = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_def->pid_policy);
	copy_cpu_funcs(&cpu_def->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (static_cpu_has_safe(X86_FEATURE_HWP) && !no_hwp)
		intel_pstate_hwp_enable();

	if (!hwp_active && hwp_only)
		goto out;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	if (!strcmp(str, "hwp_only"))
		hwp_only = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");