/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, y);
}
static inline int ceiling_fp(int32_t x)
{
	int mask, ret;

	ret = fp_toint(x);
	mask = (1 << FRAC_BITS) - 1;
	if (x & mask)
		ret += 1;
	return ret;
}
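/*
 * Worked example (illustrative, not part of the original driver): with
 * FRAC_BITS == 8 the helpers above implement 24.8 fixed-point math, e.g.
 *
 *	int_tofp(5)                      == 0x500  (5.0)
 *	mul_fp(int_tofp(5), int_tofp(3)) == 0xf00  (15.0)
 *	div_fp(int_tofp(1), int_tofp(4)) == 0x40   (0.25)
 *	ceiling_fp(0x40)                 == 1      (0.25 rounds up to 1)
 */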
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	scaling;
	int	turbo_pstate;
};

struct vid_data {
	int min;
	int max;
	int turbo;
	int32_t ratio;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	struct sample sample;
};
static struct cpudata **all_cpu_data;
struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};
struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	int (*get_scaling)(void);
	void (*set)(struct cpudata*, int pstate);
	void (*get_vid)(struct cpudata *);
};
struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};
static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;
static int hwp_active;
struct perf_limits {
	int no_turbo;
	int turbo_disabled;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.turbo_disabled = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			     int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}

static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/*
	 * We limit the integral here so that it will never
	 * get higher than 30.  This prevents it from becoming
	 * too large an input over long periods of time and allows
	 * it to get factored out sooner.
	 *
	 * The value of 30 was chosen through experimentation.
	 */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	/* Round to the nearest integer before truncating the fraction. */
	result = result + (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}
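/*
 * Worked example (illustrative, assuming the core defaults of setpoint 97,
 * P gain 20% and I/D gains of 0): a busy value of int_tofp(92) gives
 * fp_error = int_tofp(5) and pterm = mul_fp(~0.2, 5.0) = ~1.0; after the
 * rounding step pid_calc() returns 1, so the caller lowers the requested
 * P state by one step.  Busyness above the setpoint yields a negative
 * result and raises the P state instead.
 */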
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
static inline void update_turbo_state(void)
{
	u64 misc_en;
	struct cpudata *cpu;

	cpu = all_cpu_data[0];
	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
	limits.turbo_disabled =
		(misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
		 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
}
#define PCT_TO_HWP(x) (x * 255 / 100)
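/* For example, PCT_TO_HWP(50) == 127: percent limits map onto the 0..255 HWP performance scale. */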
static void intel_pstate_hwp_set(void)
{
	int min, max, cpu;
	u64 value, freq;

	get_online_cpus();

	for_each_online_cpu(cpu) {
		rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
		min = PCT_TO_HWP(limits.min_perf_pct);
		value &= ~HWP_MIN_PERF(~0L);
		value |= HWP_MIN_PERF(min);

		max = PCT_TO_HWP(limits.max_perf_pct);
		if (limits.no_turbo) {
			rdmsrl(MSR_HWP_CAPABILITIES, freq);
			max = HWP_GUARANTEED_PERF(freq);
		}

		value &= ~HWP_MAX_PERF(~0L);
		value |= HWP_MAX_PERF(max);
		wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
	}

	put_online_cpus();
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};
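/*
 * These knobs land in debugfs (see intel_pstate_debug_expose_params()
 * below), so with debugfs mounted at /sys/kernel/debug the PID can be
 * tuned at run time, e.g.:
 *
 *	echo 95 > /sys/kernel/debug/pstate_snb/setpoint
 *
 * Every write goes through pid_param_set(), which resets the PID state
 * on all online CPUs.
 */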
static void __init intel_pstate_debug_expose_params(void)
{
	struct dentry *debugfs_parent;
	int i = 0;

	if (hwp_active)
		return;
	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				    debugfs_parent, pid_files[i].value,
				    &fops_pid_param);
		i++;
	}
}
/************************** debugfs end ************************/

/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}
static ssize_t show_turbo_pct(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total, no_turbo, turbo_pct;
	uint32_t turbo_fp;

	cpu = all_cpu_data[0];

	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
	turbo_fp = div_fp(int_tofp(no_turbo), int_tofp(total));
	turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
	return sprintf(buf, "%u\n", turbo_pct);
}
static ssize_t show_num_pstates(struct kobject *kobj,
				struct attribute *attr, char *buf)
{
	struct cpudata *cpu;
	int total;

	cpu = all_cpu_data[0];
	total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
	return sprintf(buf, "%u\n", total);
}
static ssize_t show_no_turbo(struct kobject *kobj,
			     struct attribute *attr, char *buf)
{
	ssize_t ret;

	update_turbo_state();
	if (limits.turbo_disabled)
		ret = sprintf(buf, "%u\n", limits.turbo_disabled);
	else
		ret = sprintf(buf, "%u\n", limits.no_turbo);

	return ret;
}
static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
			      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	update_turbo_state();
	if (limits.turbo_disabled) {
		pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
		return -EPERM;
	}

	limits.no_turbo = clamp_t(int, input, 0, 1);

	if (hwp_active)
		intel_pstate_hwp_set();

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				  const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0 , 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();
	return count;
}
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	&turbo_pct.attr,
	&num_pstates.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};
static void __init intel_pstate_sysfs_expose_params(void)
{
	struct kobject *intel_pstate_kobject;
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
	BUG_ON(rc);
}
/************************** sysfs end ************************/
static void intel_pstate_hwp_enable(void)
{
	hwp_active++;
	pr_info("intel_pstate HWP enabled\n");

	wrmsrl(MSR_PM_ENABLE, 0x1);
}
static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x7F;
}
static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x7F;
}
static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x7F;
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = ceiling_fp(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
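/*
 * Worked example (hypothetical VID range): with vid.min = int_tofp(30),
 * vid.max = int_tofp(70) and P states 10..30, vid.ratio is 2.0, so a
 * request for P state 20 interpolates to 30 + 10 * 2 = 50.  ceiling_fp()
 * rounds any fractional result up, so the voltage is never undershot.
 */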
#define BYT_BCLK_FREQS 5
static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
static int byt_get_scaling(void)
{
	u64 value;
	int i;

	rdmsrl(MSR_FSB_FREQ, value);
	i = value & 0x7;

	BUG_ON(i > BYT_BCLK_FREQS);

	return byt_freq_table[i] * 100;
}
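/*
 * Example (illustrative): an MSR_FSB_FREQ field value of 1 selects the
 * 1000 entry above, i.e. a 100 MHz bus clock, so byt_get_scaling()
 * returns 100000 and P state N corresponds to N * 100000 kHz.
 */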
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
				cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}
static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}
static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = value & 255;
	if (ret <= nont)
		ret = nont;
	return ret;
}
static inline int core_get_scaling(void)
{
	return 100000;
}
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo && !limits.turbo_disabled)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.get_scaling = core_get_scaling,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_scaling = byt_get_scaling,
		.get_vid = byt_get_vid,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo || limits.turbo_disabled)
		max_perf = cpu->pstate.max_pstate;

	/*
	 * performance can be limited by user through sysfs, by cpufreq
	 * policy, or by cpu specific default values determined through
	 * experimentation.
	 */
	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
}
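/*
 * Worked example (hypothetical limits): with turbo_pstate 36 and a sysfs
 * max_perf_pct of 80, limits.max_perf is ~0.8 in fixed point, so *max
 * becomes fp_toint(36 * ~0.8) = 28; a min_perf_pct of 20 likewise yields
 * *min = 7, both clamped to the hardware's [min_pstate, turbo_pstate]
 * range.
 */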
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	update_turbo_state();

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
	cpu->pstate.scaling = pstate_funcs.get_scaling();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));

	sample->freq = fp_toint(
		mul_fp(int_tofp(
			cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
			core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
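/*
 * Worked example (hypothetical counter deltas): aperf = 18,000,000 and
 * mperf = 24,000,000 give core_pct = 75.0.  With max_pstate 24 and
 * scaling 100000, sample->freq becomes 75% of 2,400,000 kHz, i.e.
 * 1,800,000 kHz (roughly 1.8 GHz).
 */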
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;
	unsigned long flags;

	local_irq_save(flags);
	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);
	local_irq_restore(flags);

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}
static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(50);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int delay;

	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	/*
	 * core_busy is the ratio of actual performance to max
	 * max_pstate is the max non turbo pstate available
	 * current_pstate was the pstate that was requested during
	 *	the last sample period.
	 *
	 * We normalize core_busy, which was our actual percent
	 * performance to what we requested during the last sample
	 * period. The result will be a percentage of busy at a
	 * specified pstate.
	 */
	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	/*
	 * Since we have a deferred timer, it will not fire unless
	 * we are in C0.  So, determine if the actual elapsed time
	 * is significantly greater (3x) than our sample interval.  If it
	 * is, then we were idle for a long enough period of time
	 * to adjust our busyness.
	 */
	sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					   cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				      int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
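/*
 * Worked example (hypothetical sample): core_pct_busy 75.0 at
 * current_pstate 18 with max_pstate 24 scales to 75 * 24/18 = 100.0,
 * i.e. the core was fully busy relative to the performance actually
 * requested.  If the deferred timer then slept for 40 ms against a 10 ms
 * sample interval, the 3x idle check above would scale that busyness
 * down to 25.0.
 */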
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	/* Negative values of ctl increase the pstate and vice versa */
	intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
}
static void intel_hwp_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;

	intel_pstate_sample(cpu);
	intel_hwp_set_sample_time(cpu);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
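/*
 * The pstate_sample tracepoint above can be observed at run time, e.g.
 * with "perf record -e power:pstate_sample", which is handy when tuning
 * the PID parameters exposed in debugfs.
 */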
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }
static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x47, core_params),
	ICPU(0x4c, byt_params),
	ICPU(0x4e, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
	ICPU(0x56, core_params),
	{}
};
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	if (!all_cpu_data[cpunum])
		all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
					       GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	cpu->cpu = cpunum;
	intel_pstate_get_cpu_pstates(cpu);

	init_timer_deferrable(&cpu->timer);
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;

	if (!hwp_active)
		cpu->timer.function = intel_pstate_timer_func;
	else
		cpu->timer.function = intel_hwp_timer_func;

	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_debug("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_policy_pct = 100;
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		limits.no_turbo = 0;
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	if (hwp_active)
		intel_pstate_hwp_set();

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	if (hwp_active)
		return;

	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
	policy->cpuinfo.max_freq =
		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int __initdata no_hwp;
static unsigned int force_load;
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
	    !pstate_funcs.get_min() ||
	    !pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.get_scaling = funcs->get_scaling;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>
static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}
static bool intel_pstate_has_acpi_ppc(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;
		if (acpi_has_method(pr->handle, "_PPC"))
			return true;
	}
	return false;
}
enum {
	PSS,
	PPC,
};

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
	int  oem_pwr_table;
};
/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant", PSS},
	{1, "ORACLE", "X4-2    ", PPC},
	{1, "ORACLE", "X4-2L   ", PPC},
	{1, "ORACLE", "X4-2B   ", PPC},
	{1, "ORACLE", "X3-2    ", PPC},
	{1, "ORACLE", "X3-2L   ", PPC},
	{1, "ORACLE", "X3-2B   ", PPC},
	{1, "ORACLE", "X4470M2 ", PPC},
	{1, "ORACLE", "X4270M3 ", PPC},
	{1, "ORACLE", "X4270M2 ", PPC},
	{1, "ORACLE", "X4170M2 ", PPC},
	{0, "", ""},
};
static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;
	const struct x86_cpu_id *id;
	u64 misc_pwr;

	id = x86_match_cpu(intel_pstate_cpu_oob_ids);
	if (id) {
		rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
		if (misc_pwr & (1 << 8))
			return true;
	}

	if (acpi_disabled ||
	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
		    !strncmp(hdr.oem_table_id, v_info->oem_table_id,
						ACPI_OEM_TABLE_ID_SIZE))
			switch (v_info->oem_pwr_table) {
			case PSS:
				return intel_pstate_no_acpi_pss();
			case PPC:
				return intel_pstate_has_acpi_ppc() &&
					(!force_load);
			}
	}

	return false;
}
#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
#endif /* CONFIG_ACPI */
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	if (cpu_has(c, X86_FEATURE_HWP) && !no_hwp)
		intel_pstate_hwp_enable();

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	if (!strcmp(str, "no_hwp"))
		no_hwp = 1;
	if (!strcmp(str, "force"))
		force_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");