/*
 * intel_pstate.c: Native P state management for Intel processors
 *
 * (C) Copyright 2012 Intel Corporation
 * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/acpi.h>
#include <trace/events/power.h>

#include <asm/div64.h>
#include <asm/msr.h>
#include <asm/cpu_device_id.h>
#define BYT_RATIOS		0x66a
#define BYT_VIDS		0x66b
#define BYT_TURBO_RATIOS	0x66c
#define BYT_TURBO_VIDS		0x66d
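/*
 * Note (illustrative, not in the original source): the BYT_* registers above
 * are the Bay Trail (Atom) specific MSRs holding the min/max/turbo P-state
 * ratios and the matching voltage (VID) values; they are consumed by
 * byt_get_vid() and byt_set_pstate() below.
 */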
#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}
static inline int32_t div_fp(int32_t x, int32_t y)
{
	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
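/*
 * Worked example (illustrative, not in the original source): with
 * FRAC_BITS == 8 the driver computes in 24.8 fixed point, so
 * int_tofp(3) == 768 and fp_toint(768) == 3.  mul_fp() rescales the
 * double-width product: mul_fp(int_tofp(3), int_tofp(2)) ==
 * (768 * 512) >> 8 == 1536 == int_tofp(6).  div_fp() pre-shifts the
 * dividend so the quotient stays in fixed point:
 * div_fp(int_tofp(1), int_tofp(2)) == (256 << 8) / 512 == 128, i.e. 0.5.
 */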
struct sample {
	int32_t core_pct_busy;
	u64 aperf;
	u64 mperf;
	int freq;
	ktime_t time;
};

struct pstate_data {
	int	current_pstate;
	int	min_pstate;
	int	max_pstate;
	int	turbo_pstate;
};

struct vid_data {
	int32_t min;
	int32_t max;
	int32_t ratio;
	int32_t turbo;
};

struct _pid {
	int setpoint;
	int32_t integral;
	int32_t p_gain;
	int32_t i_gain;
	int32_t d_gain;
	int deadband;
	int32_t last_err;
};

struct cpudata {
	int cpu;

	struct timer_list timer;

	struct pstate_data pstate;
	struct vid_data vid;
	struct _pid pid;

	ktime_t last_sample_time;
	u64	prev_aperf;
	u64	prev_mperf;
	struct sample sample;
};

static struct cpudata **all_cpu_data;

struct pstate_adjust_policy {
	int sample_rate_ms;
	int deadband;
	int setpoint;
	int p_gain_pct;
	int d_gain_pct;
	int i_gain_pct;
};

struct pstate_funcs {
	int (*get_max)(void);
	int (*get_min)(void);
	int (*get_turbo)(void);
	void (*set)(struct cpudata *, int pstate);
	void (*get_vid)(struct cpudata *);
};

struct cpu_defaults {
	struct pstate_adjust_policy pid_policy;
	struct pstate_funcs funcs;
};

static struct pstate_adjust_policy pid_params;
static struct pstate_funcs pstate_funcs;

struct perf_limits {
	int no_turbo;
	int max_perf_pct;
	int min_perf_pct;
	int32_t max_perf;
	int32_t min_perf;
	int max_policy_pct;
	int max_sysfs_pct;
};

static struct perf_limits limits = {
	.no_turbo = 0,
	.max_perf_pct = 100,
	.max_perf = int_tofp(1),
	.min_perf_pct = 0,
	.min_perf = 0,
	.max_policy_pct = 100,
	.max_sysfs_pct = 100,
};
static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
			int deadband, int integral)
{
	pid->setpoint = setpoint;
	pid->deadband = deadband;
	pid->integral = int_tofp(integral);
	pid->last_err = int_tofp(setpoint) - int_tofp(busy);
}
static inline void pid_p_gain_set(struct _pid *pid, int percent)
{
	pid->p_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_i_gain_set(struct _pid *pid, int percent)
{
	pid->i_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static inline void pid_d_gain_set(struct _pid *pid, int percent)
{
	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
}
static signed int pid_calc(struct _pid *pid, int32_t busy)
{
	signed int result;
	int32_t pterm, dterm, fp_error;
	int32_t integral_limit;

	fp_error = int_tofp(pid->setpoint) - busy;

	if (abs(fp_error) <= int_tofp(pid->deadband))
		return 0;

	pterm = mul_fp(pid->p_gain, fp_error);

	pid->integral += fp_error;

	/* limit the integral term */
	integral_limit = int_tofp(30);
	if (pid->integral > integral_limit)
		pid->integral = integral_limit;
	if (pid->integral < -integral_limit)
		pid->integral = -integral_limit;

	dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
	pid->last_err = fp_error;

	result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
	if (result >= 0)
		result = result + (1 << (FRAC_BITS-1));
	else
		result = result - (1 << (FRAC_BITS-1));

	return (signed int)fp_toint(result);
}
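/*
 * Worked example (illustrative, not in the original source), using the core
 * defaults set up below (setpoint 97, p_gain_pct 20, i/d gains 0): a scaled
 * busy value of int_tofp(100) gives fp_error = int_tofp(-3) = -768.  With
 * p_gain = div_fp(int_tofp(20), int_tofp(100)) = 51, pterm =
 * mul_fp(51, -768) = -153 (about -0.6 in fixed point); after the bias of
 * half a unit and fp_toint(), the controller returns -2.  The caller treats
 * a negative result as "busier than the setpoint" and steps the P state up.
 */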
static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
{
	pid_p_gain_set(&cpu->pid, pid_params.p_gain_pct);
	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);

	pid_reset(&cpu->pid,
		pid_params.setpoint,
		100,
		pid_params.deadband,
		0);
}
static inline void intel_pstate_reset_all_pid(void)
{
	unsigned int cpu;

	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu])
			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
	}
}
/************************** debugfs begin ************************/
static int pid_param_set(void *data, u64 val)
{
	*(u32 *)data = val;
	intel_pstate_reset_all_pid();
	return 0;
}

static int pid_param_get(void *data, u64 *val)
{
	*val = *(u32 *)data;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
			pid_param_set, "%llu\n");

struct pid_param {
	char *name;
	void *value;
};

static struct pid_param pid_files[] = {
	{"sample_rate_ms", &pid_params.sample_rate_ms},
	{"d_gain_pct", &pid_params.d_gain_pct},
	{"i_gain_pct", &pid_params.i_gain_pct},
	{"deadband", &pid_params.deadband},
	{"setpoint", &pid_params.setpoint},
	{"p_gain_pct", &pid_params.p_gain_pct},
	{NULL, NULL}
};

static struct dentry *debugfs_parent;
static void intel_pstate_debug_expose_params(void)
{
	int i = 0;

	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
	if (IS_ERR_OR_NULL(debugfs_parent))
		return;
	while (pid_files[i].name) {
		debugfs_create_file(pid_files[i].name, 0660,
				debugfs_parent, pid_files[i].value,
				&fops_pid_param);
		i++;
	}
}

/************************** debugfs end ************************/
/************************** sysfs begin ************************/
#define show_one(file_name, object)					\
	static ssize_t show_##file_name					\
	(struct kobject *kobj, struct attribute *attr, char *buf)	\
	{								\
		return sprintf(buf, "%u\n", limits.object);		\
	}

static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;
	limits.no_turbo = clamp_t(int, input, 0 , 1);

	return count;
}
static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return count;
}
static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	ret = sscanf(buf, "%u", &input);
	if (ret != 1)
		return -EINVAL;

	limits.min_perf_pct = clamp_t(int, input, 0 , 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	return count;
}
show_one(no_turbo, no_turbo);
show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
static struct attribute *intel_pstate_attributes[] = {
	&no_turbo.attr,
	&max_perf_pct.attr,
	&min_perf_pct.attr,
	NULL
};

static struct attribute_group intel_pstate_attr_group = {
	.attrs = intel_pstate_attributes,
};

static struct kobject *intel_pstate_kobject;
static void intel_pstate_sysfs_expose_params(void)
{
	int rc;

	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
						&cpu_subsys.dev_root->kobj);
	BUG_ON(!intel_pstate_kobject);
	rc = sysfs_create_group(intel_pstate_kobject,
				&intel_pstate_attr_group);
	BUG_ON(rc);
}

/************************** sysfs end ************************/
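/*
 * Usage note (illustrative, not in the original source): the group above
 * appears under /sys/devices/system/cpu/intel_pstate/.  For example,
 * "echo 80 > /sys/devices/system/cpu/intel_pstate/max_perf_pct" caps the
 * highest P state at 80% of the available range via store_max_perf_pct();
 * min_perf_pct and no_turbo work the same way through their store_*
 * handlers.
 */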
static int byt_get_min_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 8) & 0x3F;
}

static int byt_get_max_pstate(void)
{
	u64 value;

	rdmsrl(BYT_RATIOS, value);
	return (value >> 16) & 0x3F;
}

static int byt_get_turbo_pstate(void)
{
	u64 value;

	rdmsrl(BYT_TURBO_RATIOS, value);
	return value & 0x3F;	/* return statement restored; 6-bit mask assumed, as for min/max above */
}
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;
	int32_t vid_fp;
	u32 vid;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	vid_fp = cpudata->vid.min + mul_fp(
		int_tofp(pstate - cpudata->pstate.min_pstate),
		cpudata->vid.ratio);

	vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
	vid = fp_toint(vid_fp);

	if (pstate > cpudata->pstate.max_pstate)
		vid = cpudata->vid.turbo;

	val |= vid;

	wrmsrl(MSR_IA32_PERF_CTL, val);
}
static void byt_get_vid(struct cpudata *cpudata)
{
	u64 value;

	rdmsrl(BYT_VIDS, value);
	cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
	cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
	cpudata->vid.ratio = div_fp(
		cpudata->vid.max - cpudata->vid.min,
		int_tofp(cpudata->pstate.max_pstate -
			cpudata->pstate.min_pstate));

	rdmsrl(BYT_TURBO_VIDS, value);
	cpudata->vid.turbo = value & 0x7f;
}
static int core_get_min_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 40) & 0xFF;
}

static int core_get_max_pstate(void)
{
	u64 value;

	rdmsrl(MSR_PLATFORM_INFO, value);
	return (value >> 8) & 0xFF;
}

static int core_get_turbo_pstate(void)
{
	u64 value;
	int nont, ret;

	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
	nont = core_get_max_pstate();
	ret = ((value) & 255);
	if (ret <= nont)
		ret = nont;
	return ret;
}
static void core_set_pstate(struct cpudata *cpudata, int pstate)
{
	u64 val;

	val = pstate << 8;
	if (limits.no_turbo)
		val |= (u64)1 << 32;

	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
}
static struct cpu_defaults core_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 20,
		.d_gain_pct = 0,
		.i_gain_pct = 0,
	},
	.funcs = {
		.get_max = core_get_max_pstate,
		.get_min = core_get_min_pstate,
		.get_turbo = core_get_turbo_pstate,
		.set = core_set_pstate,
	},
};
static struct cpu_defaults byt_params = {
	.pid_policy = {
		.sample_rate_ms = 10,
		.deadband = 0,
		.setpoint = 97,
		.p_gain_pct = 14,
		.d_gain_pct = 0,
		.i_gain_pct = 4,
	},
	.funcs = {
		.get_max = byt_get_max_pstate,
		.get_min = byt_get_min_pstate,
		.get_turbo = byt_get_turbo_pstate,
		.set = byt_set_pstate,
		.get_vid = byt_get_vid,
	},
};
static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
{
	int max_perf = cpu->pstate.turbo_pstate;
	int max_perf_adj;
	int min_perf;

	if (limits.no_turbo)
		max_perf = cpu->pstate.max_pstate;

	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
	*max = clamp_t(int, max_perf_adj,
			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);

	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
	*min = clamp_t(int, min_perf,
			cpu->pstate.min_pstate, max_perf);
}
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
{
	int max_perf, min_perf;

	intel_pstate_get_min_max(cpu, &min_perf, &max_perf);

	pstate = clamp_t(int, pstate, min_perf, max_perf);

	if (pstate == cpu->pstate.current_pstate)
		return;

	trace_cpu_frequency(pstate * 100000, cpu->cpu);

	cpu->pstate.current_pstate = pstate;

	pstate_funcs.set(cpu, pstate);
}
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
{
	int target;

	target = cpu->pstate.current_pstate + steps;

	intel_pstate_set_pstate(cpu, target);
}
static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
{
	int target;

	target = cpu->pstate.current_pstate - steps;
	intel_pstate_set_pstate(cpu, target);
}
static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
{
	cpu->pstate.min_pstate = pstate_funcs.get_min();
	cpu->pstate.max_pstate = pstate_funcs.get_max();
	cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();

	if (pstate_funcs.get_vid)
		pstate_funcs.get_vid(cpu);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
}
static inline void intel_pstate_calc_busy(struct cpudata *cpu)
{
	struct sample *sample = &cpu->sample;
	int64_t core_pct;
	u32 rem;

	core_pct = int_tofp(sample->aperf) * int_tofp(100);
	core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);

	/* round the fixed-point quotient to nearest */
	if ((rem << 1) >= int_tofp(sample->mperf))
		core_pct += 1;

	sample->freq = fp_toint(
		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));

	sample->core_pct_busy = (int32_t)core_pct;
}
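/*
 * Background note (illustrative, not in the original source): APERF counts
 * cycles at the actual frequency and MPERF at the guaranteed (max non-turbo)
 * rate, and both stop outside C0.  So 100 * aperf / mperf is the average
 * active frequency as a percentage of the max non-turbo P state; e.g.
 * aperf == 2 * mperf means the core averaged twice the guaranteed frequency
 * while running.  That is also why freq above is derived from max_pstate.
 */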
static inline void intel_pstate_sample(struct cpudata *cpu)
{
	u64 aperf, mperf;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	aperf = aperf >> FRAC_BITS;
	mperf = mperf >> FRAC_BITS;

	cpu->last_sample_time = cpu->sample.time;
	cpu->sample.time = ktime_get();
	cpu->sample.aperf = aperf;
	cpu->sample.mperf = mperf;
	cpu->sample.aperf -= cpu->prev_aperf;
	cpu->sample.mperf -= cpu->prev_mperf;

	intel_pstate_calc_busy(cpu);

	cpu->prev_aperf = aperf;
	cpu->prev_mperf = mperf;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
{
	int sample_time, delay;

	sample_time = pid_params.sample_rate_ms;
	delay = msecs_to_jiffies(sample_time);
	mod_timer_pinned(&cpu->timer, jiffies + delay);
}
static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
{
	int32_t core_busy, max_pstate, current_pstate, sample_ratio;
	u32 duration_us;
	u32 sample_time;

	core_busy = cpu->sample.core_pct_busy;
	max_pstate = int_tofp(cpu->pstate.max_pstate);
	current_pstate = int_tofp(cpu->pstate.current_pstate);
	core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));

	sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC);
	duration_us = (u32) ktime_us_delta(cpu->sample.time,
					cpu->last_sample_time);
	if (duration_us > sample_time * 3) {
		sample_ratio = div_fp(int_tofp(sample_time),
				int_tofp(duration_us));
		core_busy = mul_fp(core_busy, sample_ratio);
	}

	return core_busy;
}
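/*
 * Note (illustrative, not in the original source): core_pct_busy is the
 * average active (C0) frequency as a percentage of the max non-turbo ratio,
 * so multiplying by max_pstate / current_pstate normalizes out the current
 * P state: a core that stays busy at the current P state scales to ~100,
 * above the 97 setpoint, holding or raising the P state.  The duration check
 * accounts for idle time instead: the sampling timer is deferrable, so a
 * sample arriving more than 3 sample periods late means the CPU was mostly
 * idle, and core_busy is scaled down by sample_time / duration_us.
 */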
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
{
	int32_t busy_scaled;
	struct _pid *pid;
	signed int ctl = 0;
	int steps;

	pid = &cpu->pid;
	busy_scaled = intel_pstate_get_scaled_busy(cpu);

	ctl = pid_calc(pid, busy_scaled);

	steps = abs(ctl);

	/* negative controller output means busy is above the setpoint */
	if (ctl < 0)
		intel_pstate_pstate_increase(cpu, steps);
	else
		intel_pstate_pstate_decrease(cpu, steps);
}
static void intel_pstate_timer_func(unsigned long __data)
{
	struct cpudata *cpu = (struct cpudata *) __data;
	struct sample *sample;

	intel_pstate_sample(cpu);

	sample = &cpu->sample;

	intel_pstate_adjust_busy_pstate(cpu);

	trace_pstate_sample(fp_toint(sample->core_pct_busy),
			fp_toint(intel_pstate_get_scaled_busy(cpu)),
			cpu->pstate.current_pstate,
			sample->mperf,
			sample->aperf,
			sample->freq);

	intel_pstate_set_sample_time(cpu);
}
#define ICPU(model, policy) \
	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\
			(unsigned long)&policy }

static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
	ICPU(0x2a, core_params),
	ICPU(0x2d, core_params),
	ICPU(0x37, byt_params),
	ICPU(0x3a, core_params),
	ICPU(0x3c, core_params),
	ICPU(0x3d, core_params),
	ICPU(0x3e, core_params),
	ICPU(0x3f, core_params),
	ICPU(0x45, core_params),
	ICPU(0x46, core_params),
	ICPU(0x4f, core_params),
	ICPU(0x56, core_params),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
static int intel_pstate_init_cpu(unsigned int cpunum)
{
	struct cpudata *cpu;

	const struct x86_cpu_id *id;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL);
	if (!all_cpu_data[cpunum])
		return -ENOMEM;

	cpu = all_cpu_data[cpunum];

	intel_pstate_get_cpu_pstates(cpu);

	cpu->cpu = cpunum;

	init_timer_deferrable(&cpu->timer);
	cpu->timer.function = intel_pstate_timer_func;
	cpu->timer.data = (unsigned long)cpu;
	cpu->timer.expires = jiffies + HZ/100;
	intel_pstate_busy_pid_reset(cpu);
	intel_pstate_sample(cpu);

	add_timer_on(&cpu->timer, cpunum);

	pr_info("Intel pstate controlling: cpu %d\n", cpunum);

	return 0;
}
static unsigned int intel_pstate_get(unsigned int cpu_num)
{
	struct sample *sample;
	struct cpudata *cpu;

	cpu = all_cpu_data[cpu_num];
	if (!cpu)
		return 0;
	sample = &cpu->sample;
	return sample->freq;
}
static int intel_pstate_set_policy(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;

	cpu = all_cpu_data[policy->cpu];

	if (!policy->cpuinfo.max_freq)
		return -ENODEV;

	if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
		limits.min_perf_pct = 100;
		limits.min_perf = int_tofp(1);
		limits.max_perf_pct = 100;
		limits.max_perf = int_tofp(1);
		return 0;
	}

	limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));

	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));

	return 0;
}
static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);

	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
		return -EINVAL;

	return 0;
}
static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
{
	int cpu_num = policy->cpu;
	struct cpudata *cpu = all_cpu_data[cpu_num];

	pr_info("intel_pstate CPU %d exiting\n", cpu_num);

	del_timer_sync(&all_cpu_data[cpu_num]->timer);
	intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
	kfree(all_cpu_data[cpu_num]);
	all_cpu_data[cpu_num] = NULL;
}
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
{
	struct cpudata *cpu;
	int rc;

	rc = intel_pstate_init_cpu(policy->cpu);
	if (rc)
		return rc;

	cpu = all_cpu_data[policy->cpu];

	if (!limits.no_turbo &&
		limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
		policy->policy = CPUFREQ_POLICY_PERFORMANCE;
	else
		policy->policy = CPUFREQ_POLICY_POWERSAVE;

	policy->min = cpu->pstate.min_pstate * 100000;
	policy->max = cpu->pstate.turbo_pstate * 100000;

	/* cpuinfo and default policy values */
	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
	policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000;
	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
	cpumask_set_cpu(policy->cpu, policy->cpus);

	return 0;
}
static struct cpufreq_driver intel_pstate_driver = {
	.flags		= CPUFREQ_CONST_LOOPS,
	.verify		= intel_pstate_verify_policy,
	.setpolicy	= intel_pstate_set_policy,
	.get		= intel_pstate_get,
	.init		= intel_pstate_cpu_init,
	.stop_cpu	= intel_pstate_stop_cpu,
	.name		= "intel_pstate",
};
static int __initdata no_load;
static int intel_pstate_msrs_not_valid(void)
{
	/* Check that all the msr's we are using are valid. */
	u64 aperf, mperf, tmp;

	rdmsrl(MSR_IA32_APERF, aperf);
	rdmsrl(MSR_IA32_MPERF, mperf);

	if (!pstate_funcs.get_max() ||
		!pstate_funcs.get_min() ||
		!pstate_funcs.get_turbo())
		return -ENODEV;

	rdmsrl(MSR_IA32_APERF, tmp);
	if (!(tmp - aperf))
		return -ENODEV;

	rdmsrl(MSR_IA32_MPERF, tmp);
	if (!(tmp - mperf))
		return -ENODEV;

	return 0;
}
static void copy_pid_params(struct pstate_adjust_policy *policy)
{
	pid_params.sample_rate_ms = policy->sample_rate_ms;
	pid_params.p_gain_pct = policy->p_gain_pct;
	pid_params.i_gain_pct = policy->i_gain_pct;
	pid_params.d_gain_pct = policy->d_gain_pct;
	pid_params.deadband = policy->deadband;
	pid_params.setpoint = policy->setpoint;
}
static void copy_cpu_funcs(struct pstate_funcs *funcs)
{
	pstate_funcs.get_max = funcs->get_max;
	pstate_funcs.get_min = funcs->get_min;
	pstate_funcs.get_turbo = funcs->get_turbo;
	pstate_funcs.set = funcs->set;
	pstate_funcs.get_vid = funcs->get_vid;
}
#if IS_ENABLED(CONFIG_ACPI)
#include <acpi/processor.h>

static bool intel_pstate_no_acpi_pss(void)
{
	int i;

	for_each_possible_cpu(i) {
		acpi_status status;
		union acpi_object *pss;
		struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
		struct acpi_processor *pr = per_cpu(processors, i);

		if (!pr)
			continue;

		status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
		if (ACPI_FAILURE(status))
			continue;

		pss = buffer.pointer;
		if (pss && pss->type == ACPI_TYPE_PACKAGE) {
			kfree(pss);
			return false;
		}

		kfree(pss);
	}

	return true;
}

struct hw_vendor_info {
	u16  valid;
	char oem_id[ACPI_OEM_ID_SIZE];
	char oem_table_id[ACPI_OEM_TABLE_ID_SIZE];
};

/* Hardware vendor-specific info that has its own power management modes */
static struct hw_vendor_info vendor_info[] = {
	{1, "HP    ", "ProLiant"},
	{0, "", ""},
};

static bool intel_pstate_platform_pwr_mgmt_exists(void)
{
	struct acpi_table_header hdr;
	struct hw_vendor_info *v_info;

	if (acpi_disabled
	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
		return false;

	for (v_info = vendor_info; v_info->valid; v_info++) {
		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
		    && intel_pstate_no_acpi_pss())
			return true;
	}

	return false;
}

#else /* CONFIG_ACPI not enabled */
static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
#endif /* CONFIG_ACPI */
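/*
 * Note (illustrative, not in the original source): the check above keys off
 * the FADT OEM fields rather than a DMI match.  A listed platform (e.g. an
 * HP ProLiant) whose firmware exposes no _PSS objects is taken to have its
 * own non-ACPI power management interface, so intel_pstate_init() below
 * steps aside for it by returning -ENODEV.
 */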
static int __init intel_pstate_init(void)
{
	int cpu, rc = 0;
	const struct x86_cpu_id *id;
	struct cpu_defaults *cpu_info;

	if (no_load)
		return -ENODEV;

	id = x86_match_cpu(intel_pstate_cpu_ids);
	if (!id)
		return -ENODEV;

	/*
	 * The Intel pstate driver will be ignored if the platform
	 * firmware has its own power management modes.
	 */
	if (intel_pstate_platform_pwr_mgmt_exists())
		return -ENODEV;

	cpu_info = (struct cpu_defaults *)id->driver_data;

	copy_pid_params(&cpu_info->pid_policy);
	copy_cpu_funcs(&cpu_info->funcs);

	if (intel_pstate_msrs_not_valid())
		return -ENODEV;

	pr_info("Intel P-state driver initializing.\n");

	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
	if (!all_cpu_data)
		return -ENOMEM;

	rc = cpufreq_register_driver(&intel_pstate_driver);
	if (rc)
		goto out;

	intel_pstate_debug_expose_params();
	intel_pstate_sysfs_expose_params();

	return rc;
out:
	get_online_cpus();
	for_each_online_cpu(cpu) {
		if (all_cpu_data[cpu]) {
			del_timer_sync(&all_cpu_data[cpu]->timer);
			kfree(all_cpu_data[cpu]);
		}
	}

	put_online_cpus();
	vfree(all_cpu_data);
	return -ENODEV;
}
device_initcall(intel_pstate_init);
static int __init intel_pstate_setup(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "disable"))
		no_load = 1;
	return 0;
}
early_param("intel_pstate", intel_pstate_setup);
MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
MODULE_DESCRIPTION("'intel_pstate' - P state driver Intel Core processors");
MODULE_LICENSE("GPL");