[CPUFREQ] Disable sysfs ui for p4-clockmod.
arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
/*
 * Pentium 4/Xeon CPU on demand clock modulation/speed scaling
 * (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
 * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com>
 * (C) 2002 Arjan van de Ven <arjanv@redhat.com>
 * (C) 2002 Tora T. Engstad
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * The author(s) of this software shall not be held liable for damages
 * of any nature resulting due to the use of this software. This
 * software is provided AS-IS with no warranties.
 *
 * Date          Errata      Description
 * 20020525      N44, O17    12.5% or 25% DC causes lockup
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/cpumask.h>

#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/timex.h>

#include "speedstep-lib.h"

#define PFX "p4-clockmod: "
#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "p4-clockmod", msg)

/*
 * Duty Cycle (3 bits). Note that DC_DISABLE is not specified in the
 * Intel docs; it is used here simply to mean "modulation disabled".
 */
enum {
        DC_RESV, DC_DFLT, DC_25PT, DC_38PT, DC_50PT,
        DC_64PT, DC_75PT, DC_88PT, DC_DISABLE
};
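/*
 * Each state selects an on-time in steps of 12.5% (1/8) of the stock
 * frequency, e.g. DC_50PT -> stock_freq * 4 / 8 = 50%. On CPUs with the
 * N44/O17 errata, requests for DC_DFLT or DC_25PT are bumped up to
 * DC_38PT (see cpufreq_p4_setdc() below).
 */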

#define DC_ENTRIES 8


static int has_N44_O17_errata[NR_CPUS];
static unsigned int stock_freq;
static struct cpufreq_driver p4clockmod_driver;
static unsigned int cpufreq_p4_get(unsigned int cpu);

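/*
 * Program the clock-modulation duty cycle for one CPU via
 * MSR_IA32_THERM_CONTROL. DC_DISABLE clears the enable bit (bit 4);
 * any other valid state writes the 3-bit duty-cycle field.
 */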
static int cpufreq_p4_setdc(unsigned int cpu, unsigned int newstate)
{
        u32 l, h;

        if (!cpu_online(cpu) || (newstate > DC_DISABLE) || (newstate == DC_RESV))
                return -EINVAL;

        rdmsr_on_cpu(cpu, MSR_IA32_THERM_STATUS, &l, &h);

        if (l & 0x01)
                dprintk("CPU#%d currently thermal throttled\n", cpu);

        if (has_N44_O17_errata[cpu] && (newstate == DC_25PT || newstate == DC_DFLT))
                newstate = DC_38PT;

        rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);
        if (newstate == DC_DISABLE) {
                dprintk("CPU#%d disabling modulation\n", cpu);
                wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l & ~(1<<4), h);
        } else {
                dprintk("CPU#%d setting duty cycle to %d%%\n",
                        cpu, ((125 * newstate) / 10));
                /* bits 63 - 5 : reserved
                 * bit 4       : enable/disable
                 * bits 3-1    : duty cycle
                 * bit 0       : reserved
                 */
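                /* clear the old duty-cycle bits (3:1), then set the enable
                 * bit and the requested duty cycle (~14 == ~0xE masks 3:1) */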
                l = (l & ~14);
                l = l | (1<<4) | ((newstate & 0x7)<<1);
                wrmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, l, h);
        }

        return 0;
}


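/*
 * Frequency table: the .frequency fields are filled in at init time as
 * stock_freq * index / 8 (DC_DFLT is invalidated on errata-affected CPUs);
 * the DC_RESV entries remain the invalid/end markers.
 */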
static struct cpufreq_frequency_table p4clockmod_table[] = {
        {DC_RESV, CPUFREQ_ENTRY_INVALID},
        {DC_DFLT, 0},
        {DC_25PT, 0},
        {DC_38PT, 0},
        {DC_50PT, 0},
        {DC_64PT, 0},
        {DC_75PT, 0},
        {DC_88PT, 0},
        {DC_DISABLE, 0},
        {DC_RESV, CPUFREQ_TABLE_END},
};


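/*
 * cpufreq ->target callback: pick the closest table entry for the
 * requested frequency, send PRECHANGE notifications, program the duty
 * cycle on every CPU in the policy, then send POSTCHANGE notifications.
 */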
static int cpufreq_p4_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
{
        unsigned int newstate = DC_RESV;
        struct cpufreq_freqs freqs;
        int i;

        if (cpufreq_frequency_table_target(policy, &p4clockmod_table[0], target_freq, relation, &newstate))
                return -EINVAL;

        freqs.old = cpufreq_p4_get(policy->cpu);
        freqs.new = stock_freq * p4clockmod_table[newstate].index / 8;

        if (freqs.new == freqs.old)
                return 0;

        /* notifiers */
        for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
        }

        /* run on each logical CPU, see section 13.15.3 of IA32 Intel
         * Architecture Software Developer's Manual, Volume 3
         */
        for_each_cpu_mask_nr(i, policy->cpus)
                cpufreq_p4_setdc(i, p4clockmod_table[newstate].index);

        /* notifiers */
        for_each_cpu_mask_nr(i, policy->cpus) {
                freqs.cpu = i;
                cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
        }

        return 0;
}


static int cpufreq_p4_verify(struct cpufreq_policy *policy)
{
        return cpufreq_frequency_table_verify(policy, &p4clockmod_table[0]);
}


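/*
 * Determine the stock (unthrottled) frequency of the CPU via the
 * speedstep library, and warn when a driver with real voltage scaling
 * (acpi-cpufreq or speedstep-ich) would be a better choice.
 */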
static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
{
        if (c->x86 == 0x06) {
                if (cpu_has(c, X86_FEATURE_EST))
                        printk(KERN_WARNING PFX "Warning: EST-capable CPU detected. "
                               "The acpi-cpufreq module offers voltage scaling"
                               " in addition to frequency scaling. You should use "
                               "that instead of p4-clockmod, if possible.\n");
                switch (c->x86_model) {
                case 0x0E: /* Core */
                case 0x0F: /* Core Duo */
                        p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
                        return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PCORE);
                case 0x0D: /* Pentium M (Dothan) */
                        p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
                        /* fall through */
                case 0x09: /* Pentium M (Banias) */
                        return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_PM);
                }
        }

        if (c->x86 != 0xF) {
                if (!cpu_has(c, X86_FEATURE_EST))
                        printk(KERN_WARNING PFX "Unknown p4-clockmod-capable CPU. "
                               "Please send an e-mail to <cpufreq@vger.kernel.org>\n");
                return 0;
        }

        /* on P-4s, the TSC runs with constant frequency independent of
         * whether throttling is active or not. */
        p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;

        if (speedstep_detect_processor() == SPEEDSTEP_PROCESSOR_P4M) {
                printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
                       "The speedstep-ich or acpi cpufreq modules offer "
                       "voltage scaling in addition to frequency scaling. "
                       "You should use either one instead of p4-clockmod, "
                       "if possible.\n");
                return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4M);
        }

        return speedstep_get_processor_frequency(SPEEDSTEP_PROCESSOR_P4D);
}



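/*
 * cpufreq ->init callback: apply the errata workaround, determine the
 * stock frequency, fill in the frequency table and set up the policy
 * defaults for this CPU (and its HT siblings on SMP).
 */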
static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
{
        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
        int cpuid = 0;
        unsigned int i;

#ifdef CONFIG_SMP
        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
#endif

        /* Errata workaround */
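        /* pack family/model/stepping as 0xFMS so the affected steppings
         * can be matched below */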
        cpuid = (c->x86 << 8) | (c->x86_model << 4) | c->x86_mask;
        switch (cpuid) {
        case 0x0f07:
        case 0x0f0a:
        case 0x0f11:
        case 0x0f12:
                has_N44_O17_errata[policy->cpu] = 1;
                dprintk("has errata -- disabling low frequencies\n");
        }

        /* get max frequency */
        stock_freq = cpufreq_p4_get_frequency(c);
        if (!stock_freq)
                return -EINVAL;

        /* table init */
        for (i = 1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
                if ((i < 2) && (has_N44_O17_errata[policy->cpu]))
                        p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
                else
                        p4clockmod_table[i].frequency = (stock_freq * i)/8;
        }
        cpufreq_frequency_table_get_attr(p4clockmod_table, policy->cpu);

        /* cpuinfo and default policy values */
        policy->cpuinfo.transition_latency = 1000000; /* assumed */
        policy->cur = stock_freq;

        return cpufreq_frequency_table_cpuinfo(policy, &p4clockmod_table[0]);
}


static int cpufreq_p4_cpu_exit(struct cpufreq_policy *policy)
{
        cpufreq_frequency_table_put_attr(policy->cpu);
        return 0;
}

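/*
 * cpufreq ->get callback: read THERM_CONTROL and, if modulation is
 * enabled (bit 4), derive the effective frequency from the current
 * 3-bit duty-cycle state; otherwise report the stock frequency.
 */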
static unsigned int cpufreq_p4_get(unsigned int cpu)
{
        u32 l, h;

        rdmsr_on_cpu(cpu, MSR_IA32_THERM_CONTROL, &l, &h);

        if (l & 0x10) {
                l = l >> 1;
                l &= 0x7;
        } else
                l = DC_DISABLE;

        if (l != DC_DISABLE)
                return (stock_freq * l / 8);

        return stock_freq;
}

static struct freq_attr *p4clockmod_attr[] = {
        &cpufreq_freq_attr_scaling_available_freqs,
        NULL,
};

static struct cpufreq_driver p4clockmod_driver = {
        .verify = cpufreq_p4_verify,
        .target = cpufreq_p4_target,
        .init = cpufreq_p4_cpu_init,
        .exit = cpufreq_p4_cpu_exit,
        .get = cpufreq_p4_get,
        .name = "p4-clockmod",
        .owner = THIS_MODULE,
        .attr = p4clockmod_attr,
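        /* per the commit subject above ("Disable sysfs ui"), keep this
         * driver from exposing its cpufreq sysfs interface */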
        .hide_interface = 1,
};


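/*
 * Module init: register the driver only on Intel CPUs that advertise
 * both ACPI processor support and on-demand clock modulation (ACC).
 */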
static int __init cpufreq_p4_init(void)
{
        struct cpuinfo_x86 *c = &cpu_data(0);
        int ret;

        /*
         * THERM_CONTROL is architectural for IA32 now, so
         * we can rely on the capability checks
         */
        if (c->x86_vendor != X86_VENDOR_INTEL)
                return -ENODEV;

        if (!test_cpu_cap(c, X86_FEATURE_ACPI) ||
            !test_cpu_cap(c, X86_FEATURE_ACC))
                return -ENODEV;

        ret = cpufreq_register_driver(&p4clockmod_driver);
        if (!ret)
                printk(KERN_INFO PFX "P4/Xeon(TM) CPU On-Demand Clock Modulation available\n");

        return ret;
}


static void __exit cpufreq_p4_exit(void)
{
        cpufreq_unregister_driver(&p4clockmod_driver);
}


MODULE_AUTHOR("Zwane Mwaikambo <zwane@commfireservices.com>");
MODULE_DESCRIPTION("cpufreq driver for Pentium(TM) 4/Xeon(TM)");
MODULE_LICENSE("GPL");

late_initcall(cpufreq_p4_init);
module_exit(cpufreq_p4_exit);