drivers/acpi/processor_core.c
/*
 * Copyright (C) 2005 Intel Corporation
 * Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
 *
 *      Alex Chiang <achiang@hp.com>
 *      - Unified x86/ia64 implementations
 *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *      - Added _PDC for platforms with Intel CPUs
 */
#include <linux/dmi.h>

#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#include "internal.h"

#define PREFIX "ACPI: "
#define _COMPONENT ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_core");

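/*
 * DMI callback: disable mwait-based C-state entry on the matched system.
 */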
static int set_no_mwait(const struct dmi_system_id *id)
{
        printk(KERN_NOTICE PREFIX "%s detected - "
                "disabling mwait for CPU C-states\n", id->ident);
        idle_nomwait = 1;
        return 0;
}

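/*
 * Systems on which mwait must not be used for C-state entry; matching
 * any entry here calls set_no_mwait().
 */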
static struct dmi_system_id __cpuinitdata processor_idle_dmi_table[] = {
        {
        set_no_mwait, "IFL91 board", {
        DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
        DMI_MATCH(DMI_SYS_VENDOR, "ZEPTO"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "3215W"),
        DMI_MATCH(DMI_BOARD_NAME, "IFL91") }, NULL},
        {
        set_no_mwait, "Extensa 5220", {
        DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
        DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
        DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
        DMI_MATCH(DMI_BOARD_NAME, "Columbia") }, NULL},
        {},
};

#ifdef CONFIG_SMP
static struct acpi_table_madt *madt;

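/*
 * Match a local APIC MADT entry against an ACPI processor ID.  On a
 * match, store the APIC ID in *apic_id and return 1; disabled entries
 * and non-matching IDs return 0.
 */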
static int map_lapic_id(struct acpi_subtable_header *entry,
                u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_apic *lapic =
                (struct acpi_madt_local_apic *)entry;

        if (!(lapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (lapic->processor_id != acpi_id)
                return 0;

        *apic_id = lapic->id;
        return 1;
}

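/*
 * Same as map_lapic_id(), but for x2APIC entries.  These are matched
 * only for Device() declarations, where acpi_id is the _UID value.
 */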
static int map_x2apic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_x2apic *apic =
                (struct acpi_madt_local_x2apic *)entry;

        if (!(apic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration && (apic->uid == acpi_id)) {
                *apic_id = apic->local_apic_id;
                return 1;
        }

        return 0;
}

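/*
 * Match a local SAPIC entry (ia64).  Device() declarations are matched
 * by _UID (and the entry must be long enough to carry one); Processor()
 * declarations are matched by processor_id.  The returned APIC ID
 * combines the id and eid fields.
 */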
static int map_lsapic_id(struct acpi_subtable_header *entry,
                int device_declaration, u32 acpi_id, int *apic_id)
{
        struct acpi_madt_local_sapic *lsapic =
                (struct acpi_madt_local_sapic *)entry;

        if (!(lsapic->lapic_flags & ACPI_MADT_ENABLED))
                return 0;

        if (device_declaration) {
                if ((entry->length < 16) || (lsapic->uid != acpi_id))
                        return 0;
        } else if (lsapic->processor_id != acpi_id)
                return 0;

        *apic_id = (lsapic->id << 8) | lsapic->eid;
        return 1;
}

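/*
 * Walk every MADT subtable and return the APIC ID of the first local
 * APIC, x2APIC or SAPIC entry that matches acpi_id, or -1 if nothing
 * matches (or no MADT was cached at boot).
 */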
static int map_madt_entry(int type, u32 acpi_id)
{
        unsigned long madt_end, entry;
        int apic_id = -1;

        if (!madt)
                return apic_id;

        entry = (unsigned long)madt;
        madt_end = entry + madt->header.length;

        /* Parse all entries looking for a match. */

        entry += sizeof(struct acpi_table_madt);
        while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
                struct acpi_subtable_header *header =
                        (struct acpi_subtable_header *)entry;
                if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                        if (map_lapic_id(header, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
                        if (map_x2apic_id(header, type, acpi_id, &apic_id))
                                break;
                } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                        if (map_lsapic_id(header, type, acpi_id, &apic_id))
                                break;
                }
                entry += header->length;
        }
        return apic_id;
}

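/*
 * Evaluate the processor's _MAT method, which returns a buffer holding a
 * MADT-style subtable, and map that entry to an APIC ID.  Returns -1 if
 * _MAT is absent, malformed, or does not match acpi_id.
 */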
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *obj;
        struct acpi_subtable_header *header;
        int apic_id = -1;

        if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
                goto exit;

        if (!buffer.length || !buffer.pointer)
                goto exit;

        obj = buffer.pointer;
        if (obj->type != ACPI_TYPE_BUFFER ||
            obj->buffer.length < sizeof(struct acpi_subtable_header)) {
                goto exit;
        }

        header = (struct acpi_subtable_header *)obj->buffer.pointer;
        if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
                map_lapic_id(header, acpi_id, &apic_id);
        } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
                map_lsapic_id(header, type, acpi_id, &apic_id);
        }

exit:
        if (buffer.pointer)
                kfree(buffer.pointer);
        return apic_id;
}

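/*
 * Translate an ACPI processor ID into a logical CPU number: try _MAT
 * first, fall back to the cached MADT, then look the resulting APIC ID
 * up among the possible CPUs.  Returns -1 if no mapping exists.
 */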
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
        int i;
        int apic_id = -1;

        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
        if (apic_id == -1)
                return apic_id;

        for_each_possible_cpu(i) {
                if (cpu_physical_id(i) == apic_id)
                        return i;
        }
        return -1;
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
#endif

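/*
 * A processor object can be declared for a CPU that is not actually
 * installed (an unpopulated socket, for instance).  Treat a processor as
 * present only if its ACPI ID maps to a logical CPU via acpi_get_cpuid().
 */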
static bool processor_physically_present(acpi_handle handle)
{
        int cpuid, type;
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return false;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = object.processor.proc_id;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return false;
                acpi_id = tmp;
                break;
        default:
                return false;
        }

        type = (acpi_type == ACPI_TYPE_DEVICE) ? 1 : 0;
        cpuid = acpi_get_cpuid(handle, type, acpi_id);

        if (cpuid == -1)
                return false;

        return true;
}

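/*
 * Fill the three-dword _PDC capability buffer: revision, number of
 * capability dwords (one), and the capability bits themselves, with any
 * architecture-specific bits applied last.
 */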
static void acpi_set_pdc_bits(u32 *buf)
{
        buf[0] = ACPI_PDC_REVISION_ID;
        buf[1] = 1;

        /* Enable coordination with firmware's _TSD info */
        buf[2] = ACPI_PDC_SMP_T_SWCOORD;

        /* Twiddle arch-specific bits needed for _PDC */
        arch_acpi_set_pdc_bits(buf);
}

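/*
 * Allocate the acpi_object_list/acpi_object/buffer triplet that carries
 * the 12-byte _PDC capability buffer.  The caller must free all three
 * allocations (see acpi_processor_set_pdc()).
 */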
static struct acpi_object_list *acpi_processor_alloc_pdc(void)
{
        struct acpi_object_list *obj_list;
        union acpi_object *obj;
        u32 *buf;

        /* allocate and initialize pdc. It will be used later. */
        obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
        if (!obj_list) {
                printk(KERN_ERR "Memory allocation error\n");
                return NULL;
        }

        obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
        if (!obj) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj_list);
                return NULL;
        }

        buf = kmalloc(12, GFP_KERNEL);
        if (!buf) {
                printk(KERN_ERR "Memory allocation error\n");
                kfree(obj);
                kfree(obj_list);
                return NULL;
        }

        acpi_set_pdc_bits(buf);

        obj->type = ACPI_TYPE_BUFFER;
        obj->buffer.length = 12;
        obj->buffer.pointer = (u8 *) buf;
        obj_list->count = 1;
        obj_list->pointer = obj;

        return obj_list;
}

/*
 * _PDC is required for a BIOS-OS handshake for most of the newer
 * ACPI processor features.
 */
static int
acpi_processor_eval_pdc(acpi_handle handle, struct acpi_object_list *pdc_in)
{
        acpi_status status = AE_OK;

        if (idle_nomwait) {
                /*
                 * If mwait is disabled for CPU C-states, clear the
                 * C2C3_FFH and C1_FFH access-mode bits in the _PDC
                 * capability buffer before handing it to the firmware.
                 */
                union acpi_object *obj;
                u32 *buffer = NULL;

                obj = pdc_in->pointer;
                buffer = (u32 *)(obj->buffer.pointer);
                buffer[2] &= ~(ACPI_PDC_C_C2C3_FFH | ACPI_PDC_C_C1_FFH);
        }
        status = acpi_evaluate_object(handle, "_PDC", pdc_in, NULL);

        if (ACPI_FAILURE(status))
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "Could not evaluate _PDC, using legacy perf. control.\n"));

        return status;
}

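/*
 * Build the _PDC argument list, evaluate _PDC on the given processor
 * handle, and free the buffers again.  A no-op on architectures that do
 * not implement _PDC.
 */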
void acpi_processor_set_pdc(acpi_handle handle)
{
        struct acpi_object_list *obj_list;

        if (arch_has_acpi_pdc() == false)
                return;

        obj_list = acpi_processor_alloc_pdc();
        if (!obj_list)
                return;

        acpi_processor_eval_pdc(handle, obj_list);

        kfree(obj_list->pointer->buffer.pointer);
        kfree(obj_list->pointer);
        kfree(obj_list);
}
EXPORT_SYMBOL_GPL(acpi_processor_set_pdc);

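/*
 * acpi_walk_namespace() callback: evaluate _PDC for every processor
 * object that is physically present.
 */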
static acpi_status
early_init_pdc(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        if (processor_physically_present(handle) == false)
                return AE_OK;

        acpi_processor_set_pdc(handle);
        return AE_OK;
}

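/*
 * Evaluate _PDC for all physically present processors early during boot.
 * Caches the MADT for the SMP mapping helpers and applies the mwait DMI
 * blacklist before walking the namespace.
 */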
void __init acpi_early_processor_set_pdc(void)
{
#ifdef CONFIG_SMP
        if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
                                (struct acpi_table_header **)&madt)))
                madt = NULL;
#endif

        /*
         * Check whether the system matches an entry in the DMI table
         * above; if so, OSPM should not use mwait for CPU C-states.
         */
        dmi_check_system(processor_idle_dmi_table);

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            early_init_pdc, NULL, NULL, NULL);
}