/*
 * Copyright 2012 by Oracle Inc
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 *
 * This code borrows ideas from https://lkml.org/lkml/2011/11/30/249
 * so many thanks go to Kevin Tian <kevin.tian@intel.com>
 * and Yu Ke <ke.yu@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/freezer.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <acpi/processor.h>

#include <xen/xen.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypercall.h>

#define DRV_NAME "xen-acpi-processor: "

static int no_hypercall;
MODULE_PARM_DESC(off, "Inhibit the hypercall.");
module_param_named(off, no_hypercall, int, 0400);

/*
 * Note: Do not convert the acpi_id* below to cpumask_var_t or use cpumask_bit
 * - as those shrink to nr_cpu_bits (which is dependent on possible_cpu), which
 * can be less than what we want to put in. Instead use the 'nr_acpi_bits'
 * which is dynamically computed based on the MADT or x2APIC table.
 */
static unsigned int nr_acpi_bits;
/* Mutex to protect the acpi_ids_done bitmap - for CPU hotplug use. */
static DEFINE_MUTEX(acpi_ids_mutex);
/* Which ACPI IDs we have processed from 'struct acpi_processor'. */
static unsigned long *acpi_ids_done;
/* Which ACPI IDs exist in the SSDT/DSDT processor definitions. */
static unsigned long __initdata *acpi_id_present;
/* And if there is a _CST definition (or a PBLK) for the ACPI IDs. */
static unsigned long __initdata *acpi_id_cst_present;

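/*
 * Translate the ACPI C-state data for one processor into the Xen
 * representation and upload it via the XENPF_set_processor_pminfo
 * hypercall. Returns -EINVAL if the processor has no valid C-states.
 */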
static int push_cxx_to_hypervisor(struct acpi_processor *_pr)
{
        struct xen_platform_op op = {
                .cmd = XENPF_set_processor_pminfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id = _pr->acpi_id,
                .u.set_pminfo.type = XEN_PM_CX,
        };
        struct xen_processor_cx *dst_cx, *dst_cx_states = NULL;
        struct acpi_processor_cx *cx;
        unsigned int i, ok;
        int ret = 0;

        dst_cx_states = kcalloc(_pr->power.count,
                                sizeof(struct xen_processor_cx), GFP_KERNEL);
        if (!dst_cx_states)
                return -ENOMEM;

        for (ok = 0, i = 1; i <= _pr->power.count; i++) {
                cx = &_pr->power.states[i];
                if (!cx->valid)
                        continue;

                dst_cx = &(dst_cx_states[ok++]);

                dst_cx->reg.space_id = ACPI_ADR_SPACE_SYSTEM_IO;
                if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        dst_cx->reg.bit_width = 8;
                        dst_cx->reg.bit_offset = 0;
                        dst_cx->reg.access_size = 1;
                } else {
                        dst_cx->reg.space_id = ACPI_ADR_SPACE_FIXED_HARDWARE;
                        if (cx->entry_method == ACPI_CSTATE_FFH) {
                                /* NATIVE_CSTATE_BEYOND_HALT */
                                dst_cx->reg.bit_offset = 2;
                                dst_cx->reg.bit_width = 1; /* VENDOR_INTEL */
                        }
                        dst_cx->reg.access_size = 0;
                }
                dst_cx->reg.address = cx->address;

                dst_cx->type = cx->type;
                dst_cx->latency = cx->latency;
                dst_cx->power = cx->power;

                dst_cx->dpcnt = 0;
                set_xen_guest_handle(dst_cx->dp, NULL);
        }
        if (!ok) {
                pr_debug(DRV_NAME "No _Cx for ACPI CPU %u\n", _pr->acpi_id);
                kfree(dst_cx_states);
                return -EINVAL;
        }
        op.u.set_pminfo.power.count = ok;
        op.u.set_pminfo.power.flags.bm_control = _pr->flags.bm_control;
        op.u.set_pminfo.power.flags.bm_check = _pr->flags.bm_check;
        op.u.set_pminfo.power.flags.has_cst = _pr->flags.has_cst;
        op.u.set_pminfo.power.flags.power_setup_done =
                _pr->flags.power_setup_done;

        set_xen_guest_handle(op.u.set_pminfo.power.states, dst_cx_states);

        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                pr_debug("ACPI CPU%u - C-states uploaded.\n", _pr->acpi_id);
                for (i = 1; i <= _pr->power.count; i++) {
                        cx = &_pr->power.states[i];
                        if (!cx->valid)
                                continue;
                        pr_debug("     C%d: %s %d uS\n",
                                 cx->type, cx->desc, (u32)cx->latency);
                }
        } else if (ret != -EINVAL)
                /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
                 * table is referencing a non-existing CPU - which can happen
                 * with broken ACPI tables. */
                pr_err(DRV_NAME "(CX): Hypervisor error (%d) for ACPI CPU%u\n",
                       ret, _pr->acpi_id);

        kfree(dst_cx_states);

        return ret;
}
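
/*
 * Copy the _PSS (performance supported states) table. The Xen and ACPI
 * structures are laid out identically, so each entry can be copied with
 * a single memcpy; the BUILD_BUG_ON below enforces that assumption.
 */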
static struct xen_processor_px *
xen_copy_pss_data(struct acpi_processor *_pr,
                  struct xen_processor_performance *dst_perf)
{
        struct xen_processor_px *dst_states = NULL;
        unsigned int i;

        BUILD_BUG_ON(sizeof(struct xen_processor_px) !=
                     sizeof(struct acpi_processor_px));

        dst_states = kcalloc(_pr->performance->state_count,
                             sizeof(struct xen_processor_px), GFP_KERNEL);
        if (!dst_states)
                return ERR_PTR(-ENOMEM);

        dst_perf->state_count = _pr->performance->state_count;
        for (i = 0; i < _pr->performance->state_count; i++) {
                /* Fortunately for us, they are both the same size. */
                memcpy(&(dst_states[i]), &(_pr->performance->states[i]),
                       sizeof(struct acpi_processor_px));
        }
        return dst_states;
}
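
/*
 * Copy the _PSD (P-state dependency) data: which CPUs share a P-state
 * domain and how their frequency changes must be coordinated.
 */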
static int xen_copy_psd_data(struct acpi_processor *_pr,
                             struct xen_processor_performance *dst)
{
        struct acpi_psd_package *pdomain;

        BUILD_BUG_ON(sizeof(struct xen_psd_package) !=
                     sizeof(struct acpi_psd_package));

        /* This information is enumerated only if
         * acpi_processor_preregister_performance has been called.
         */
        dst->shared_type = _pr->performance->shared_type;

        pdomain = &(_pr->performance->domain_info);

        /* 'acpi_processor_preregister_performance' does not parse if the
         * num_processors <= 1, but Xen still requires it. Do it manually here.
         */
        if (pdomain->num_processors <= 1) {
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        dst->shared_type = CPUFREQ_SHARED_TYPE_ANY;

        }
        memcpy(&(dst->domain_info), pdomain, sizeof(struct acpi_psd_package));
        return 0;
}
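
/*
 * Copy a _PCT (performance control/status) register descriptor field by
 * field; see the comment inside for why a straight memcpy will not do.
 */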
static int xen_copy_pct_data(struct acpi_pct_register *pct,
                             struct xen_pct_register *dst_pct)
{
        /* It would be nice if you could just do 'memcpy(pct, dst_pct)' but
         * sadly the Xen structure did not have the proper padding so the
         * descriptor field takes two (dst_pct) bytes instead of one (pct).
         */
        dst_pct->descriptor = pct->descriptor;
        dst_pct->length = pct->length;
        dst_pct->space_id = pct->space_id;
        dst_pct->bit_width = pct->bit_width;
        dst_pct->bit_offset = pct->bit_offset;
        dst_pct->reserved = pct->reserved;
        dst_pct->address = pct->address;
        return 0;
}
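
/*
 * Gather the P-state data (_PPC platform limit, _PCT registers, _PSS
 * table and _PSD domain info) and upload it via
 * XENPF_set_processor_pminfo. All four pieces must be present, or the
 * upload is skipped with -ENODEV.
 */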
static int push_pxx_to_hypervisor(struct acpi_processor *_pr)
{
        int ret = 0;
        struct xen_platform_op op = {
                .cmd = XENPF_set_processor_pminfo,
                .interface_version = XENPF_INTERFACE_VERSION,
                .u.set_pminfo.id = _pr->acpi_id,
                .u.set_pminfo.type = XEN_PM_PX,
        };
        struct xen_processor_performance *dst_perf;
        struct xen_processor_px *dst_states = NULL;

        dst_perf = &op.u.set_pminfo.perf;

        dst_perf->platform_limit = _pr->performance_platform_limit;
        dst_perf->flags |= XEN_PX_PPC;
        xen_copy_pct_data(&(_pr->performance->control_register),
                          &dst_perf->control_register);
        xen_copy_pct_data(&(_pr->performance->status_register),
                          &dst_perf->status_register);
        dst_perf->flags |= XEN_PX_PCT;
        dst_states = xen_copy_pss_data(_pr, dst_perf);
        if (!IS_ERR_OR_NULL(dst_states)) {
                set_xen_guest_handle(dst_perf->states, dst_states);
                dst_perf->flags |= XEN_PX_PSS;
        }
        if (!xen_copy_psd_data(_pr, dst_perf))
                dst_perf->flags |= XEN_PX_PSD;

        if (dst_perf->flags != (XEN_PX_PSD | XEN_PX_PSS |
                                XEN_PX_PCT | XEN_PX_PPC)) {
                pr_warn(DRV_NAME "ACPI CPU%u missing some P-state data (%x), skipping.\n",
                        _pr->acpi_id, dst_perf->flags);
                ret = -ENODEV;
                goto err_free;
        }

        if (!no_hypercall)
                ret = HYPERVISOR_dom0_op(&op);

        if (!ret) {
                struct acpi_processor_performance *perf;
                unsigned int i;

                perf = _pr->performance;
                pr_debug("ACPI CPU%u - P-states uploaded.\n", _pr->acpi_id);
                for (i = 0; i < perf->state_count; i++) {
                        pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
                                 (i == perf->state ? '*' : ' '), i,
                                 (u32) perf->states[i].core_frequency,
                                 (u32) perf->states[i].power,
                                 (u32) perf->states[i].transition_latency);
                }
        } else if (ret != -EINVAL)
                /* EINVAL means the ACPI ID is incorrect - meaning the ACPI
                 * table is referencing a non-existing CPU - which can happen
                 * with broken ACPI tables. */
                pr_warn(DRV_NAME "(_PXX): Hypervisor error (%d) for ACPI CPU%u\n",
                        ret, _pr->acpi_id);
err_free:
        if (!IS_ERR_OR_NULL(dst_states))
                kfree(dst_states);

        return ret;
}
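
/*
 * Upload the C-state and P-state data for one processor, at most once
 * per ACPI ID; the acpi_ids_done bitmap (under acpi_ids_mutex) guards
 * against repeat uploads from CPU hotplug.
 */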
static int upload_pm_data(struct acpi_processor *_pr)
{
        int err = 0;

        mutex_lock(&acpi_ids_mutex);
        if (__test_and_set_bit(_pr->acpi_id, acpi_ids_done)) {
                mutex_unlock(&acpi_ids_mutex);
                return -EBUSY;
        }
        if (_pr->flags.power)
                err = push_cxx_to_hypervisor(_pr);

        if (_pr->performance && _pr->performance->states)
                err |= push_pxx_to_hypervisor(_pr);

        mutex_unlock(&acpi_ids_mutex);
        return err;
}
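
/*
 * Ask the hypervisor for every physical CPU's ACPI ID and return the
 * largest one seen, doubled to leave slack for CPU hotplug. Falls back
 * to NR_CPUS if the XENPF_get_cpuinfo hypercall fails.
 */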
static unsigned int __init get_max_acpi_id(void)
{
        struct xenpf_pcpuinfo *info;
        struct xen_platform_op op = {
                .cmd = XENPF_get_cpuinfo,
                .interface_version = XENPF_INTERFACE_VERSION,
        };
        int ret = 0;
        unsigned int i, last_cpu, max_acpi_id = 0;

        info = &op.u.pcpu_info;
        info->xen_cpuid = 0;

        ret = HYPERVISOR_dom0_op(&op);
        if (ret)
                return NR_CPUS;

        /* The max_present is the same regardless of the xen_cpuid */
        last_cpu = op.u.pcpu_info.max_present;
        for (i = 0; i <= last_cpu; i++) {
                info->xen_cpuid = i;
                ret = HYPERVISOR_dom0_op(&op);
                if (ret)
                        continue;
                max_acpi_id = max(info->acpi_id, max_acpi_id);
        }
        max_acpi_id *= 2; /* Slack for CPU hotplug support. */
        pr_debug(DRV_NAME "Max ACPI ID: %u\n", max_acpi_id);
        return max_acpi_id;
}
/*
 * The read_acpi_id and check_acpi_ids are there to support the Xen
 * oddity of virtual CPUs != physical CPUs in the initial domain.
 * The user can supply 'xen_max_vcpus=X' on the Xen hypervisor line,
 * which will bound the number of CPUs the initial domain can see.
 * In general that is OK, except it plays havoc with any of the
 * for_each_[present|online]_cpu macros, which are bound to the virtual
 * CPU amount.
 */
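/*
 * Namespace-walk callback: record each Processor object's ACPI ID in
 * acpi_id_present, and in acpi_id_cst_present if it also has a _CST
 * (or a PBLK). Always returns AE_OK so that the walk continues.
 */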
static acpi_status __init
read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
{
        u32 acpi_id;
        acpi_status status;
        acpi_object_type acpi_type;
        unsigned long long tmp;
        union acpi_object object = { 0 };
        struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
        acpi_io_address pblk = 0;

        status = acpi_get_type(handle, &acpi_type);
        if (ACPI_FAILURE(status))
                return AE_OK;

        switch (acpi_type) {
        case ACPI_TYPE_PROCESSOR:
                status = acpi_evaluate_object(handle, NULL, NULL, &buffer);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = object.processor.proc_id;
                pblk = object.processor.pblk_address;
                break;
        case ACPI_TYPE_DEVICE:
                status = acpi_evaluate_integer(handle, "_UID", NULL, &tmp);
                if (ACPI_FAILURE(status))
                        return AE_OK;
                acpi_id = tmp;
                break;
        default:
                return AE_OK;
        }
        /* There are more ACPI Processor objects than in x2APIC or MADT.
         * This can happen with incorrect ACPI SSDT declarations. */
        if (acpi_id > nr_acpi_bits) {
                pr_debug(DRV_NAME "We only have %u, trying to set %u\n",
                         nr_acpi_bits, acpi_id);
                return AE_OK;
        }
        /* OK, there is an ACPI Processor object */
        __set_bit(acpi_id, acpi_id_present);

        pr_debug(DRV_NAME "ACPI CPU%u w/ PBLK:0x%lx\n", acpi_id,
                 (unsigned long)pblk);

        status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                if (!pblk)
                        return AE_OK;
        }
        /* .. and it has a C-state */
        __set_bit(acpi_id, acpi_id_cst_present);

        return AE_OK;
}
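
/*
 * Walk the ACPI namespace for Processor objects and ACPI0007 devices,
 * then upload PM data (using pr_backup as a template) for any physical
 * CPU the restricted vCPU view did not cover; IDs already uploaded are
 * skipped via the acpi_ids_done bitmap.
 */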
static int __init check_acpi_ids(struct acpi_processor *pr_backup)
{
        if (!pr_backup)
                return -ENODEV;

        /* All online CPUs have been processed at this stage. Now verify
         * whether in fact "online CPUs" == physical CPUs.
         */
        acpi_id_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                  sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_present)
                return -ENOMEM;

        acpi_id_cst_present = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                      sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_id_cst_present) {
                kfree(acpi_id_present);
                return -ENOMEM;
        }

        acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
                            ACPI_UINT32_MAX,
                            read_acpi_id, NULL, NULL, NULL);
        acpi_get_devices("ACPI0007", read_acpi_id, NULL, NULL);

        if (!bitmap_equal(acpi_id_present, acpi_ids_done, nr_acpi_bits)) {
                unsigned int i;

                for_each_set_bit(i, acpi_id_present, nr_acpi_bits) {
                        pr_backup->acpi_id = i;
                        /* Mask out C-states if there are no _CST or PBLK */
                        pr_backup->flags.power = test_bit(i, acpi_id_cst_present);
                        (void)upload_pm_data(pr_backup);
                }
        }
        kfree(acpi_id_present);
        acpi_id_present = NULL;
        kfree(acpi_id_cst_present);
        acpi_id_cst_present = NULL;
        return 0;
}
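
/*
 * We only load in the initial domain, on firmware with an SMI command
 * port, and on Intel CPUs with Enhanced SpeedStep (EST) or AMD CPUs
 * with hardware P-state support.
 */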
static int __init check_prereq(void)
{
        struct cpuinfo_x86 *c = &cpu_data(0);

        if (!xen_initial_domain())
                return -ENODEV;

        if (!acpi_gbl_FADT.smi_command)
                return -ENODEV;

        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if (!cpu_has(c, X86_FEATURE_EST))
                        return -ENODEV;

                return 0;
        }
        if (c->x86_vendor == X86_VENDOR_AMD) {
                /* Copied from powernow-k8.h, can't include ../cpufreq/powernow
                 * as we get compile warnings for the static functions.
                 */
#define CPUID_FREQ_VOLT_CAPABILITIES    0x80000007
#define USE_HW_PSTATE                   0x00000080
                u32 eax, ebx, ecx, edx;

                cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
                if ((edx & USE_HW_PSTATE) != USE_HW_PSTATE)
                        return -ENODEV;
                return 0;
        }
        return -ENODEV;
}

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static void free_acpi_perf_data(void)
{
        unsigned int i;

        /* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
        for_each_possible_cpu(i)
                free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
                                 ->shared_cpu_map);
        free_percpu(acpi_perf_data);
}

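/*
 * Module init: register performance data with the ACPI core, upload PM
 * data for every processor the kernel knows about, then sweep the ACPI
 * namespace for any physical CPUs the restricted vCPU view missed.
 */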
static int __init xen_acpi_processor_init(void)
{
        struct acpi_processor *pr_backup = NULL;
        unsigned int i;
        int rc = check_prereq();

        if (rc)
                return rc;

        nr_acpi_bits = get_max_acpi_id() + 1;
        acpi_ids_done = kcalloc(BITS_TO_LONGS(nr_acpi_bits),
                                sizeof(unsigned long), GFP_KERNEL);
        if (!acpi_ids_done)
                return -ENOMEM;

        acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
        if (!acpi_perf_data) {
                pr_debug(DRV_NAME "Memory allocation error for acpi_perf_data.\n");
                kfree(acpi_ids_done);
                return -ENOMEM;
        }
        for_each_possible_cpu(i) {
                if (!zalloc_cpumask_var_node(
                        &per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
                        GFP_KERNEL, cpu_to_node(i))) {
                        rc = -ENOMEM;
                        goto err_out;
                }
        }

        /* Do initialization in ACPI core. It is OK to fail here. */
        (void)acpi_processor_preregister_performance(acpi_perf_data);

        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                rc = acpi_processor_register_performance(perf, i);
                if (rc)
                        goto err_out;
        }
        rc = acpi_processor_notify_smm(THIS_MODULE);
        if (rc)
                goto err_unregister;

        for_each_possible_cpu(i) {
                struct acpi_processor *_pr;

                _pr = per_cpu(processors, i /* APIC ID */);
                if (!_pr)
                        continue;

                if (!pr_backup) {
                        pr_backup = kzalloc(sizeof(struct acpi_processor),
                                            GFP_KERNEL);
                        if (pr_backup)
                                memcpy(pr_backup, _pr,
                                       sizeof(struct acpi_processor));
                }
                (void)upload_pm_data(_pr);
        }
        rc = check_acpi_ids(pr_backup);
        if (rc)
                goto err_unregister;

        kfree(pr_backup);

        return 0;
err_unregister:
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
err_out:
        /* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
        free_acpi_perf_data();
        kfree(acpi_ids_done);
        return rc;
}
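
/*
 * Module exit: unregister the per-CPU performance data and free our
 * allocations; nothing is torn down on the hypervisor side.
 */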
static void __exit xen_acpi_processor_exit(void)
{
        int i;

        kfree(acpi_ids_done);
        for_each_possible_cpu(i) {
                struct acpi_processor_performance *perf;

                perf = per_cpu_ptr(acpi_perf_data, i);
                acpi_processor_unregister_performance(perf, i);
        }
        free_acpi_perf_data();
}

MODULE_AUTHOR("Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>");
MODULE_DESCRIPTION("Xen ACPI Processor P-states (and Cx) driver which uploads PM data to Xen hypervisor");
MODULE_LICENSE("GPL");

/* We want to be loaded before the CPU freq scaling drivers are loaded.
 * They are loaded in late_initcall. */
device_initcall(xen_acpi_processor_init);
module_exit(xen_acpi_processor_exit);