arm64: mm: ensure patched kernel text is fetched from PoU
[deliverable/linux.git] / arch / arm64 / kernel / psci.c
1 /*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright (C) 2013 ARM Limited
12 *
13 * Author: Will Deacon <will.deacon@arm.com>
14 */
15
16 #define pr_fmt(fmt) "psci: " fmt
17
18 #include <linux/init.h>
19 #include <linux/of.h>
20 #include <linux/smp.h>
21 #include <linux/reboot.h>
22 #include <linux/pm.h>
23 #include <linux/delay.h>
24 #include <linux/slab.h>
25 #include <uapi/linux/psci.h>
26
27 #include <asm/compiler.h>
28 #include <asm/cputype.h>
29 #include <asm/cpu_ops.h>
30 #include <asm/errno.h>
31 #include <asm/psci.h>
32 #include <asm/smp_plat.h>
33 #include <asm/suspend.h>
34 #include <asm/system_misc.h>
35
36 #define PSCI_POWER_STATE_TYPE_STANDBY 0
37 #define PSCI_POWER_STATE_TYPE_POWER_DOWN 1
38
39 static bool psci_power_state_loses_context(u32 state)
40 {
41 return state & PSCI_0_2_POWER_STATE_TYPE_MASK;
42 }
43
44 static bool psci_power_state_is_valid(u32 state)
45 {
46 const u32 valid_mask = PSCI_0_2_POWER_STATE_ID_MASK |
47 PSCI_0_2_POWER_STATE_TYPE_MASK |
48 PSCI_0_2_POWER_STATE_AFFL_MASK;
49
50 return !(state & ~valid_mask);
51 }
52
/*
 * The CPU any Trusted OS is resident on. The trusted OS may reject CPU_OFF
 * calls to its resident CPU, so we must avoid issuing those. We never migrate
 * a Trusted OS even if it claims to be capable of migration -- doing so will
 * require cooperation with a Trusted OS driver.
 *
 * -1 means "no resident Trusted OS known"; set by psci_init_migrate().
 */
static int resident_cpu = -1;
60
/*
 * Function pointers for the PSCI calls this file may issue; populated
 * either from DT properties (v0.1, psci_0_1_init) or with the standard
 * v0.2 function IDs (psci_0_2_set_functions). A NULL member means the
 * operation is unavailable.
 */
struct psci_operations {
	int (*cpu_suspend)(u32 state, unsigned long entry_point);
	int (*cpu_off)(u32 state);
	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
	int (*migrate)(unsigned long cpuid);
	int (*affinity_info)(unsigned long target_affinity,
			unsigned long lowest_affinity_level);
	int (*migrate_info_type)(void);
};
70
static struct psci_operations psci_ops;

/*
 * Conduit into the firmware: either the HVC or the SMC stub (both
 * implemented in assembly), selected from DT ("method") or ACPI.
 */
typedef unsigned long (psci_fn)(unsigned long, unsigned long,
				unsigned long, unsigned long);
asmlinkage psci_fn __invoke_psci_fn_hvc;
asmlinkage psci_fn __invoke_psci_fn_smc;
static psci_fn *invoke_psci_fn;
78
/* Index into psci_function_id[] for calls whose ID varies by version. */
enum psci_function {
	PSCI_FN_CPU_SUSPEND,
	PSCI_FN_CPU_ON,
	PSCI_FN_CPU_OFF,
	PSCI_FN_MIGRATE,
	PSCI_FN_MAX,
};

/* Per-cpu array of idle-state suspend parameters, parsed from the DT. */
static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state);

/* Firmware function IDs, indexed by enum psci_function. */
static u32 psci_function_id[PSCI_FN_MAX];
90
91 static int psci_to_linux_errno(int errno)
92 {
93 switch (errno) {
94 case PSCI_RET_SUCCESS:
95 return 0;
96 case PSCI_RET_NOT_SUPPORTED:
97 return -EOPNOTSUPP;
98 case PSCI_RET_INVALID_PARAMS:
99 return -EINVAL;
100 case PSCI_RET_DENIED:
101 return -EPERM;
102 };
103
104 return -EINVAL;
105 }
106
107 static u32 psci_get_version(void)
108 {
109 return invoke_psci_fn(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
110 }
111
112 static int psci_cpu_suspend(u32 state, unsigned long entry_point)
113 {
114 int err;
115 u32 fn;
116
117 fn = psci_function_id[PSCI_FN_CPU_SUSPEND];
118 err = invoke_psci_fn(fn, state, entry_point, 0);
119 return psci_to_linux_errno(err);
120 }
121
122 static int psci_cpu_off(u32 state)
123 {
124 int err;
125 u32 fn;
126
127 fn = psci_function_id[PSCI_FN_CPU_OFF];
128 err = invoke_psci_fn(fn, state, 0, 0);
129 return psci_to_linux_errno(err);
130 }
131
132 static int psci_cpu_on(unsigned long cpuid, unsigned long entry_point)
133 {
134 int err;
135 u32 fn;
136
137 fn = psci_function_id[PSCI_FN_CPU_ON];
138 err = invoke_psci_fn(fn, cpuid, entry_point, 0);
139 return psci_to_linux_errno(err);
140 }
141
142 static int psci_migrate(unsigned long cpuid)
143 {
144 int err;
145 u32 fn;
146
147 fn = psci_function_id[PSCI_FN_MIGRATE];
148 err = invoke_psci_fn(fn, cpuid, 0, 0);
149 return psci_to_linux_errno(err);
150 }
151
152 static int psci_affinity_info(unsigned long target_affinity,
153 unsigned long lowest_affinity_level)
154 {
155 return invoke_psci_fn(PSCI_0_2_FN64_AFFINITY_INFO, target_affinity,
156 lowest_affinity_level, 0);
157 }
158
159 static int psci_migrate_info_type(void)
160 {
161 return invoke_psci_fn(PSCI_0_2_FN_MIGRATE_INFO_TYPE, 0, 0, 0);
162 }
163
164 static unsigned long psci_migrate_info_up_cpu(void)
165 {
166 return invoke_psci_fn(PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU, 0, 0, 0);
167 }
168
169 static int __maybe_unused cpu_psci_cpu_init_idle(unsigned int cpu)
170 {
171 int i, ret, count = 0;
172 u32 *psci_states;
173 struct device_node *state_node, *cpu_node;
174
175 cpu_node = of_get_cpu_node(cpu, NULL);
176 if (!cpu_node)
177 return -ENODEV;
178
179 /*
180 * If the PSCI cpu_suspend function hook has not been initialized
181 * idle states must not be enabled, so bail out
182 */
183 if (!psci_ops.cpu_suspend)
184 return -EOPNOTSUPP;
185
186 /* Count idle states */
187 while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states",
188 count))) {
189 count++;
190 of_node_put(state_node);
191 }
192
193 if (!count)
194 return -ENODEV;
195
196 psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL);
197 if (!psci_states)
198 return -ENOMEM;
199
200 for (i = 0; i < count; i++) {
201 u32 state;
202
203 state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i);
204
205 ret = of_property_read_u32(state_node,
206 "arm,psci-suspend-param",
207 &state);
208 if (ret) {
209 pr_warn(" * %s missing arm,psci-suspend-param property\n",
210 state_node->full_name);
211 of_node_put(state_node);
212 goto free_mem;
213 }
214
215 of_node_put(state_node);
216 pr_debug("psci-power-state %#x index %d\n", state, i);
217 if (!psci_power_state_is_valid(state)) {
218 pr_warn("Invalid PSCI power state %#x\n", state);
219 ret = -EINVAL;
220 goto free_mem;
221 }
222 psci_states[i] = state;
223 }
224 /* Idle states parsed correctly, initialize per-cpu pointer */
225 per_cpu(psci_power_state, cpu) = psci_states;
226 return 0;
227
228 free_mem:
229 kfree(psci_states);
230 return ret;
231 }
232
233 static int get_set_conduit_method(struct device_node *np)
234 {
235 const char *method;
236
237 pr_info("probing for conduit method from DT.\n");
238
239 if (of_property_read_string(np, "method", &method)) {
240 pr_warn("missing \"method\" property\n");
241 return -ENXIO;
242 }
243
244 if (!strcmp("hvc", method)) {
245 invoke_psci_fn = __invoke_psci_fn_hvc;
246 } else if (!strcmp("smc", method)) {
247 invoke_psci_fn = __invoke_psci_fn_smc;
248 } else {
249 pr_warn("invalid \"method\" property: %s\n", method);
250 return -EINVAL;
251 }
252 return 0;
253 }
254
255 static void psci_sys_reset(enum reboot_mode reboot_mode, const char *cmd)
256 {
257 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_RESET, 0, 0, 0);
258 }
259
260 static void psci_sys_poweroff(void)
261 {
262 invoke_psci_fn(PSCI_0_2_FN_SYSTEM_OFF, 0, 0, 0);
263 }
264
265 /*
266 * Detect the presence of a resident Trusted OS which may cause CPU_OFF to
267 * return DENIED (which would be fatal).
268 */
269 static void __init psci_init_migrate(void)
270 {
271 unsigned long cpuid;
272 int type, cpu;
273
274 type = psci_ops.migrate_info_type();
275
276 if (type == PSCI_0_2_TOS_MP) {
277 pr_info("Trusted OS migration not required\n");
278 return;
279 }
280
281 if (type == PSCI_RET_NOT_SUPPORTED) {
282 pr_info("MIGRATE_INFO_TYPE not supported.\n");
283 return;
284 }
285
286 if (type != PSCI_0_2_TOS_UP_MIGRATE &&
287 type != PSCI_0_2_TOS_UP_NO_MIGRATE) {
288 pr_err("MIGRATE_INFO_TYPE returned unknown type (%d)\n", type);
289 return;
290 }
291
292 cpuid = psci_migrate_info_up_cpu();
293 if (cpuid & ~MPIDR_HWID_BITMASK) {
294 pr_warn("MIGRATE_INFO_UP_CPU reported invalid physical ID (0x%lx)\n",
295 cpuid);
296 return;
297 }
298
299 cpu = get_logical_index(cpuid);
300 resident_cpu = cpu >= 0 ? cpu : -1;
301
302 pr_info("Trusted OS resident on physical CPU 0x%lx\n", cpuid);
303 }
304
305 static void __init psci_0_2_set_functions(void)
306 {
307 pr_info("Using standard PSCI v0.2 function IDs\n");
308 psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
309 psci_ops.cpu_suspend = psci_cpu_suspend;
310
311 psci_function_id[PSCI_FN_CPU_OFF] = PSCI_0_2_FN_CPU_OFF;
312 psci_ops.cpu_off = psci_cpu_off;
313
314 psci_function_id[PSCI_FN_CPU_ON] = PSCI_0_2_FN64_CPU_ON;
315 psci_ops.cpu_on = psci_cpu_on;
316
317 psci_function_id[PSCI_FN_MIGRATE] = PSCI_0_2_FN64_MIGRATE;
318 psci_ops.migrate = psci_migrate;
319
320 psci_ops.affinity_info = psci_affinity_info;
321
322 psci_ops.migrate_info_type = psci_migrate_info_type;
323
324 arm_pm_restart = psci_sys_reset;
325
326 pm_power_off = psci_sys_poweroff;
327 }
328
329 /*
330 * Probe function for PSCI firmware versions >= 0.2
331 */
332 static int __init psci_probe(void)
333 {
334 u32 ver = psci_get_version();
335
336 pr_info("PSCIv%d.%d detected in firmware.\n",
337 PSCI_VERSION_MAJOR(ver),
338 PSCI_VERSION_MINOR(ver));
339
340 if (PSCI_VERSION_MAJOR(ver) == 0 && PSCI_VERSION_MINOR(ver) < 2) {
341 pr_err("Conflicting PSCI version detected.\n");
342 return -EINVAL;
343 }
344
345 psci_0_2_set_functions();
346
347 psci_init_migrate();
348
349 return 0;
350 }
351
352 typedef int (*psci_initcall_t)(const struct device_node *);
353
354 /*
355 * PSCI init function for PSCI versions >=0.2
356 *
357 * Probe based on PSCI PSCI_VERSION function
358 */
359 static int __init psci_0_2_init(struct device_node *np)
360 {
361 int err;
362
363 err = get_set_conduit_method(np);
364
365 if (err)
366 goto out_put_node;
367 /*
368 * Starting with v0.2, the PSCI specification introduced a call
369 * (PSCI_VERSION) that allows probing the firmware version, so
370 * that PSCI function IDs and version specific initialization
371 * can be carried out according to the specific version reported
372 * by firmware
373 */
374 err = psci_probe();
375
376 out_put_node:
377 of_node_put(np);
378 return err;
379 }
380
381 /*
382 * PSCI < v0.2 get PSCI Function IDs via DT.
383 */
384 static int __init psci_0_1_init(struct device_node *np)
385 {
386 u32 id;
387 int err;
388
389 err = get_set_conduit_method(np);
390
391 if (err)
392 goto out_put_node;
393
394 pr_info("Using PSCI v0.1 Function IDs from DT\n");
395
396 if (!of_property_read_u32(np, "cpu_suspend", &id)) {
397 psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
398 psci_ops.cpu_suspend = psci_cpu_suspend;
399 }
400
401 if (!of_property_read_u32(np, "cpu_off", &id)) {
402 psci_function_id[PSCI_FN_CPU_OFF] = id;
403 psci_ops.cpu_off = psci_cpu_off;
404 }
405
406 if (!of_property_read_u32(np, "cpu_on", &id)) {
407 psci_function_id[PSCI_FN_CPU_ON] = id;
408 psci_ops.cpu_on = psci_cpu_on;
409 }
410
411 if (!of_property_read_u32(np, "migrate", &id)) {
412 psci_function_id[PSCI_FN_MIGRATE] = id;
413 psci_ops.migrate = psci_migrate;
414 }
415
416 out_put_node:
417 of_node_put(np);
418 return err;
419 }
420
/* Compatible strings: v0.1 reads function IDs from DT, v0.2 uses standard ones. */
static const struct of_device_id psci_of_match[] __initconst = {
	{ .compatible = "arm,psci", .data = psci_0_1_init},
	{ .compatible = "arm,psci-0.2", .data = psci_0_2_init},
	{},
};
426
427 int __init psci_dt_init(void)
428 {
429 struct device_node *np;
430 const struct of_device_id *matched_np;
431 psci_initcall_t init_fn;
432
433 np = of_find_matching_node_and_match(NULL, psci_of_match, &matched_np);
434
435 if (!np)
436 return -ENODEV;
437
438 init_fn = (psci_initcall_t)matched_np->data;
439 return init_fn(np);
440 }
441
442 #ifdef CONFIG_ACPI
443 /*
444 * We use PSCI 0.2+ when ACPI is deployed on ARM64 and it's
445 * explicitly clarified in SBBR
446 */
447 int __init psci_acpi_init(void)
448 {
449 if (!acpi_psci_present()) {
450 pr_info("is not implemented in ACPI.\n");
451 return -EOPNOTSUPP;
452 }
453
454 pr_info("probing for conduit method from ACPI.\n");
455
456 if (acpi_psci_use_hvc())
457 invoke_psci_fn = __invoke_psci_fn_hvc;
458 else
459 invoke_psci_fn = __invoke_psci_fn_smc;
460
461 return psci_probe();
462 }
463 #endif
464
465 static int __init cpu_psci_cpu_init(unsigned int cpu)
466 {
467 return 0;
468 }
469
470 static int __init cpu_psci_cpu_prepare(unsigned int cpu)
471 {
472 if (!psci_ops.cpu_on) {
473 pr_err("no cpu_on method, not booting CPU%d\n", cpu);
474 return -ENODEV;
475 }
476
477 return 0;
478 }
479
480 static int cpu_psci_cpu_boot(unsigned int cpu)
481 {
482 int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa(secondary_entry));
483 if (err)
484 pr_err("failed to boot CPU%d (%d)\n", cpu, err);
485
486 return err;
487 }
488
489 #ifdef CONFIG_HOTPLUG_CPU
490 static bool psci_tos_resident_on(int cpu)
491 {
492 return cpu == resident_cpu;
493 }
494
495 static int cpu_psci_cpu_disable(unsigned int cpu)
496 {
497 /* Fail early if we don't have CPU_OFF support */
498 if (!psci_ops.cpu_off)
499 return -EOPNOTSUPP;
500
501 /* Trusted OS will deny CPU_OFF */
502 if (psci_tos_resident_on(cpu))
503 return -EPERM;
504
505 return 0;
506 }
507
508 static void cpu_psci_cpu_die(unsigned int cpu)
509 {
510 int ret;
511 /*
512 * There are no known implementations of PSCI actually using the
513 * power state field, pass a sensible default for now.
514 */
515 u32 state = PSCI_POWER_STATE_TYPE_POWER_DOWN <<
516 PSCI_0_2_POWER_STATE_TYPE_SHIFT;
517
518 ret = psci_ops.cpu_off(state);
519
520 pr_crit("unable to power off CPU%u (%d)\n", cpu, ret);
521 }
522
523 static int cpu_psci_cpu_kill(unsigned int cpu)
524 {
525 int err, i;
526
527 if (!psci_ops.affinity_info)
528 return 0;
529 /*
530 * cpu_kill could race with cpu_die and we can
531 * potentially end up declaring this cpu undead
532 * while it is dying. So, try again a few times.
533 */
534
535 for (i = 0; i < 10; i++) {
536 err = psci_ops.affinity_info(cpu_logical_map(cpu), 0);
537 if (err == PSCI_0_2_AFFINITY_LEVEL_OFF) {
538 pr_info("CPU%d killed.\n", cpu);
539 return 0;
540 }
541
542 msleep(10);
543 pr_info("Retrying again to check for CPU kill\n");
544 }
545
546 pr_warn("CPU%d may not have shut down cleanly (AFFINITY_INFO reports %d)\n",
547 cpu, err);
548 return -ETIMEDOUT;
549 }
550 #endif
551
552 static int psci_suspend_finisher(unsigned long index)
553 {
554 u32 *state = __this_cpu_read(psci_power_state);
555
556 return psci_ops.cpu_suspend(state[index - 1],
557 virt_to_phys(cpu_resume));
558 }
559
560 static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
561 {
562 int ret;
563 u32 *state = __this_cpu_read(psci_power_state);
564 /*
565 * idle state index 0 corresponds to wfi, should never be called
566 * from the cpu_suspend operations
567 */
568 if (WARN_ON_ONCE(!index))
569 return -EINVAL;
570
571 if (!psci_power_state_loses_context(state[index - 1]))
572 ret = psci_ops.cpu_suspend(state[index - 1], 0);
573 else
574 ret = cpu_suspend(index, psci_suspend_finisher);
575
576 return ret;
577 }
578
/* CPU operations backed by PSCI firmware calls: boot, hotplug and idle. */
const struct cpu_operations cpu_psci_ops = {
	.name = "psci",
#ifdef CONFIG_CPU_IDLE
	.cpu_init_idle = cpu_psci_cpu_init_idle,
	.cpu_suspend = cpu_psci_cpu_suspend,
#endif
	.cpu_init = cpu_psci_cpu_init,
	.cpu_prepare = cpu_psci_cpu_prepare,
	.cpu_boot = cpu_psci_cpu_boot,
#ifdef CONFIG_HOTPLUG_CPU
	.cpu_disable = cpu_psci_cpu_disable,
	.cpu_die = cpu_psci_cpu_die,
	.cpu_kill = cpu_psci_cpu_kill,
#endif
};
594
This page took 0.06654 seconds and 5 git commands to generate.