#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

extern int __cpu_suspend_enter(unsigned long arg, int (*fn)(unsigned long));
95322526 | 13 | /* |
714f5992 | 14 | * This is called by __cpu_suspend_enter() to save the state, and do whatever |
95322526 LP |
15 | * flushing is required to ensure that when the CPU goes to sleep we have |
16 | * the necessary data available when the caches are not searched. | |
17 | * | |
714f5992 LP |
18 | * ptr: CPU context virtual address |
19 | * save_ptr: address of the location where the context physical address | |
20 | * must be saved | |
95322526 | 21 | */ |
714f5992 LP |
22 | void notrace __cpu_suspend_save(struct cpu_suspend_ctx *ptr, |
23 | phys_addr_t *save_ptr) | |
95322526 | 24 | { |
95322526 LP |
25 | *save_ptr = virt_to_phys(ptr); |
26 | ||
27 | cpu_do_suspend(ptr); | |
28 | /* | |
29 | * Only flush the context that must be retrieved with the MMU | |
30 | * off. VA primitives ensure the flush is applied to all | |
31 | * cache levels so context is pushed to DRAM. | |
32 | */ | |
33 | __flush_dcache_area(ptr, sizeof(*ptr)); | |
34 | __flush_dcache_area(save_ptr, sizeof(*save_ptr)); | |
95322526 LP |
35 | } |
36 | ||
65c021bb LP |
37 | /* |
38 | * This hook is provided so that cpu_suspend code can restore HW | |
39 | * breakpoints as early as possible in the resume path, before reenabling | |
40 | * debug exceptions. Code cannot be run from a CPU PM notifier since by the | |
41 | * time the notifier runs debug exceptions might have been enabled already, | |
42 | * with HW breakpoints registers content still in an unknown state. | |
43 | */ | |
44 | void (*hw_breakpoint_restore)(void *); | |
45 | void __init cpu_suspend_set_dbg_restorer(void (*hw_bp_restore)(void *)) | |
46 | { | |
47 | /* Prevent multiple restore hook initializations */ | |
48 | if (WARN_ON(hw_breakpoint_restore)) | |
49 | return; | |
50 | hw_breakpoint_restore = hw_bp_restore; | |
51 | } | |
52 | ||
95322526 | 53 | /** |
714f5992 LP |
54 | * cpu_suspend() - function to enter a low-power state |
55 | * @arg: argument to pass to CPU suspend operations | |
95322526 | 56 | * |
714f5992 LP |
57 | * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU |
58 | * operations back-end error code otherwise. | |
95322526 LP |
59 | */ |
60 | int cpu_suspend(unsigned long arg) | |
61 | { | |
714f5992 | 62 | int cpu = smp_processor_id(); |
95322526 LP |
63 | |
64 | /* | |
65 | * If cpu_ops have not been registered or suspend | |
66 | * has not been initialized, cpu_suspend call fails early. | |
67 | */ | |
68 | if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_suspend) | |
69 | return -EOPNOTSUPP; | |
714f5992 LP |
70 | return cpu_ops[cpu]->cpu_suspend(arg); |
71 | } | |
72 | ||
73 | /* | |
74 | * __cpu_suspend | |
75 | * | |
76 | * arg: argument to pass to the finisher function | |
77 | * fn: finisher function pointer | |
78 | * | |
79 | */ | |
80 | int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long)) | |
81 | { | |
82 | struct mm_struct *mm = current->active_mm; | |
83 | int ret; | |
84 | unsigned long flags; | |
95322526 LP |
85 | |
86 | /* | |
87 | * From this point debug exceptions are disabled to prevent | |
88 | * updates to mdscr register (saved and restored along with | |
89 | * general purpose registers) from kernel debuggers. | |
90 | */ | |
91 | local_dbg_save(flags); | |
92 | ||
93 | /* | |
94 | * mm context saved on the stack, it will be restored when | |
95 | * the cpu comes out of reset through the identity mapped | |
96 | * page tables, so that the thread address space is properly | |
97 | * set-up on function return. | |
98 | */ | |
714f5992 | 99 | ret = __cpu_suspend_enter(arg, fn); |
95322526 LP |
100 | if (ret == 0) { |
101 | cpu_switch_mm(mm->pgd, mm); | |
102 | flush_tlb_all(); | |
fb4a9602 LP |
103 | |
104 | /* | |
105 | * Restore per-cpu offset before any kernel | |
106 | * subsystem relying on it has a chance to run. | |
107 | */ | |
714f5992 | 108 | set_my_cpu_offset(per_cpu_offset(smp_processor_id())); |
fb4a9602 | 109 | |
65c021bb LP |
110 | /* |
111 | * Restore HW breakpoint registers to sane values | |
112 | * before debug exceptions are possibly reenabled | |
113 | * through local_dbg_restore. | |
114 | */ | |
115 | if (hw_breakpoint_restore) | |
116 | hw_breakpoint_restore(NULL); | |
95322526 LP |
117 | } |
118 | ||
119 | /* | |
120 | * Restore pstate flags. OS lock and mdscr have been already | |
121 | * restored, so from this point onwards, debugging is fully | |
122 | * renabled if it was enabled when core started shutdown. | |
123 | */ | |
124 | local_dbg_restore(flags); | |
125 | ||
126 | return ret; | |
127 | } | |
128 | ||
129 | extern struct sleep_save_sp sleep_save_sp; | |
130 | extern phys_addr_t sleep_idmap_phys; | |
131 | ||
18ab7db6 | 132 | static int __init cpu_suspend_init(void) |
95322526 LP |
133 | { |
134 | void *ctx_ptr; | |
135 | ||
136 | /* ctx_ptr is an array of physical addresses */ | |
137 | ctx_ptr = kcalloc(mpidr_hash_size(), sizeof(phys_addr_t), GFP_KERNEL); | |
138 | ||
139 | if (WARN_ON(!ctx_ptr)) | |
140 | return -ENOMEM; | |
141 | ||
142 | sleep_save_sp.save_ptr_stash = ctx_ptr; | |
143 | sleep_save_sp.save_ptr_stash_phys = virt_to_phys(ctx_ptr); | |
144 | sleep_idmap_phys = virt_to_phys(idmap_pg_dir); | |
145 | __flush_dcache_area(&sleep_save_sp, sizeof(struct sleep_save_sp)); | |
146 | __flush_dcache_area(&sleep_idmap_phys, sizeof(sleep_idmap_phys)); | |
147 | ||
148 | return 0; | |
149 | } | |
150 | early_initcall(cpu_suspend_init); |