/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright: (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
	unsigned int id;
	asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
	return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
	unsigned mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

	/*
	 * We now have a piece of stack borrowed from the init task's.
	 * Let's also switch to init_mm right away to match it.
	 */
	cpu_switch_mm(init_mm.pgd, &init_mm);

	pr_debug("%s\n", __func__);

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	/*
	 * Our state has been saved at this point. Let's release our
	 * inbound CPU.
	 */
	mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
	sev();
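
	/*
	 * The inbound CPU was powered up earlier with a NULL entry vector,
	 * so it is presumably parked in the MCPM entry gate, waiting (WFE)
	 * for its vector to be set. Publishing cpu_resume and issuing SEV
	 * above releases it straight into our saved context.
	 */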

	/*
	 * From this point, we must assume that our counterpart CPU might
	 * have taken over in its parallel world already, as if execution
	 * just returned from cpu_suspend(). It is therefore important to
	 * be very careful not to make any change the other guy is not
	 * expecting. This is why we need stack isolation.
	 *
	 * Fancy under cover tasks could be performed here. For now
	 * we have none.
	 */

	/* Let's put ourselves down. */
	mcpm_cpu_power_down();

	/* should never get here */
	BUG();
}

/*
 * Stack isolation. To ensure 'current' remains valid, we just borrow
 * a slice of the init/idle task which should be fairly lightly used.
 * The borrowed area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line.
 */
#define STACK_SIZE 256
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
	unsigned int mpidr = read_mpidr();
	unsigned int cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu_index = cpuid + clusterid * MAX_CPUS_PER_CLUSTER;
	void *stack = &init_thread_info + 1;
	stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
	stack += cpu_index * STACK_SIZE + STACK_SIZE;
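	/*
	 * Each of the 2 * MAX_CPUS_PER_CLUSTER possible logical CPUs gets
	 * its own STACK_SIZE slice; 'stack' now points at the top of this
	 * CPU's slice, as expected by ARM's full-descending stack.
	 */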
	call_with_stack(bL_do_switch, (void *)_arg, stack);
	BUG();
}

/*
 * Generic switcher interface
 */

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
	unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
	int ret;

	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	ob_cluster = clusterid;
	ib_cluster = clusterid ^ 1;

	if (new_cluster_id == clusterid)
		return 0;

	pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

	/* Close the gate for our entry vectors */
	mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
	mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

	/*
	 * Let's wake up the inbound CPU now in case it requires some delay
	 * to come online, but leave it gated in our entry vector code.
	 */
	ret = mcpm_cpu_power_up(cpuid, ib_cluster);
	if (ret) {
		pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
		return ret;
	}

	/*
	 * From this point we are entering the switch critical zone
	 * and can't take any interrupts anymore.
	 */
	local_irq_disable();
	local_fiq_disable();

	this_cpu = smp_processor_id();

	/* redirect GIC's SGIs to our counterpart */
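	/* The interface number assumes 4 GIC CPU interfaces per cluster. */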
	gic_migrate_target(cpuid + ib_cluster*4);

	/*
	 * Raise an SGI on the inbound CPU to make sure it doesn't stall
	 * in a possible WFI, such as in mcpm_cpu_power_down().
	 */
	arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));

	ret = cpu_pm_enter();

	/* we cannot tolerate errors at this point */
	if (ret)
		panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

	/* Flip the cluster in the CPU logical map for this CPU. */
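	/* (Bit 8 is the lowest bit of MPIDR affinity level 1: the cluster.) */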
	cpu_logical_map(this_cpu) ^= (1 << 8);

	/* Let's do the actual CPU switch. */
	ret = cpu_suspend(0, bL_switchpoint);
	if (ret > 0)
		panic("%s: cpu_suspend() returned %d\n", __func__, ret);

	/* We are executing on the inbound CPU at this point */
	mpidr = read_mpidr();
	cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
	BUG_ON(clusterid != ib_cluster);

	mcpm_cpu_powered_up();

	ret = cpu_pm_exit();

	local_fiq_enable();
	local_irq_enable();

	if (ret)
		pr_err("%s exiting with error %d\n", __func__, ret);
	return ret;
}

struct switch_args {
	unsigned int cluster;
	struct work_struct work;
};

static void __bL_switch_to(struct work_struct *work)
{
	struct switch_args *args = container_of(work, struct switch_args, work);
	bL_switch_to(args->cluster);
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU. If the given
 * CPU is the same as the calling CPU then the switch happens right away.
 * Otherwise the request is put on a work queue to be scheduled on the
 * remote CPU.
 */
void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
	unsigned int this_cpu = get_cpu();
	struct switch_args args;

	if (cpu == this_cpu) {
		bL_switch_to(new_cluster_id);
		put_cpu();
		return;
	}
	put_cpu();

	args.cluster = new_cluster_id;
	INIT_WORK_ONSTACK(&args.work, __bL_switch_to);
	schedule_work_on(cpu, &args.work);
	flush_work(&args.work);
}
EXPORT_SYMBOL_GPL(bL_switch_request);
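
/*
 * Usage sketch (hypothetical caller, not part of this driver): a cpufreq
 * backend or a test harness could move logical CPU 0 onto cluster 1 with:
 *
 *	bL_switch_request(0, 1);
 *
 * The call returns once the switch attempt has completed, or immediately
 * if CPU 0 is already running on cluster 1.
 */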