/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 *                         Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

#define BFIN_IPI_RESCHEDULE	0
#define BFIN_IPI_CALL_FUNC	1
#define BFIN_IPI_CPU_STOP	2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};

struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}

static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}

static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;
	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, msg->call_struct.waitmask);
	} else
		kfree(msg);
}
138 | ||
139 | static irqreturn_t ipi_handler(int irq, void *dev_instance) | |
140 | { | |
86f2008b | 141 | struct ipi_message *msg; |
6b3087c6 GY |
142 | struct ipi_message_queue *msg_queue; |
143 | unsigned int cpu = smp_processor_id(); | |
144 | ||
145 | platform_clear_ipi(cpu); | |
146 | ||
147 | msg_queue = &__get_cpu_var(ipi_msg_queue); | |
148 | msg_queue->count++; | |
149 | ||
150 | spin_lock(&msg_queue->lock); | |
86f2008b SZ |
151 | while (!list_empty(&msg_queue->head)) { |
152 | msg = list_entry(msg_queue->head.next, typeof(*msg), list); | |
6b3087c6 GY |
153 | list_del(&msg->list); |
154 | switch (msg->type) { | |
155 | case BFIN_IPI_RESCHEDULE: | |
156 | /* That's the easiest one; leave it to | |
157 | * return_from_int. */ | |
158 | kfree(msg); | |
159 | break; | |
160 | case BFIN_IPI_CALL_FUNC: | |
0bf3d933 | 161 | spin_unlock(&msg_queue->lock); |
6b3087c6 | 162 | ipi_call_function(cpu, msg); |
0bf3d933 | 163 | spin_lock(&msg_queue->lock); |
6b3087c6 GY |
164 | break; |
165 | case BFIN_IPI_CPU_STOP: | |
0bf3d933 | 166 | spin_unlock(&msg_queue->lock); |
6b3087c6 | 167 | ipi_cpu_stop(cpu); |
0bf3d933 | 168 | spin_lock(&msg_queue->lock); |
6b3087c6 GY |
169 | kfree(msg); |
170 | break; | |
171 | default: | |
172 | printk(KERN_CRIT "CPU%u: Unknown IPI message \ | |
173 | 0x%lx\n", cpu, msg->type); | |
174 | kfree(msg); | |
175 | break; | |
176 | } | |
177 | } | |
178 | spin_unlock(&msg_queue->lock); | |
179 | return IRQ_HANDLED; | |
180 | } | |
181 | ||
182 | static void ipi_queue_init(void) | |
183 | { | |
184 | unsigned int cpu; | |
185 | struct ipi_message_queue *msg_queue; | |
186 | for_each_possible_cpu(cpu) { | |
187 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | |
188 | INIT_LIST_HEAD(&msg_queue->head); | |
189 | spin_lock_init(&msg_queue->lock); | |
190 | msg_queue->count = 0; | |
191 | } | |
192 | } | |
193 | ||
194 | int smp_call_function(void (*func)(void *info), void *info, int wait) | |
195 | { | |
196 | unsigned int cpu; | |
197 | cpumask_t callmap; | |
198 | unsigned long flags; | |
199 | struct ipi_message_queue *msg_queue; | |
200 | struct ipi_message *msg; | |
201 | ||
202 | callmap = cpu_online_map; | |
203 | cpu_clear(smp_processor_id(), callmap); | |
204 | if (cpus_empty(callmap)) | |
205 | return 0; | |
206 | ||
207 | msg = kmalloc(sizeof(*msg), GFP_ATOMIC); | |
994e9a2e JL |
208 | if (!msg) |
209 | return -ENOMEM; | |
6b3087c6 GY |
210 | INIT_LIST_HEAD(&msg->list); |
211 | msg->call_struct.func = func; | |
212 | msg->call_struct.info = info; | |
213 | msg->call_struct.wait = wait; | |
214 | msg->call_struct.pending = callmap; | |
215 | msg->call_struct.waitmask = callmap; | |
216 | msg->type = BFIN_IPI_CALL_FUNC; | |
217 | ||
218 | for_each_cpu_mask(cpu, callmap) { | |
219 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | |
220 | spin_lock_irqsave(&msg_queue->lock, flags); | |
86f2008b | 221 | list_add_tail(&msg->list, &msg_queue->head); |
6b3087c6 GY |
222 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
223 | platform_send_ipi_cpu(cpu); | |
224 | } | |
225 | if (wait) { | |
226 | while (!cpus_empty(msg->call_struct.waitmask)) | |
227 | blackfin_dcache_invalidate_range( | |
228 | (unsigned long)(&msg->call_struct.waitmask), | |
229 | (unsigned long)(&msg->call_struct.waitmask)); | |
c9784ebb YL |
230 | #ifdef __ARCH_SYNC_CORE_DCACHE |
231 | /* | |
232 | * Invalidate D cache in case shared data was changed by | |
233 | * other processors to ensure cache coherence. | |
234 | */ | |
235 | resync_core_dcache(); | |
236 | #endif | |
6b3087c6 GY |
237 | kfree(msg); |
238 | } | |
239 | return 0; | |
240 | } | |
241 | EXPORT_SYMBOL_GPL(smp_call_function); | |
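
/*
 * Usage sketch (illustrative only, not part of the original source):
 * run a routine on all other online cores and wait for completion.
 * The callback executes in IPI (interrupt) context on the remote
 * cores, so it must not sleep.  The names example_counter and
 * bump_counter are hypothetical.
 */
#if 0
static atomic_t example_counter = ATOMIC_INIT(0);

static void bump_counter(void *info)
{
	atomic_inc((atomic_t *)info);	/* runs on each remote core */
}

static void example(void)
{
	/* wait == 1: spin until every remote core has run the callback */
	if (smp_call_function(bump_counter, &example_counter, 1))
		printk(KERN_WARNING "example: smp_call_function failed\n");
}
#endif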
242 | ||
243 | int smp_call_function_single(int cpuid, void (*func) (void *info), void *info, | |
244 | int wait) | |
245 | { | |
246 | unsigned int cpu = cpuid; | |
247 | cpumask_t callmap; | |
248 | unsigned long flags; | |
249 | struct ipi_message_queue *msg_queue; | |
250 | struct ipi_message *msg; | |
251 | ||
252 | if (cpu_is_offline(cpu)) | |
253 | return 0; | |
254 | cpus_clear(callmap); | |
255 | cpu_set(cpu, callmap); | |
256 | ||
257 | msg = kmalloc(sizeof(*msg), GFP_ATOMIC); | |
994e9a2e JL |
258 | if (!msg) |
259 | return -ENOMEM; | |
6b3087c6 GY |
260 | INIT_LIST_HEAD(&msg->list); |
261 | msg->call_struct.func = func; | |
262 | msg->call_struct.info = info; | |
263 | msg->call_struct.wait = wait; | |
264 | msg->call_struct.pending = callmap; | |
265 | msg->call_struct.waitmask = callmap; | |
266 | msg->type = BFIN_IPI_CALL_FUNC; | |
267 | ||
268 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | |
269 | spin_lock_irqsave(&msg_queue->lock, flags); | |
86f2008b | 270 | list_add_tail(&msg->list, &msg_queue->head); |
6b3087c6 GY |
271 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
272 | platform_send_ipi_cpu(cpu); | |
273 | ||
274 | if (wait) { | |
275 | while (!cpus_empty(msg->call_struct.waitmask)) | |
276 | blackfin_dcache_invalidate_range( | |
277 | (unsigned long)(&msg->call_struct.waitmask), | |
278 | (unsigned long)(&msg->call_struct.waitmask)); | |
c9784ebb YL |
279 | #ifdef __ARCH_SYNC_CORE_DCACHE |
280 | /* | |
281 | * Invalidate D cache in case shared data was changed by | |
282 | * other processors to ensure cache coherence. | |
283 | */ | |
284 | resync_core_dcache(); | |
285 | #endif | |
6b3087c6 GY |
286 | kfree(msg); |
287 | } | |
288 | return 0; | |
289 | } | |
290 | EXPORT_SYMBOL_GPL(smp_call_function_single); | |
291 | ||
292 | void smp_send_reschedule(int cpu) | |
293 | { | |
294 | unsigned long flags; | |
295 | struct ipi_message_queue *msg_queue; | |
296 | struct ipi_message *msg; | |
297 | ||
298 | if (cpu_is_offline(cpu)) | |
299 | return; | |
300 | ||
05bad36c | 301 | msg = kzalloc(sizeof(*msg), GFP_ATOMIC); |
994e9a2e JL |
302 | if (!msg) |
303 | return; | |
6b3087c6 GY |
304 | INIT_LIST_HEAD(&msg->list); |
305 | msg->type = BFIN_IPI_RESCHEDULE; | |
306 | ||
307 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | |
308 | spin_lock_irqsave(&msg_queue->lock, flags); | |
86f2008b | 309 | list_add_tail(&msg->list, &msg_queue->head); |
6b3087c6 GY |
310 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
311 | platform_send_ipi_cpu(cpu); | |
312 | ||
313 | return; | |
314 | } | |
315 | ||
316 | void smp_send_stop(void) | |
317 | { | |
318 | unsigned int cpu; | |
319 | cpumask_t callmap; | |
320 | unsigned long flags; | |
321 | struct ipi_message_queue *msg_queue; | |
322 | struct ipi_message *msg; | |
323 | ||
324 | callmap = cpu_online_map; | |
325 | cpu_clear(smp_processor_id(), callmap); | |
326 | if (cpus_empty(callmap)) | |
327 | return; | |
328 | ||
05bad36c | 329 | msg = kzalloc(sizeof(*msg), GFP_ATOMIC); |
994e9a2e JL |
330 | if (!msg) |
331 | return; | |
6b3087c6 GY |
332 | INIT_LIST_HEAD(&msg->list); |
333 | msg->type = BFIN_IPI_CPU_STOP; | |
334 | ||
335 | for_each_cpu_mask(cpu, callmap) { | |
336 | msg_queue = &per_cpu(ipi_msg_queue, cpu); | |
337 | spin_lock_irqsave(&msg_queue->lock, flags); | |
86f2008b | 338 | list_add_tail(&msg->list, &msg_queue->head); |
6b3087c6 GY |
339 | spin_unlock_irqrestore(&msg_queue->lock, flags); |
340 | platform_send_ipi_cpu(cpu); | |
341 | } | |
342 | return; | |
343 | } | |
344 | ||
345 | int __cpuinit __cpu_up(unsigned int cpu) | |
346 | { | |
347 | struct task_struct *idle; | |
348 | int ret; | |
349 | ||
350 | idle = fork_idle(cpu); | |
351 | if (IS_ERR(idle)) { | |
352 | printk(KERN_ERR "CPU%u: fork() failed\n", cpu); | |
353 | return PTR_ERR(idle); | |
354 | } | |
355 | ||
356 | secondary_stack = task_stack_page(idle) + THREAD_SIZE; | |
357 | smp_wmb(); | |
358 | ||
359 | ret = platform_boot_secondary(cpu, idle); | |
360 | ||
6b3087c6 GY |
361 | secondary_stack = NULL; |
362 | ||
363 | return ret; | |
364 | } | |
365 | ||
366 | static void __cpuinit setup_secondary(unsigned int cpu) | |
367 | { | |
6b3087c6 GY |
368 | unsigned long ilat; |
369 | ||
370 | bfin_write_IMASK(0); | |
371 | CSYNC(); | |
372 | ilat = bfin_read_ILAT(); | |
373 | CSYNC(); | |
374 | bfin_write_ILAT(ilat); | |
375 | CSYNC(); | |
376 | ||
6b3087c6 GY |
377 | /* Enable interrupt levels IVG7-15. IARs have been already |
378 | * programmed by the boot CPU. */ | |
40059784 | 379 | bfin_irq_flags |= IMASK_IVG15 | |
6b3087c6 GY |
380 | IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 | |
381 | IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW; | |
6b3087c6 GY |
382 | } |
383 | ||
384 | void __cpuinit secondary_start_kernel(void) | |
385 | { | |
386 | unsigned int cpu = smp_processor_id(); | |
387 | struct mm_struct *mm = &init_mm; | |
388 | ||
389 | if (_bfin_swrst & SWRST_DBL_FAULT_B) { | |
390 | printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n"); | |
391 | #ifdef CONFIG_DEBUG_DOUBLEFAULT | |
392 | printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n", | |
393 | (int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb); | |
394 | printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb); | |
395 | printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb); | |
396 | #endif | |
397 | printk(KERN_NOTICE " The instruction at %pF caused a double exception\n", | |
398 | init_retx_coreb); | |
399 | } | |
400 | ||
401 | /* | |
402 | * We want the D-cache to be enabled early, in case the atomic | |
403 | * support code emulates cache coherence (see | |
404 | * __ARCH_SYNC_CORE_DCACHE). | |
405 | */ | |
406 | init_exception_vectors(); | |
407 | ||
408 | bfin_setup_caches(cpu); | |
409 | ||
410 | local_irq_disable(); | |
411 | ||
412 | /* Attach the new idle task to the global mm. */ | |
413 | atomic_inc(&mm->mm_users); | |
414 | atomic_inc(&mm->mm_count); | |
415 | current->active_mm = mm; | |
416 | BUG_ON(current->mm); /* Can't be, but better be safe than sorry. */ | |
417 | ||
418 | preempt_disable(); | |
419 | ||
420 | setup_secondary(cpu); | |
421 | ||
578d36f5 YL |
422 | platform_secondary_init(cpu); |
423 | ||
0d152c27 YL |
424 | /* setup local core timer */ |
425 | bfin_local_timer_setup(); | |
426 | ||
6b3087c6 GY |
427 | local_irq_enable(); |
428 | ||
578d36f5 YL |
429 | /* |
430 | * Calibrate loops per jiffy value. | |
431 | * IRQs need to be enabled here - D-cache can be invalidated | |
432 | * in timer irq handler, so core B can read correct jiffies. | |
433 | */ | |
434 | calibrate_delay(); | |
6b3087c6 GY |
435 | |
436 | cpu_idle(); | |
437 | } | |
438 | ||
439 | void __init smp_prepare_boot_cpu(void) | |
440 | { | |
441 | } | |
442 | ||
443 | void __init smp_prepare_cpus(unsigned int max_cpus) | |
444 | { | |
445 | platform_prepare_cpus(max_cpus); | |
446 | ipi_queue_init(); | |
447 | platform_request_ipi(&ipi_handler); | |
448 | } | |
449 | ||
450 | void __init smp_cpus_done(unsigned int max_cpus) | |
451 | { | |
452 | unsigned long bogosum = 0; | |
453 | unsigned int cpu; | |
454 | ||
455 | for_each_online_cpu(cpu) | |
c70c754f | 456 | bogosum += loops_per_jiffy; |
6b3087c6 GY |
457 | |
458 | printk(KERN_INFO "SMP: Total of %d processors activated " | |
459 | "(%lu.%02lu BogoMIPS).\n", | |
460 | num_online_cpus(), | |
461 | bogosum / (500000/HZ), | |
462 | (bogosum / (5000/HZ)) % 100); | |
463 | } | |
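
/*
 * A note on the BogoMIPS arithmetic above: each core performs
 * loops_per_jiffy delay-loop iterations per timer tick, and one
 * BogoMIPS is 500000 loops per second, i.e. 500000/HZ loops per jiffy.
 * The integer part is therefore bogosum / (500000/HZ), and
 * (bogosum / (5000/HZ)) % 100 supplies two decimal places.  For
 * example (numbers invented for illustration), with HZ == 100 and
 * bogosum == 1234567 this prints 1234567/5000 = 246 and
 * (1234567/50) % 100 = 91, i.e. "246.91 BogoMIPS".
 */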
464 | ||
465 | void smp_icache_flush_range_others(unsigned long start, unsigned long end) | |
466 | { | |
467 | smp_flush_data.start = start; | |
468 | smp_flush_data.end = end; | |
469 | ||
0bf3d933 | 470 | if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0)) |
6b3087c6 GY |
471 | printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n"); |
472 | } | |
473 | EXPORT_SYMBOL_GPL(smp_icache_flush_range_others); | |
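
/*
 * Usage sketch (illustrative only, not from the original source): a
 * typical caller writes new instructions to memory, flushes its own
 * I-cache for that range, then asks the other cores to do the same.
 * Note the request is posted with wait == 0 above, so the remote
 * flushes complete asynchronously.  example_patch_flush() is a
 * hypothetical name.
 */
#if 0
static void example_patch_flush(void *addr, size_t len)
{
	/* new instructions have just been written to [addr, addr + len) */
	blackfin_icache_flush_range((unsigned long)addr,
				    (unsigned long)addr + len);
	smp_icache_flush_range_others((unsigned long)addr,
				      (unsigned long)addr + len);
}
#endif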
474 | ||
47e9dedb SZ |
475 | #ifdef __ARCH_SYNC_CORE_ICACHE |
476 | void resync_core_icache(void) | |
477 | { | |
478 | unsigned int cpu = get_cpu(); | |
479 | blackfin_invalidate_entire_icache(); | |
480 | ++per_cpu(cpu_data, cpu).icache_invld_count; | |
481 | put_cpu(); | |
482 | } | |
483 | EXPORT_SYMBOL(resync_core_icache); | |
484 | #endif | |
485 | ||
6b3087c6 GY |
486 | #ifdef __ARCH_SYNC_CORE_DCACHE |
487 | unsigned long barrier_mask __attribute__ ((__section__(".l2.bss"))); | |
488 | ||
489 | void resync_core_dcache(void) | |
490 | { | |
491 | unsigned int cpu = get_cpu(); | |
492 | blackfin_invalidate_entire_dcache(); | |
493 | ++per_cpu(cpu_data, cpu).dcache_invld_count; | |
494 | put_cpu(); | |
495 | } | |
496 | EXPORT_SYMBOL(resync_core_dcache); | |
497 | #endif |