Blackfin: drop cpu_callin_map on SMP systems
[deliverable/linux.git] arch/blackfin/mach-common/smp.c
/*
 * IPI management based on arch/arm/kernel/smp.c (Copyright 2002 ARM Limited)
 *
 * Copyright 2007-2009 Analog Devices Inc.
 * Philippe Gerum <rpm@xenomai.org>
 *
 * Licensed under the GPL-2.
 */

#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>
#include <linux/irq.h>
#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/cpu.h>
#include <asm/time.h>
#include <linux/err.h>

/*
 * Anomaly notes:
 * 05000120 - we always define corelock as 32-bit integer in L2
 */
struct corelock_slot corelock __attribute__ ((__section__(".l2.bss")));

void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
	*init_saved_dcplb_fault_addr_coreb;

cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);

#define BFIN_IPI_RESCHEDULE   0
#define BFIN_IPI_CALL_FUNC    1
#define BFIN_IPI_CPU_STOP     2

struct blackfin_flush_data {
	unsigned long start;
	unsigned long end;
};

void *secondary_stack;

struct smp_call_struct {
	void (*func)(void *info);
	void *info;
	int wait;
	cpumask_t pending;
	cpumask_t waitmask;
};

static struct blackfin_flush_data smp_flush_data;

static DEFINE_SPINLOCK(stop_lock);

struct ipi_message {
	struct list_head list;
	unsigned long type;
	struct smp_call_struct call_struct;
};

struct ipi_message_queue {
	struct list_head head;
	spinlock_t lock;
	unsigned long count;
};

static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);

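/*
 * BFIN_IPI_CPU_STOP handler: report the stop, take this CPU out of
 * cpu_online_map and spin with interrupts disabled.
 */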
static void ipi_cpu_stop(unsigned int cpu)
{
	spin_lock(&stop_lock);
	printk(KERN_CRIT "CPU%u: stopping\n", cpu);
	dump_stack();
	spin_unlock(&stop_lock);

	cpu_clear(cpu, cpu_online_map);

	local_irq_disable();

	while (1)
		SSYNC();
}

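/* Remote side of smp_icache_flush_range_others(), run on each target CPU. */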
static void ipi_flush_icache(void *info)
{
	struct blackfin_flush_data *fdata = info;

	/* Invalidate the memory holding the bounds of the flushed region. */
	blackfin_dcache_invalidate_range((unsigned long)fdata,
					 (unsigned long)fdata + sizeof(*fdata));

	blackfin_icache_flush_range(fdata->start, fdata->end);
}

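/*
 * BFIN_IPI_CALL_FUNC handler: run the requested function and acknowledge it
 * by clearing this CPU from the sender's pending and wait masks.
 */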
static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
{
	int wait;
	void (*func)(void *info);
	void *info;
	func = msg->call_struct.func;
	info = msg->call_struct.info;
	wait = msg->call_struct.wait;
	cpu_clear(cpu, msg->call_struct.pending);
	func(info);
	if (wait) {
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * 'wait' usually means synchronization between CPUs.
		 * Invalidate D cache in case shared data was changed
		 * by func() to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		cpu_clear(cpu, msg->call_struct.waitmask);
	} else
		kfree(msg);
}

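/*
 * IPI interrupt handler: drain this CPU's message queue and dispatch each
 * message according to its type.
 */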
static irqreturn_t ipi_handler(int irq, void *dev_instance)
{
	struct ipi_message *msg;
	struct ipi_message_queue *msg_queue;
	unsigned int cpu = smp_processor_id();

	platform_clear_ipi(cpu);

	msg_queue = &__get_cpu_var(ipi_msg_queue);
	msg_queue->count++;

	spin_lock(&msg_queue->lock);
	while (!list_empty(&msg_queue->head)) {
		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
		list_del(&msg->list);
		switch (msg->type) {
		case BFIN_IPI_RESCHEDULE:
			/* That's the easiest one; leave it to
			 * return_from_int. */
			kfree(msg);
			break;
		case BFIN_IPI_CALL_FUNC:
			spin_unlock(&msg_queue->lock);
			ipi_call_function(cpu, msg);
			spin_lock(&msg_queue->lock);
			break;
		case BFIN_IPI_CPU_STOP:
			spin_unlock(&msg_queue->lock);
			ipi_cpu_stop(cpu);
			spin_lock(&msg_queue->lock);
			kfree(msg);
			break;
		default:
			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
			       cpu, msg->type);
			kfree(msg);
			break;
		}
	}
	spin_unlock(&msg_queue->lock);
	return IRQ_HANDLED;
}

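/* Initialize the per-CPU IPI message queues before any IPI can be delivered. */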
static void ipi_queue_init(void)
{
	unsigned int cpu;
	struct ipi_message_queue *msg_queue;
	for_each_possible_cpu(cpu) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		INIT_LIST_HEAD(&msg_queue->head);
		spin_lock_init(&msg_queue->lock);
		msg_queue->count = 0;
	}
}

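/*
 * Queue a BFIN_IPI_CALL_FUNC message on every other online CPU and kick each
 * one with an IPI; if 'wait' is set, spin until all of them have run func().
 */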
int smp_call_function(void (*func)(void *info), void *info, int wait)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return 0;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function);

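/* Same as smp_call_function(), but targeting only the CPU given by 'cpuid'. */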
int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
			     int wait)
{
	unsigned int cpu = cpuid;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return 0;
	cpus_clear(callmap);
	cpu_set(cpu, callmap);

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;
	INIT_LIST_HEAD(&msg->list);
	msg->call_struct.func = func;
	msg->call_struct.info = info;
	msg->call_struct.wait = wait;
	msg->call_struct.pending = callmap;
	msg->call_struct.waitmask = callmap;
	msg->type = BFIN_IPI_CALL_FUNC;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

	if (wait) {
		while (!cpus_empty(msg->call_struct.waitmask))
			blackfin_dcache_invalidate_range(
				(unsigned long)(&msg->call_struct.waitmask),
				(unsigned long)(&msg->call_struct.waitmask));
#ifdef __ARCH_SYNC_CORE_DCACHE
		/*
		 * Invalidate D cache in case shared data was changed by
		 * other processors to ensure cache coherence.
		 */
		resync_core_dcache();
#endif
		kfree(msg);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(smp_call_function_single);

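/* Queue a BFIN_IPI_RESCHEDULE message for 'cpu' and raise an IPI. */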
void smp_send_reschedule(int cpu)
{
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	if (cpu_is_offline(cpu))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_RESCHEDULE;

	msg_queue = &per_cpu(ipi_msg_queue, cpu);
	spin_lock_irqsave(&msg_queue->lock, flags);
	list_add_tail(&msg->list, &msg_queue->head);
	spin_unlock_irqrestore(&msg_queue->lock, flags);
	platform_send_ipi_cpu(cpu);

	return;
}

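/* Tell every other online CPU to stop itself via BFIN_IPI_CPU_STOP. */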
void smp_send_stop(void)
{
	unsigned int cpu;
	cpumask_t callmap;
	unsigned long flags;
	struct ipi_message_queue *msg_queue;
	struct ipi_message *msg;

	callmap = cpu_online_map;
	cpu_clear(smp_processor_id(), callmap);
	if (cpus_empty(callmap))
		return;

	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return;
	INIT_LIST_HEAD(&msg->list);
	msg->type = BFIN_IPI_CPU_STOP;

	for_each_cpu_mask(cpu, callmap) {
		msg_queue = &per_cpu(ipi_msg_queue, cpu);
		spin_lock_irqsave(&msg_queue->lock, flags);
		list_add_tail(&msg->list, &msg_queue->head);
		spin_unlock_irqrestore(&msg_queue->lock, flags);
		platform_send_ipi_cpu(cpu);
	}
	return;
}

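/*
 * Bring a secondary core online: fork its idle task, publish the idle stack
 * in secondary_stack for the startup code, then let the platform boot it.
 */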
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	int ret;

	idle = fork_idle(cpu);
	if (IS_ERR(idle)) {
		printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
		return PTR_ERR(idle);
	}

	secondary_stack = task_stack_page(idle) + THREAD_SIZE;
	smp_wmb();

	ret = platform_boot_secondary(cpu, idle);

	secondary_stack = NULL;

	return ret;
}

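/*
 * Per-CPU interrupt setup on the secondary core: clear latched interrupts,
 * enable interrupt levels IVG7-15 and bring up the core's tick source.
 */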
static void __cpuinit setup_secondary(unsigned int cpu)
{
#if !defined(CONFIG_TICKSOURCE_GPTMR0)
	struct irq_desc *timer_desc;
#endif
	unsigned long ilat;

	bfin_write_IMASK(0);
	CSYNC();
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	/* Enable interrupt levels IVG7-15. IARs have been already
	 * programmed by the boot CPU. */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;

#if defined(CONFIG_TICKSOURCE_GPTMR0)
	/* Power down the core timer, just to play safe. */
	bfin_write_TCNTL(0);

	/* system timer0 has been setup by CoreA. */
#else
	timer_desc = irq_desc + IRQ_CORETMR;
	setup_core_timer();
	timer_desc->chip->enable(IRQ_CORETMR);
#endif
}

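/*
 * C entry point for the secondary core: report a prior double fault if any,
 * set up caches, MM and interrupts, calibrate the delay loop, then idle.
 */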
void __cpuinit secondary_start_kernel(void)
{
	unsigned int cpu = smp_processor_id();
	struct mm_struct *mm = &init_mm;

	if (_bfin_swrst & SWRST_DBL_FAULT_B) {
		printk(KERN_EMERG "CoreB Recovering from DOUBLE FAULT event\n");
#ifdef CONFIG_DEBUG_DOUBLEFAULT
		printk(KERN_EMERG " While handling exception (EXCAUSE = 0x%x) at %pF\n",
			(int)init_saved_seqstat_coreb & SEQSTAT_EXCAUSE, init_saved_retx_coreb);
		printk(KERN_NOTICE " DCPLB_FAULT_ADDR: %pF\n", init_saved_dcplb_fault_addr_coreb);
		printk(KERN_NOTICE " ICPLB_FAULT_ADDR: %pF\n", init_saved_icplb_fault_addr_coreb);
#endif
		printk(KERN_NOTICE " The instruction at %pF caused a double exception\n",
			init_retx_coreb);
	}

	/*
	 * We want the D-cache to be enabled early, in case the atomic
	 * support code emulates cache coherence (see
	 * __ARCH_SYNC_CORE_DCACHE).
	 */
	init_exception_vectors();

	bfin_setup_caches(cpu);

	local_irq_disable();

	/* Attach the new idle task to the global mm. */
	atomic_inc(&mm->mm_users);
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	BUG_ON(current->mm);	/* Can't be, but better be safe than sorry. */

	preempt_disable();

	setup_secondary(cpu);

	platform_secondary_init(cpu);

	local_irq_enable();

	/*
	 * Calibrate loops per jiffy value.
	 * IRQs need to be enabled here - D-cache can be invalidated
	 * in timer irq handler, so core B can read correct jiffies.
	 */
	calibrate_delay();

	cpu_idle();
}

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	platform_prepare_cpus(max_cpus);
	ipi_queue_init();
	platform_request_ipi(&ipi_handler);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	unsigned int cpu;

	for_each_online_cpu(cpu)
		bogosum += loops_per_jiffy;

	printk(KERN_INFO "SMP: Total of %d processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       num_online_cpus(),
	       bogosum / (500000/HZ),
	       (bogosum / (5000/HZ)) % 100);
}

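/*
 * Flush an I-cache range on all other CPUs: stash the bounds in
 * smp_flush_data and run ipi_flush_icache() on them via smp_call_function().
 */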
void smp_icache_flush_range_others(unsigned long start, unsigned long end)
{
	smp_flush_data.start = start;
	smp_flush_data.end = end;

	if (smp_call_function(&ipi_flush_icache, &smp_flush_data, 0))
		printk(KERN_WARNING "SMP: failed to run I-cache flush request on other CPUs\n");
}
EXPORT_SYMBOL_GPL(smp_icache_flush_range_others);

#ifdef __ARCH_SYNC_CORE_ICACHE
void resync_core_icache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_icache();
	++per_cpu(cpu_data, cpu).icache_invld_count;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_icache);
#endif

#ifdef __ARCH_SYNC_CORE_DCACHE
unsigned long barrier_mask __attribute__ ((__section__(".l2.bss")));

void resync_core_dcache(void)
{
	unsigned int cpu = get_cpu();
	blackfin_invalidate_entire_dcache();
	++per_cpu(cpu_data, cpu).dcache_invld_count;
	put_cpu();
}
EXPORT_SYMBOL(resync_core_dcache);
#endif