Commit | Line | Data |
---|---|---|
14e968ba VG |
1 | /* |
2 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or modify | |
5 | * it under the terms of the GNU General Public License version 2 as | |
6 | * published by the Free Software Foundation. | |
7 | */ | |
8 | ||
9 | #ifndef __ASM_ARC_SMP_H | |
10 | #define __ASM_ARC_SMP_H | |
11 | ||
41195d23 VG |
12 | #ifdef CONFIG_SMP |
13 | ||
14 | #include <linux/types.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/threads.h> | |
17 | ||
18 | #define raw_smp_processor_id() (current_thread_info()->cpu) | |
19 | ||
20 | /* including cpumask.h leads to cyclic deps hence this Forward declaration */ | |
21 | struct cpumask; | |
22 | ||
23 | /* | |
24 | * APIs provided by arch SMP code to generic code | |
25 | */ | |
26 | extern void arch_send_call_function_single_ipi(int cpu); | |
27 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | |
28 | ||
29 | /* | |
30 | * APIs provided by arch SMP code to rest of arch code | |
31 | */ | |
32 | extern void __init smp_init_cpus(void); | |
8f5d221b | 33 | extern void first_lines_of_secondary(void); |
10b12718 | 34 | extern const char *arc_platform_smp_cpuinfo(void); |
41195d23 VG |
35 | |
36 | /* | |
37 | * API expected BY platform smp code (FROM arch smp code) | |
38 | * | |
39 | * smp_ipi_irq_setup: | |
40 | * Takes @cpu and @irq to which the arch-common ISR is hooked up | |
41 | */ | |
42 | extern int smp_ipi_irq_setup(int cpu, int irq); | |
43 | ||
44 | /* | |
10b12718 | 45 | * struct plat_smp_ops - SMP callbacks provided by platform to ARC SMP |
41195d23 | 46 | * |
10b12718 | 47 | * @info: SoC SMP specific info for /proc/cpuinfo etc |
e55af4da VG |
48 | * @init_early_smp: A SMP specific h/w block can init itself |
49 | * Could be common across platforms so not covered by | |
50 | * mach_desc->init_early() | |
b474a023 | 51 | * @init_per_cpu: Called for each core so SMP h/w block driver can do |
286130eb | 52 | * any needed setup per cpu (e.g. IPI request) |
10b12718 | 53 | * @cpu_kick: For Master to kickstart a cpu (optionally at a PC) |
ddf84433 | 54 | * @ipi_send: To send IPI to a @cpu |
ccdaa6e0 | 55 | * @ipi_clear: To clear IPI received at @irq |
41195d23 | 56 | */ |
10b12718 VG |
57 | struct plat_smp_ops { |
58 | const char *info; | |
e55af4da | 59 | void (*init_early_smp)(void); |
b474a023 | 60 | void (*init_per_cpu)(int cpu); |
10b12718 | 61 | void (*cpu_kick)(int cpu, unsigned long pc); |
ddf84433 | 62 | void (*ipi_send)(int cpu); |
ccdaa6e0 | 63 | void (*ipi_clear)(int irq); |
10b12718 VG |
64 | }; |
65 | ||
66 | /* TBD: stop exporting it for direct population by platform */ | |
67 | extern struct plat_smp_ops plat_smp_ops; | |
41195d23 | 68 | |
619f3018 VG |
69 | #else /* CONFIG_SMP */ |
70 | ||
/* UP builds: empty / empty-string stubs so callers need no #ifdef CONFIG_SMP */
71 | static inline void smp_init_cpus(void) {} | |
72 | static inline const char *arc_platform_smp_cpuinfo(void) | |
73 | { | |
74 | return ""; | |
75 | } | |
76 | ||
77 | #endif /* !CONFIG_SMP */ | |
41195d23 | 78 | |
14e968ba VG |
79 | /* |
80 | * ARC700 doesn't support atomic Read-Modify-Write ops. | |
81 | * Originally interrupts had to be disabled around code to guarantee atomicity. |
82 | * The LLOCK/SCOND insns allow writing interrupt-hassle-free atomic ops |
83 | * based on retry-if-irq-in-atomic (with hardware assist). | |
84 | * However despite these, we provide the IRQ disabling variant | |
85 | * | |
86 | * (1) These insns were introduced only in the 4.10 release, so for older releases |
87 | * support is still needed. |
41195d23 | 88 | * |
2547476a | 89 | * (2) In a SMP setup, the LLOCK/SCOND atomicity across CPUs needs to be |
41195d23 VG |
90 | * guaranteed by the platform (not something which the core handles). |
91 | * Assuming a platform won't, SMP Linux needs to use spinlocks + local IRQ | |
92 | * disabling for atomicity. | |
93 | * | |
94 | * However exported spinlock API is not usable due to cyclic hdr deps | |
95 | * (even after system.h disintegration upstream) | |
96 | * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h | |
97 | * -> linux/thread_info.h -> linux/bitops.h -> asm/bitops.h | |
98 | * | |
99 | * So the workaround is to use the lowest level arch spinlock API. | |
100 | * The exported spinlock API is smart enough to be NOP for !CONFIG_SMP, | |
101 | * but same is not true for ARCH backend, hence the need for 2 variants | |
14e968ba VG |
102 | */ |
103 | #ifndef CONFIG_ARC_HAS_LLSC | |
104 | ||
105 | #include <linux/irqflags.h> | |
41195d23 VG |
106 | #ifdef CONFIG_SMP |
107 | ||
108 | #include <asm/spinlock.h> | |
109 | ||
110 | extern arch_spinlock_t smp_atomic_ops_lock; | |
111 | extern arch_spinlock_t smp_bitops_lock; | |
112 | ||
/*
 * SMP without LLSC: serialize against other CPUs with an arch spinlock AND
 * against local interrupt handlers by disabling IRQs. Order matters: IRQs
 * are disabled before taking the lock (and restored after releasing it) so
 * an IRQ can never interrupt a lock holder on this CPU.
 */
113 | #define atomic_ops_lock(flags) do { \ | |
114 | local_irq_save(flags); \ | |
115 | arch_spin_lock(&smp_atomic_ops_lock); \ | |
116 | } while (0) | |
117 | ||
118 | #define atomic_ops_unlock(flags) do { \ | |
119 | arch_spin_unlock(&smp_atomic_ops_lock); \ | |
120 | local_irq_restore(flags); \ | |
121 | } while (0) | |
122 | ||
123 | #define bitops_lock(flags) do { \ | |
124 | local_irq_save(flags); \ | |
125 | arch_spin_lock(&smp_bitops_lock); \ | |
126 | } while (0) | |
127 | ||
128 | #define bitops_unlock(flags) do { \ | |
129 | arch_spin_unlock(&smp_bitops_lock); \ | |
130 | local_irq_restore(flags); \ | |
131 | } while (0) | |
132 | ||
133 | #else /* !CONFIG_SMP */ | |
14e968ba VG |
134 | |
/* UP without LLSC: no other CPUs to race with — disabling local IRQs suffices */
135 | #define atomic_ops_lock(flags) local_irq_save(flags) | |
136 | #define atomic_ops_unlock(flags) local_irq_restore(flags) | |
137 | ||
138 | #define bitops_lock(flags) local_irq_save(flags) | |
139 | #define bitops_unlock(flags) local_irq_restore(flags) | |
140 | ||
41195d23 VG |
141 | #endif /* !CONFIG_SMP */ |
142 | ||
14e968ba VG |
143 | #endif /* !CONFIG_ARC_HAS_LLSC */ |
144 | ||
145 | #endif |