Commit | Line | Data |
---|---|---|
867e359b CM |
1 | /* |
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | ||
15 | #include <linux/module.h> | |
16 | #include <linux/init.h> | |
17 | #include <linux/kernel.h> | |
18 | #include <linux/mm.h> | |
19 | #include <linux/sched.h> | |
20 | #include <linux/kernel_stat.h> | |
867e359b CM |
21 | #include <linux/bootmem.h> |
22 | #include <linux/notifier.h> | |
23 | #include <linux/cpu.h> | |
24 | #include <linux/percpu.h> | |
25 | #include <linux/delay.h> | |
26 | #include <linux/err.h> | |
0707ad30 | 27 | #include <linux/irq.h> |
867e359b CM |
28 | #include <asm/mmu_context.h> |
29 | #include <asm/tlbflush.h> | |
30 | #include <asm/sections.h> | |
31 | ||
867e359b | 32 | /* State of each CPU. */ |
0707ad30 | 33 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
867e359b CM |
34 | |
35 | /* The messaging code jumps to this pointer during boot-up */ | |
36 | unsigned long start_cpu_function_addr; | |
37 | ||
/*
 * Called very early during startup to mark boot cpu as online.
 * Runs only on the boot cpu, before any secondary tiles are started.
 */
void __init smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();
	set_cpu_online(cpu, 1);
	set_cpu_present(cpu, 1);
	/* Record hotplug state for this cpu in the per-cpu tracking slot. */
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Allow hypervisor messages to be received on the boot cpu. */
	init_messaging();
}
48 | ||
49 | static void start_secondary(void); | |
50 | ||
/*
 * Called at the top of init() to launch all the other CPUs.
 * They run free to complete their initialization and then wait
 * until they get an IPI from the boot cpu to come online.
 *
 * @max_cpus: upper bound on the number of cpus to bring up; only
 * sanity-checked here via BUG_ON, since the hypervisor starts all
 * present tiles at once.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	long rc;
	int cpu, cpu_count;
	int boot_cpu = smp_processor_id();

	current_thread_info()->cpu = boot_cpu;

	/*
	 * Pin this task to the boot CPU while we bring up the others,
	 * just to make sure we don't uselessly migrate as they come up.
	 */
	rc = sched_setaffinity(current->pid, cpumask_of(boot_cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to boot cpu (%ld)\n", rc);

	/* Print information about disabled and dataplane cpus. */
	print_disabled_cpus();

	/*
	 * Tell the messaging subsystem how to respond to the
	 * startup message.  We use a level of indirection to avoid
	 * confusing the linker with the fact that the messaging
	 * subsystem is calling __init code.
	 */
	start_cpu_function_addr = (unsigned long) &online_secondary;

	/* Set up thread context for all new processors. */
	cpu_count = 1;	/* the boot cpu itself counts as one */
	for (cpu = 0; cpu < NR_CPUS; ++cpu) {
		struct task_struct *idle;

		if (cpu == boot_cpu)
			continue;

		if (!cpu_possible(cpu)) {
			/*
			 * Make this processor do nothing on boot.
			 * Note that we don't give the boot_pc function
			 * a stack, so it has to be assembly code.
			 */
			per_cpu(boot_sp, cpu) = 0;
			per_cpu(boot_pc, cpu) = (unsigned long) smp_nap;
			continue;
		}

		/* Create a new idle thread to run start_secondary() */
		idle = fork_idle(cpu);
		if (IS_ERR(idle))
			panic("failed fork for CPU %d", cpu);
		idle->thread.pc = (unsigned long) start_secondary;

		/* Make this thread the boot thread for this processor */
		per_cpu(boot_sp, cpu) = task_ksp0(idle);
		per_cpu(boot_pc, cpu) = idle->thread.pc;

		++cpu_count;
	}
	BUG_ON(cpu_count > (max_cpus ? max_cpus : 1));

	/* Fire up the other tiles, if any */
	init_cpu_present(cpu_possible_mask);
	if (cpumask_weight(cpu_present_mask) > 1) {
		/*
		 * The barrier must precede hv_start_all_tiles() so the
		 * boot_sp/boot_pc values written above are visible to
		 * the secondaries before they start running.
		 */
		mb();  /* make sure all data is visible to new processors */
		hv_start_all_tiles();
	}
}
123 | ||
124 | static __initdata struct cpumask init_affinity; | |
125 | ||
126 | static __init int reset_init_affinity(void) | |
127 | { | |
128 | long rc = sched_setaffinity(current->pid, &init_affinity); | |
129 | if (rc != 0) | |
0707ad30 | 130 | pr_warning("couldn't reset init affinity (%ld)\n", |
867e359b CM |
131 | rc); |
132 | return 0; | |
133 | } | |
134 | late_initcall(reset_init_affinity); | |
135 | ||
18f894c1 | 136 | static struct cpumask cpu_started; |
867e359b CM |
137 | |
/*
 * Activate a secondary processor.  Very minimal; don't add anything
 * to this path without knowing what you're doing, since SMP booting
 * is pretty fragile.
 *
 * Entered via boot_pc set up by smp_prepare_cpus(); ends in smp_nap(),
 * where the cpu waits for the MSG_TAG_START_CPU IPI sent by __cpu_up().
 */
static void start_secondary(void)
{
	int cpuid;

	preempt_disable();

	cpuid = smp_processor_id();

	/* Set our thread pointer appropriately. */
	set_my_cpu_offset(__per_cpu_offset[cpuid]);

	/*
	 * In large machines even this will slow us down, since we
	 * will be contending for the printk spinlock.
	 */
	/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */

	/* Initialize the current asid for our first page table. */
	__get_cpu_var(current_asid) = min_asid;

	/* Set up this thread as another owner of the init_mm */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);

	/* Allow hypervisor messages to be received */
	init_messaging();
	local_irq_enable();

	/* Indicate that we're ready to come up. */
	/* Must not do this before we're ready to receive messages */
	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
		/* Double start: spin here (irqs on) rather than proceed. */
		pr_warning("CPU#%d already started!\n", cpuid);
		for (;;)
			local_irq_enable();
	}

	smp_nap();
}
184 | ||
867e359b CM |
/*
 * Bring a secondary processor online.
 *
 * Reached via start_cpu_function_addr when the MSG_TAG_START_CPU IPI
 * arrives; finishes by entering the generic idle loop and does not
 * return.
 */
void online_secondary(void)
{
	/*
	 * low-memory mappings have been cleared, flush them from
	 * the local TLBs too.
	 */
	local_flush_tlb();

	BUG_ON(in_interrupt());

	/* This must be done before setting cpu_online_mask */
	wmb();

	notify_cpu_starting(smp_processor_id());

	/* __cpu_up() spins on cpu_online_mask; this releases it. */
	set_cpu_online(smp_processor_id(), 1);
	__get_cpu_var(cpu_state) = CPU_ONLINE;

	/* Set up tile-specific state for this cpu. */
	setup_cpu(0);

	/* Set up tile-timer clock-event device on this cpu */
	setup_tile_timer();

	cpu_startup_entry(CPUHP_ONLINE);
}
214 | ||
/*
 * Boot-cpu side of bringing @cpu online: wait for it to reach
 * start_secondary(), then send the start IPI and spin until it
 * marks itself online.  Returns 0 on success, -EIO on timeout.
 * NOTE(review): @tidle is unused here; the idle task was already
 * wired up in smp_prepare_cpus().
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/* Wait 5s total for all CPUs for them to come online */
	/* timeout is static, so the 50000 * 100us budget is cumulative
	 * across every cpu brought up, not per-cpu. */
	static int timeout;
	for (; !cpumask_test_cpu(cpu, &cpu_started); timeout++) {
		if (timeout >= 50000) {
			pr_info("skipping unresponsive cpu%d\n", cpu);
			local_irq_enable();
			return -EIO;
		}
		udelay(100);
	}

	local_irq_enable();
	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* Unleash the CPU! */
	send_IPI_single(cpu, MSG_TAG_START_CPU);
	/* online_secondary() on @cpu sets the online bit we wait for. */
	while (!cpumask_test_cpu(cpu, cpu_online_mask))
		cpu_relax();
	return 0;
}
237 | ||
/*
 * Installed as start_cpu_function_addr by smp_cpus_done(): once boot
 * is complete a MSG_START_CPU IPI is illegal, so treat it as fatal.
 */
static void panic_start_cpu(void)
{
	panic("Received a MSG_START_CPU IPI after boot finished.");
}
242 | ||
/*
 * Final SMP bring-up hook: lock out further start-cpu IPIs, save the
 * online mask for reset_init_affinity(), and pin init to the last
 * online cpu until late_initcall time.
 */
void __init smp_cpus_done(unsigned int max_cpus)
{
	int cpu, next, rc;

	/* Reset the response to a (now illegal) MSG_START_CPU IPI. */
	start_cpu_function_addr = (unsigned long) &panic_start_cpu;

	/* Saved so reset_init_affinity() can restore it after boot. */
	cpumask_copy(&init_affinity, cpu_online_mask);

	/*
	 * Pin ourselves to a single cpu in the initial affinity set
	 * so that kernel mappings for the rootfs are not in the dataplane,
	 * if set, and to avoid unnecessary migrating during bringup.
	 * Use the last cpu just in case the whole chip has been
	 * isolated from the scheduler, to keep init away from likely
	 * more useful user code.  This also ensures that work scheduled
	 * via schedule_delayed_work() in the init routines will land
	 * on this cpu.
	 */
	/* Empty-bodied loop: walks init_affinity until cpu is the last set bit. */
	for (cpu = cpumask_first(&init_affinity);
	     (next = cpumask_next(cpu, &init_affinity)) < nr_cpu_ids;
	     cpu = next)
		;
	rc = sched_setaffinity(current->pid, cpumask_of(cpu));
	if (rc != 0)
		pr_err("Couldn't set init affinity to cpu %d (%d)\n", cpu, rc);
}