1 | /* |
2 | * arch/ppc64/kernel/head.S | |
3 | * | |
4 | * PowerPC version | |
5 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) | |
6 | * | |
7 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP | |
8 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> | |
9 | * Adapted for Power Macintosh by Paul Mackerras. | |
10 | * Low-level exception handlers and MMU support | |
11 | * rewritten by Paul Mackerras. | |
12 | * Copyright (C) 1996 Paul Mackerras. | |
13 | * | |
14 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and | |
15 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com | |
16 | * | |
17 | * This file contains the low-level support and setup for the | |
18 | * PowerPC-64 platform, including trap and interrupt dispatch. | |
19 | * | |
20 | * This program is free software; you can redistribute it and/or | |
21 | * modify it under the terms of the GNU General Public License | |
22 | * as published by the Free Software Foundation; either version | |
23 | * 2 of the License, or (at your option) any later version. | |
24 | */ | |
25 | ||
26 | #define SECONDARY_PROCESSORS | |
27 | ||
28 | #include <linux/config.h> | |
29 | #include <linux/threads.h> | |
30 | #include <asm/processor.h> | |
31 | #include <asm/page.h> | |
32 | #include <asm/mmu.h> | |
33 | #include <asm/naca.h> | |
34 | #include <asm/systemcfg.h> | |
35 | #include <asm/ppc_asm.h> | |
36 | #include <asm/offsets.h> | |
37 | #include <asm/bug.h> | |
38 | #include <asm/cputable.h> | |
39 | #include <asm/setup.h> | |
40 | #include <asm/hvcall.h> | |
41 | ||
42 | #ifdef CONFIG_PPC_ISERIES | |
43 | #define DO_SOFT_DISABLE | |
44 | #endif | |
45 | ||
46 | /* | |
47 | * hcall interface to pSeries LPAR | |
48 | */ | |
49 | #define H_SET_ASR 0x30 | |
50 | ||
51 | /* | |
52 | * We layout physical memory as follows: | |
53 | * 0x0000 - 0x00ff : Secondary processor spin code | |
54 | * 0x0100 - 0x2fff : pSeries Interrupt prologs | |
55 | * 0x3000 - 0x3fff : Interrupt support | |
56 | * 0x4000 - 0x4fff : NACA | |
57 | * 0x6000 : iSeries and common interrupt prologs | |
58 | * 0x9000 - 0x9fff : Initial segment table | |
59 | */ | |
60 | ||
61 | /* | |
62 | * SPRG Usage | |
63 | * | |
64 | * Register Definition | |
65 | * | |
66 | * SPRG0 reserved for hypervisor | |
67 | * SPRG1 temp - used to save gpr | |
68 | * SPRG2 temp - used to save gpr | |
69 | * SPRG3 virt addr of paca | |
70 | */ | |
71 | ||
72 | /* | |
73 | * Entering into this code we make the following assumptions: | |
74 | * For pSeries: | |
75 | * 1. The MMU is off & open firmware is running in real mode. | |
76 | * 2. The kernel is entered at __start | |
77 | * | |
78 | * For iSeries: | |
79 | * 1. The MMU is on (as it always is for iSeries) | |
80 | * 2. The kernel is entered at system_reset_iSeries | |
81 | */ | |
82 | ||
83 | .text | |
84 | .globl _stext | |
85 | _stext: | |
86 | #ifdef CONFIG_PPC_MULTIPLATFORM | |
87 | _GLOBAL(__start) | |
88 | /* NOP this out unconditionally */ | |
89 | BEGIN_FTR_SECTION | |
90 | b .__start_initialization_multiplatform | |
91 | END_FTR_SECTION(0, 1) | |
92 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | |
93 | ||
94 | /* Catch branch to 0 in real mode */ | |
95 | trap | |
96 | #ifdef CONFIG_PPC_ISERIES | |
97 | /* | |
98 | * At offset 0x20, there is a pointer to iSeries LPAR data. | |
99 | * This is required by the hypervisor | |
100 | */ | |
101 | . = 0x20 | |
102 | .llong hvReleaseData-KERNELBASE | |
103 | ||
104 | /* | |
105 | * At offset 0x28 and 0x30 are offsets to the msChunks | |
106 | * array (used by the iSeries LPAR debugger to do translation | |
107 | * between physical addresses and absolute addresses) and | |
108 | * to the pidhash table (also used by the debugger) | |
109 | */ | |
110 | .llong msChunks-KERNELBASE | |
111 | .llong 0 /* pidhash-KERNELBASE SFRXXX */ | |
112 | ||
113 | /* Offset 0x38 - Pointer to start of embedded System.map */ | |
114 | .globl embedded_sysmap_start | |
115 | embedded_sysmap_start: | |
116 | .llong 0 | |
117 | /* Offset 0x40 - Pointer to end of embedded System.map */ | |
118 | .globl embedded_sysmap_end | |
119 | embedded_sysmap_end: | |
120 | .llong 0 | |
121 | ||
122 | #else /* CONFIG_PPC_ISERIES */ | |
123 | ||
124 | /* Secondary processors spin on this value until it goes to 1. */ | |
125 | .globl __secondary_hold_spinloop | |
126 | __secondary_hold_spinloop: | |
127 | .llong 0x0 | |
128 | ||
129 | /* Secondary processors write this value with their cpu # */ | |
130 | /* after they enter the spin loop immediately below. */ | |
131 | .globl __secondary_hold_acknowledge | |
132 | __secondary_hold_acknowledge: | |
133 | .llong 0x0 | |
134 | ||
135 | . = 0x60 | |
136 | /* | |
137 | * The following code is used on pSeries to hold secondary processors | |
138 | * in a spin loop after they have been freed from OpenFirmware, but | |
139 | * before the bulk of the kernel has been relocated. This code | |
140 | * is relocated to physical address 0x60 before prom_init is run. | |
141 | * All of it must fit below the first exception vector at 0x100. | |
142 | */ | |
143 | _GLOBAL(__secondary_hold) | |
144 | mfmsr r24 | |
145 | ori r24,r24,MSR_RI | |
146 | mtmsrd r24 /* RI on */ | |
147 | ||
148 | /* Grab our linux cpu number */ | |
149 | mr r24,r3 | |
150 | ||
151 | /* Tell the master cpu we're here */ | |
152 | /* Relocation is off & we are located at an address less */ | |
153 | /* than 0x100, so only need to grab low order offset. */ | |
154 | std r24,__secondary_hold_acknowledge@l(0) | |
155 | sync | |
156 | ||
157 | /* All secondary cpu's wait here until told to start. */ | |
158 | 100: ld r4,__secondary_hold_spinloop@l(0) | |
159 | cmpdi 0,r4,1 | |
160 | bne 100b | |
161 | ||
162 | #ifdef CONFIG_HMT | |
163 | b .hmt_init | |
164 | #else | |
165 | #ifdef CONFIG_SMP | |
166 | mr r3,r24 | |
167 | b .pSeries_secondary_smp_init | |
168 | #else | |
169 | BUG_OPCODE | |
170 | #endif | |
171 | #endif | |
172 | #endif | |
173 | ||
174 | /* This value is used to mark exception frames on the stack. */ | |
175 | .section ".toc","aw" | |
176 | exception_marker: | |
177 | .tc ID_72656773_68657265[TC],0x7265677368657265 | |
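/* (Note: 0x7265677368657265 is the ASCII string "regshere"; the TC symbol
 * name spells the same bytes in hex.)
 */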
178 | .text | |
179 | ||
180 | /* | |
181 | * The following macros define the code that appears as | |
182 | * the prologue to each of the exception handlers. They | |
183 | * are split into two parts to allow a single kernel binary | |
184 | * to be used for pSeries and iSeries. | |
185 | * LOL. One day... - paulus | |
186 | */ | |
187 | ||
188 | /* | |
189 | * We make as much of the exception code common between native | |
190 | * exception handlers (including pSeries LPAR) and iSeries LPAR | |
191 | * implementations as possible. | |
192 | */ | |
193 | ||
194 | /* | |
195 | * This is the start of the interrupt handlers for pSeries | |
196 | * This code runs with relocation off. | |
197 | */ | |
198 | #define EX_R9 0 | |
199 | #define EX_R10 8 | |
200 | #define EX_R11 16 | |
201 | #define EX_R12 24 | |
202 | #define EX_R13 32 | |
203 | #define EX_SRR0 40 | |
204 | #define EX_R3 40 /* SLB miss saves R3, but not SRR0 */ | |
205 | #define EX_DAR 48 | |
206 | #define EX_LR 48 /* SLB miss saves LR, but not DAR */ | |
207 | #define EX_DSISR 56 | |
208 | #define EX_CCR 60 | |
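/* These are byte offsets into the per-exception save areas in the paca
 * (PACA_EXGEN, PACA_EXMC, PACA_EXSLB), passed to the macros below as
 * "area".  DSISR and CCR are saved as 32-bit words, hence the 4-byte
 * slots at the end.
 */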
209 | ||
210 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ | |
211 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | |
212 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | |
213 | std r10,area+EX_R10(r13); \ | |
214 | std r11,area+EX_R11(r13); \ | |
215 | std r12,area+EX_R12(r13); \ | |
216 | mfspr r9,SPRG1; \ | |
217 | std r9,area+EX_R13(r13); \ | |
218 | mfcr r9; \ | |
219 | clrrdi r12,r13,32; /* get high part of &label */ \ | |
220 | mfmsr r10; \ | |
221 | mfspr r11,SRR0; /* save SRR0 */ \ | |
222 | ori r12,r12,(label)@l; /* virt addr of handler */ \ | |
223 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ | |
224 | mtspr SRR0,r12; \ | |
225 | mfspr r12,SRR1; /* and SRR1 */ \ | |
226 | mtspr SRR1,r10; \ | |
227 | rfid; \ | |
228 | b . /* prevent speculative execution */ | |
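/* The rfid above does the real work: SRR0 has been loaded with the virtual
 * address of the handler and SRR1 with an MSR that has IR, DR and RI set,
 * so returning from this (real mode) prolog switches relocation on and
 * lands in the virtual-mode common handler.
 */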
229 | ||
230 | /* | |
231 | * This is the start of the interrupt handlers for iSeries | |
232 | * This code runs with relocation on. | |
233 | */ | |
234 | #define EXCEPTION_PROLOG_ISERIES_1(area) \ | |
235 | mfspr r13,SPRG3; /* get paca address into r13 */ \ | |
236 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | |
237 | std r10,area+EX_R10(r13); \ | |
238 | std r11,area+EX_R11(r13); \ | |
239 | std r12,area+EX_R12(r13); \ | |
240 | mfspr r9,SPRG1; \ | |
241 | std r9,area+EX_R13(r13); \ | |
242 | mfcr r9 | |
243 | ||
244 | #define EXCEPTION_PROLOG_ISERIES_2 \ | |
245 | mfmsr r10; \ | |
246 | ld r11,PACALPPACA+LPPACASRR0(r13); \ | |
247 | ld r12,PACALPPACA+LPPACASRR1(r13); \ | |
248 | ori r10,r10,MSR_RI; \ | |
249 | mtmsrd r10,1 | |
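/* On iSeries the interrupted SRR0/SRR1 values are found in the lppaca
 * (placed there by the hypervisor) rather than in the SRRs themselves,
 * which is why they are fetched from PACALPPACA above.
 */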
250 | ||
251 | /* | |
252 | * The common exception prolog is used for all except a few exceptions | |
253 | * such as a segment miss on a kernel address. We have to be prepared | |
254 | * to take another exception from the point where we first touch the | |
255 | * kernel stack onwards. | |
256 | * | |
257 | * On entry r13 points to the paca, r9-r13 are saved in the paca, | |
258 | * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and | |
259 | * SRR1, and relocation is on. | |
260 | */ | |
261 | #define EXCEPTION_PROLOG_COMMON(n, area) \ | |
262 | andi. r10,r12,MSR_PR; /* See if coming from user */ \ | |
263 | mr r10,r1; /* Save r1 */ \ | |
264 | subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ | |
265 | beq- 1f; \ | |
266 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | |
267 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | |
268 | bge- cr1,bad_stack; /* abort if it is */ \ | |
269 | std r9,_CCR(r1); /* save CR in stackframe */ \ | |
270 | std r11,_NIP(r1); /* save SRR0 in stackframe */ \ | |
271 | std r12,_MSR(r1); /* save SRR1 in stackframe */ \ | |
272 | std r10,0(r1); /* make stack chain pointer */ \ | |
273 | std r0,GPR0(r1); /* save r0 in stackframe */ \ | |
274 | std r10,GPR1(r1); /* save r1 in stackframe */ \ | |
275 | std r2,GPR2(r1); /* save r2 in stackframe */ \ | |
276 | SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ | |
277 | SAVE_2GPRS(7, r1); /* save r7, r8 in stackframe */ \ | |
278 | ld r9,area+EX_R9(r13); /* move r9, r10 to stackframe */ \ | |
279 | ld r10,area+EX_R10(r13); \ | |
280 | std r9,GPR9(r1); \ | |
281 | std r10,GPR10(r1); \ | |
282 | ld r9,area+EX_R11(r13); /* move r11 - r13 to stackframe */ \ | |
283 | ld r10,area+EX_R12(r13); \ | |
284 | ld r11,area+EX_R13(r13); \ | |
285 | std r9,GPR11(r1); \ | |
286 | std r10,GPR12(r1); \ | |
287 | std r11,GPR13(r1); \ | |
288 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | |
289 | mflr r9; /* save LR in stackframe */ \ | |
290 | std r9,_LINK(r1); \ | |
291 | mfctr r10; /* save CTR in stackframe */ \ | |
292 | std r10,_CTR(r1); \ | |
293 | mfspr r11,XER; /* save XER in stackframe */ \ | |
294 | std r11,_XER(r1); \ | |
295 | li r9,(n)+1; \ | |
296 | std r9,_TRAP(r1); /* set trap number */ \ | |
297 | li r10,0; \ | |
298 | ld r11,exception_marker@toc(r2); \ | |
299 | std r10,RESULT(r1); /* clear regs->result */ \ | |
300 | std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ | |
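/* Note that the trap value stored above is (n)+1: the low bit flags that
 * the non-volatile GPRs have not been saved in the frame yet; .save_nvgprs
 * saves them and clears the bit.
 */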
301 | ||
302 | /* | |
303 | * Exception vectors. | |
304 | */ | |
305 | #define STD_EXCEPTION_PSERIES(n, label) \ | |
306 | . = n; \ | |
307 | .globl label##_pSeries; \ | |
308 | label##_pSeries: \ | |
309 | HMT_MEDIUM; \ | |
310 | mtspr SPRG1,r13; /* save r13 */ \ | |
311 | RUNLATCH_ON(r13); \
312 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common)
313 | ||
314 | #define STD_EXCEPTION_ISERIES(n, label, area) \ | |
315 | .globl label##_iSeries; \ | |
316 | label##_iSeries: \ | |
317 | HMT_MEDIUM; \ | |
318 | mtspr SPRG1,r13; /* save r13 */ \ | |
319 | RUNLATCH_ON(r13); \
320 | EXCEPTION_PROLOG_ISERIES_1(area); \
321 | EXCEPTION_PROLOG_ISERIES_2; \ | |
322 | b label##_common | |
323 | ||
324 | #define MASKABLE_EXCEPTION_ISERIES(n, label) \ | |
325 | .globl label##_iSeries; \ | |
326 | label##_iSeries: \ | |
327 | HMT_MEDIUM; \ | |
328 | mtspr SPRG1,r13; /* save r13 */ \ | |
329 | RUNLATCH_ON(r13); \
330 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN); \
331 | lbz r10,PACAPROCENABLED(r13); \ | |
332 | cmpwi 0,r10,0; \ | |
333 | beq- label##_iSeries_masked; \ | |
334 | EXCEPTION_PROLOG_ISERIES_2; \ | |
335 | b label##_common; \ | |
336 | ||
337 | #ifdef DO_SOFT_DISABLE | |
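/* iSeries soft-disable: "disabling" interrupts just records the previous
 * soft-enable state in SOFTE(r1) and clears PACAPROCENABLED; MSR_EE is
 * deliberately left (in fact forced) on so the shared processor can still
 * take the interrupt, and the *_iSeries_masked handlers below record the
 * event (decrementer) or simply return.
 */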
338 | #define DISABLE_INTS \ | |
339 | lbz r10,PACAPROCENABLED(r13); \ | |
340 | li r11,0; \ | |
341 | std r10,SOFTE(r1); \ | |
342 | mfmsr r10; \ | |
343 | stb r11,PACAPROCENABLED(r13); \ | |
344 | ori r10,r10,MSR_EE; \ | |
345 | mtmsrd r10,1 | |
346 | ||
347 | #define ENABLE_INTS \ | |
348 | lbz r10,PACAPROCENABLED(r13); \ | |
349 | mfmsr r11; \ | |
350 | std r10,SOFTE(r1); \ | |
351 | ori r11,r11,MSR_EE; \ | |
352 | mtmsrd r11,1 | |
353 | ||
354 | #else /* hard enable/disable interrupts */ | |
355 | #define DISABLE_INTS | |
356 | ||
357 | #define ENABLE_INTS \ | |
358 | ld r12,_MSR(r1); \ | |
359 | mfmsr r11; \ | |
360 | rlwimi r11,r12,0,MSR_EE; \ | |
361 | mtmsrd r11,1 | |
362 | ||
363 | #endif | |
364 | ||
365 | #define STD_EXCEPTION_COMMON(trap, label, hdlr) \ | |
366 | .align 7; \ | |
367 | .globl label##_common; \ | |
368 | label##_common: \ | |
369 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | |
370 | DISABLE_INTS; \ | |
371 | bl .save_nvgprs; \ | |
372 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | |
373 | bl hdlr; \ | |
374 | b .ret_from_except | |
375 | ||
376 | #define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \ | |
377 | .align 7; \ | |
378 | .globl label##_common; \ | |
379 | label##_common: \ | |
380 | EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ | |
381 | DISABLE_INTS; \ | |
382 | addi r3,r1,STACK_FRAME_OVERHEAD; \ | |
383 | bl hdlr; \ | |
384 | b .ret_from_except_lite | |
385 | ||
386 | /* | |
387 | * Start of pSeries system interrupt routines | |
388 | */ | |
389 | . = 0x100 | |
390 | .globl __start_interrupts | |
391 | __start_interrupts: | |
392 | ||
393 | STD_EXCEPTION_PSERIES(0x100, system_reset) | |
394 | ||
395 | . = 0x200 | |
396 | _machine_check_pSeries: | |
397 | HMT_MEDIUM | |
398 | mtspr SPRG1,r13 /* save r13 */ | |
399 | RUNLATCH_ON(r13)
400 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
401 | ||
402 | . = 0x300 | |
403 | .globl data_access_pSeries | |
404 | data_access_pSeries: | |
405 | HMT_MEDIUM | |
406 | mtspr SPRG1,r13 | |
407 | BEGIN_FTR_SECTION | |
408 | mtspr SPRG2,r12 | |
409 | mfspr r13,DAR | |
410 | mfspr r12,DSISR | |
411 | srdi r13,r13,60 | |
412 | rlwimi r13,r12,16,0x20 | |
413 | mfcr r12 | |
414 | cmpwi r13,0x2c | |
415 | beq .do_stab_bolted_pSeries | |
416 | mtcrf 0x80,r12 | |
417 | mfspr r12,SPRG2 | |
418 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | |
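/* The feature section above is only left in on CPUs without an SLB
 * (segment-table machines): 0x2c is (DAR >> 60) == 0xc with the DSISR
 * segment-miss bit folded in, i.e. a segment-table miss on a kernel
 * (0xC...) address, which goes straight to .do_stab_bolted_pSeries.
 */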
419 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | |
420 | ||
421 | . = 0x380 | |
422 | .globl data_access_slb_pSeries | |
423 | data_access_slb_pSeries: | |
424 | HMT_MEDIUM | |
425 | mtspr SPRG1,r13 | |
426 | RUNLATCH_ON(r13)
427 | mfspr r13,SPRG3 /* get paca address into r13 */
428 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | |
429 | std r10,PACA_EXSLB+EX_R10(r13) | |
430 | std r11,PACA_EXSLB+EX_R11(r13) | |
431 | std r12,PACA_EXSLB+EX_R12(r13) | |
432 | std r3,PACA_EXSLB+EX_R3(r13) | |
433 | mfspr r9,SPRG1 | |
434 | std r9,PACA_EXSLB+EX_R13(r13) | |
435 | mfcr r9 | |
436 | mfspr r12,SRR1 /* and SRR1 */ | |
437 | mfspr r3,DAR | |
438 | b .do_slb_miss /* Rel. branch works in real mode */ | |
439 | ||
440 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | |
441 | ||
442 | . = 0x480 | |
443 | .globl instruction_access_slb_pSeries | |
444 | instruction_access_slb_pSeries: | |
445 | HMT_MEDIUM | |
446 | mtspr SPRG1,r13 | |
447 | RUNLATCH_ON(r13)
448 | mfspr r13,SPRG3 /* get paca address into r13 */
449 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | |
450 | std r10,PACA_EXSLB+EX_R10(r13) | |
451 | std r11,PACA_EXSLB+EX_R11(r13) | |
452 | std r12,PACA_EXSLB+EX_R12(r13) | |
453 | std r3,PACA_EXSLB+EX_R3(r13) | |
454 | mfspr r9,SPRG1 | |
455 | std r9,PACA_EXSLB+EX_R13(r13) | |
456 | mfcr r9 | |
457 | mfspr r12,SRR1 /* and SRR1 */ | |
458 | mfspr r3,SRR0 /* SRR0 is faulting address */ | |
459 | b .do_slb_miss /* Rel. branch works in real mode */ | |
460 | ||
461 | STD_EXCEPTION_PSERIES(0x500, hardware_interrupt) | |
462 | STD_EXCEPTION_PSERIES(0x600, alignment) | |
463 | STD_EXCEPTION_PSERIES(0x700, program_check) | |
464 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | |
465 | STD_EXCEPTION_PSERIES(0x900, decrementer) | |
466 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | |
467 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | |
468 | ||
469 | . = 0xc00 | |
470 | .globl system_call_pSeries | |
471 | system_call_pSeries: | |
472 | HMT_MEDIUM | |
473 | RUNLATCH_ON(r9)
474 | mr r9,r13
475 | mfmsr r10 | |
476 | mfspr r13,SPRG3 | |
477 | mfspr r11,SRR0 | |
478 | clrrdi r12,r13,32 | |
479 | oris r12,r12,system_call_common@h | |
480 | ori r12,r12,system_call_common@l | |
481 | mtspr SRR0,r12 | |
482 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI | |
483 | mfspr r12,SRR1 | |
484 | mtspr SRR1,r10 | |
485 | rfid | |
486 | b . /* prevent speculative execution */ | |
487 | ||
488 | STD_EXCEPTION_PSERIES(0xd00, single_step) | |
489 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | |
490 | ||
491 | /* We need to deal with the Altivec unavailable exception | |
492 | * here which is at 0xf20, thus in the middle of the | |
493 | * prolog code of the PerformanceMonitor one. A little | |
494 | * trickery is thus necessary | |
495 | */ | |
496 | . = 0xf00 | |
497 | b performance_monitor_pSeries | |
498 | ||
499 | STD_EXCEPTION_PSERIES(0xf20, altivec_unavailable) | |
500 | ||
501 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | |
502 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | |
503 | ||
504 | /* moved from 0xf00 */ | |
505 | STD_EXCEPTION_PSERIES(0x3000, performance_monitor) | |
506 | ||
507 | . = 0x3100 | |
508 | _GLOBAL(do_stab_bolted_pSeries) | |
509 | mtcrf 0x80,r12 | |
510 | mfspr r12,SPRG2 | |
511 | EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted) | |
512 | ||
513 | ||
514 | /* Space for the naca. Architected to be located at real address | |
515 | * NACA_PHYS_ADDR. Various tools rely on this location being fixed. | |
516 | * The first dword of the naca is required by iSeries LPAR to | |
517 | * point to itVpdAreas. On pSeries native, this value is not used. | |
518 | */ | |
519 | . = NACA_PHYS_ADDR | |
520 | .globl __end_interrupts | |
521 | __end_interrupts: | |
522 | #ifdef CONFIG_PPC_ISERIES | |
523 | .globl naca | |
524 | naca: | |
525 | .llong itVpdAreas
526 | .llong 0 /* xRamDisk */
527 | .llong 0 /* xRamDiskSize */
528 | |
529 | . = 0x6100 | |
530 | ||
531 | /*** ISeries-LPAR interrupt handlers ***/ | |
532 | ||
533 | STD_EXCEPTION_ISERIES(0x200, machine_check, PACA_EXMC) | |
534 | ||
535 | .globl data_access_iSeries | |
536 | data_access_iSeries: | |
537 | mtspr SPRG1,r13 | |
538 | BEGIN_FTR_SECTION | |
539 | mtspr SPRG2,r12 | |
540 | mfspr r13,DAR | |
541 | mfspr r12,DSISR | |
542 | srdi r13,r13,60 | |
543 | rlwimi r13,r12,16,0x20 | |
544 | mfcr r12 | |
545 | cmpwi r13,0x2c | |
546 | beq .do_stab_bolted_iSeries | |
547 | mtcrf 0x80,r12 | |
548 | mfspr r12,SPRG2 | |
549 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | |
550 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXGEN) | |
551 | EXCEPTION_PROLOG_ISERIES_2 | |
552 | b data_access_common | |
553 | ||
554 | .do_stab_bolted_iSeries: | |
555 | mtcrf 0x80,r12 | |
556 | mfspr r12,SPRG2 | |
557 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | |
558 | EXCEPTION_PROLOG_ISERIES_2 | |
559 | b .do_stab_bolted | |
560 | ||
561 | .globl data_access_slb_iSeries | |
562 | data_access_slb_iSeries: | |
563 | mtspr SPRG1,r13 /* save r13 */ | |
564 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | |
565 | std r3,PACA_EXSLB+EX_R3(r13) | |
566 | ld r12,PACALPPACA+LPPACASRR1(r13) | |
567 | mfspr r3,DAR | |
568 | b .do_slb_miss | |
569 | ||
570 | STD_EXCEPTION_ISERIES(0x400, instruction_access, PACA_EXGEN) | |
571 | ||
572 | .globl instruction_access_slb_iSeries | |
573 | instruction_access_slb_iSeries: | |
574 | mtspr SPRG1,r13 /* save r13 */ | |
575 | EXCEPTION_PROLOG_ISERIES_1(PACA_EXSLB) | |
576 | std r3,PACA_EXSLB+EX_R3(r13) | |
577 | ld r12,PACALPPACA+LPPACASRR1(r13) | |
578 | ld r3,PACALPPACA+LPPACASRR0(r13) | |
579 | b .do_slb_miss | |
580 | ||
581 | MASKABLE_EXCEPTION_ISERIES(0x500, hardware_interrupt) | |
582 | STD_EXCEPTION_ISERIES(0x600, alignment, PACA_EXGEN) | |
583 | STD_EXCEPTION_ISERIES(0x700, program_check, PACA_EXGEN) | |
584 | STD_EXCEPTION_ISERIES(0x800, fp_unavailable, PACA_EXGEN) | |
585 | MASKABLE_EXCEPTION_ISERIES(0x900, decrementer) | |
586 | STD_EXCEPTION_ISERIES(0xa00, trap_0a, PACA_EXGEN) | |
587 | STD_EXCEPTION_ISERIES(0xb00, trap_0b, PACA_EXGEN) | |
588 | ||
589 | .globl system_call_iSeries | |
590 | system_call_iSeries: | |
591 | mr r9,r13 | |
592 | mfspr r13,SPRG3 | |
593 | EXCEPTION_PROLOG_ISERIES_2 | |
594 | b system_call_common | |
595 | ||
596 | STD_EXCEPTION_ISERIES( 0xd00, single_step, PACA_EXGEN) | |
597 | STD_EXCEPTION_ISERIES( 0xe00, trap_0e, PACA_EXGEN) | |
598 | STD_EXCEPTION_ISERIES( 0xf00, performance_monitor, PACA_EXGEN) | |
599 | ||
600 | .globl system_reset_iSeries | |
601 | system_reset_iSeries: | |
602 | mfspr r13,SPRG3 /* Get paca address */ | |
603 | mfmsr r24 | |
604 | ori r24,r24,MSR_RI | |
605 | mtmsrd r24 /* RI on */ | |
606 | lhz r24,PACAPACAINDEX(r13) /* Get processor # */ | |
607 | cmpwi 0,r24,0 /* Are we processor 0? */ | |
608 | beq .__start_initialization_iSeries /* Start up the first processor */ | |
609 | mfspr r4,SPRN_CTRLF
610 | li r5,CTRL_RUNLATCH /* Turn off the run light */
611 | andc r4,r4,r5
612 | mtspr SPRN_CTRLT,r4
613 | |
614 | 1: | |
615 | HMT_LOW | |
616 | #ifdef CONFIG_SMP | |
617 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | |
618 | * should start */ | |
619 | sync | |
620 | LOADADDR(r3,current_set) | |
621 | sldi r28,r24,3 /* get current_set[cpu#] */ | |
622 | ldx r3,r3,r28 | |
623 | addi r1,r3,THREAD_SIZE | |
624 | subi r1,r1,STACK_FRAME_OVERHEAD | |
625 | ||
626 | cmpwi 0,r23,0 | |
627 | beq iSeries_secondary_smp_loop /* Loop until told to go */ | |
628 | #ifdef SECONDARY_PROCESSORS | |
629 | bne .__secondary_start /* Loop until told to go */ | |
630 | #endif | |
631 | iSeries_secondary_smp_loop: | |
632 | /* Let the Hypervisor know we are alive */ | |
633 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | |
634 | lis r3,0x8002 | |
635 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
636 | #else /* CONFIG_SMP */ | |
637 | /* Yield the processor. This is required for non-SMP kernels | |
638 | which are running on multi-threaded machines. */ | |
639 | lis r3,0x8000 | |
640 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | |
641 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ | |
642 | li r4,0 /* "yield timed" */ | |
643 | li r5,-1 /* "yield forever" */ | |
644 | #endif /* CONFIG_SMP */ | |
645 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | |
646 | sc /* Invoke the hypervisor via a system call */ | |
647 | mfspr r13,SPRG3 /* Put r13 back ???? */ | |
648 | b 1b /* If SMP not configured, secondaries | |
649 | * loop forever */ | |
650 | ||
651 | .globl decrementer_iSeries_masked | |
652 | decrementer_iSeries_masked: | |
653 | li r11,1 | |
654 | stb r11,PACALPPACA+LPPACADECRINT(r13) | |
655 | lwz r12,PACADEFAULTDECR(r13) | |
656 | mtspr SPRN_DEC,r12 | |
657 | /* fall through */ | |
658 | ||
659 | .globl hardware_interrupt_iSeries_masked | |
660 | hardware_interrupt_iSeries_masked: | |
661 | mtcrf 0x80,r9 /* Restore regs */ | |
662 | ld r11,PACALPPACA+LPPACASRR0(r13) | |
663 | ld r12,PACALPPACA+LPPACASRR1(r13) | |
664 | mtspr SRR0,r11 | |
665 | mtspr SRR1,r12 | |
666 | ld r9,PACA_EXGEN+EX_R9(r13) | |
667 | ld r10,PACA_EXGEN+EX_R10(r13) | |
668 | ld r11,PACA_EXGEN+EX_R11(r13) | |
669 | ld r12,PACA_EXGEN+EX_R12(r13) | |
670 | ld r13,PACA_EXGEN+EX_R13(r13) | |
671 | rfid | |
672 | b . /* prevent speculative execution */ | |
673 | #endif | |
674 | ||
675 | /* | |
676 | * Data area reserved for FWNMI option. | |
677 | */ | |
678 | . = 0x7000
679 | .globl fwnmi_data_area | |
680 | fwnmi_data_area: | |
681 | ||
682 | /* | |
683 | * Vectors for the FWNMI option. Share common code. | |
684 | */ | |
685 | . = 0x8000 | |
686 | .globl system_reset_fwnmi | |
687 | system_reset_fwnmi: | |
688 | HMT_MEDIUM | |
689 | mtspr SPRG1,r13 /* save r13 */ | |
690 | RUNLATCH_ON(r13)
691 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
692 | .globl machine_check_fwnmi | |
693 | machine_check_fwnmi: | |
694 | HMT_MEDIUM | |
695 | mtspr SPRG1,r13 /* save r13 */ | |
696 | RUNLATCH_ON(r13)
697 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
698 | ||
699 | /* | |
700 | * Space for the initial segment table | |
701 | * For LPAR, the hypervisor must fill in at least one entry | |
702 | * before we get control (with relocate on) | |
703 | */ | |
704 | . = STAB0_PHYS_ADDR | |
705 | .globl __start_stab | |
706 | __start_stab: | |
707 | ||
708 | . = (STAB0_PHYS_ADDR + PAGE_SIZE) | |
709 | .globl __end_stab | |
710 | __end_stab: | |
711 | ||
712 | ||
713 | /*** Common interrupt handlers ***/ | |
714 | ||
715 | STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) | |
716 | ||
717 | /* | |
718 | * Machine check is different because we use a different | |
719 | * save area: PACA_EXMC instead of PACA_EXGEN. | |
720 | */ | |
721 | .align 7 | |
722 | .globl machine_check_common | |
723 | machine_check_common: | |
724 | EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC) | |
725 | DISABLE_INTS | |
726 | bl .save_nvgprs | |
727 | addi r3,r1,STACK_FRAME_OVERHEAD | |
728 | bl .machine_check_exception | |
729 | b .ret_from_except | |
730 | ||
731 | STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) | |
732 | STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) | |
733 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | |
734 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | |
735 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | |
736 | STD_EXCEPTION_COMMON(0xf00, performance_monitor, .performance_monitor_exception) | |
737 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | |
738 | #ifdef CONFIG_ALTIVEC | |
739 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) | |
740 | #else | |
741 | STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) | |
742 | #endif | |
743 | ||
744 | /* | |
745 | * Here we have detected that the kernel stack pointer is bad. | |
746 | * R9 contains the saved CR, r13 points to the paca, | |
747 | * r10 contains the (bad) kernel stack pointer, | |
748 | * r11 and r12 contain the saved SRR0 and SRR1. | |
749 | * We switch to using the paca guard page as an emergency stack, | |
750 | * save the registers there, and call kernel_bad_stack(), which panics. | |
751 | */ | |
752 | bad_stack: | |
753 | ld r1,PACAEMERGSP(r13) | |
754 | subi r1,r1,64+INT_FRAME_SIZE | |
755 | std r9,_CCR(r1) | |
756 | std r10,GPR1(r1) | |
757 | std r11,_NIP(r1) | |
758 | std r12,_MSR(r1) | |
759 | mfspr r11,DAR | |
760 | mfspr r12,DSISR | |
761 | std r11,_DAR(r1) | |
762 | std r12,_DSISR(r1) | |
763 | mflr r10 | |
764 | mfctr r11 | |
765 | mfxer r12 | |
766 | std r10,_LINK(r1) | |
767 | std r11,_CTR(r1) | |
768 | std r12,_XER(r1) | |
769 | SAVE_GPR(0,r1) | |
770 | SAVE_GPR(2,r1) | |
771 | SAVE_4GPRS(3,r1) | |
772 | SAVE_2GPRS(7,r1) | |
773 | SAVE_10GPRS(12,r1) | |
774 | SAVE_10GPRS(22,r1) | |
775 | addi r11,r1,INT_FRAME_SIZE | |
776 | std r11,0(r1) | |
777 | li r12,0 | |
778 | std r12,0(r11) | |
779 | ld r2,PACATOC(r13) | |
780 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
781 | bl .kernel_bad_stack | |
782 | b 1b | |
783 | ||
784 | /* | |
785 | * Return from an exception with minimal checks. | |
786 | * The caller is assumed to have done EXCEPTION_PROLOG_COMMON. | |
787 | * If interrupts have been enabled, or anything has been | |
788 | * done that might have changed the scheduling status of | |
789 | * any task or sent any task a signal, you should use | |
790 | * ret_from_except or ret_from_except_lite instead of this. | |
791 | */ | |
792 | fast_exception_return: | |
793 | ld r12,_MSR(r1) | |
794 | ld r11,_NIP(r1) | |
795 | andi. r3,r12,MSR_RI /* check if RI is set */ | |
796 | beq- unrecov_fer | |
797 | ld r3,_CCR(r1) | |
798 | ld r4,_LINK(r1) | |
799 | ld r5,_CTR(r1) | |
800 | ld r6,_XER(r1) | |
801 | mtcr r3 | |
802 | mtlr r4 | |
803 | mtctr r5 | |
804 | mtxer r6 | |
805 | REST_GPR(0, r1) | |
806 | REST_8GPRS(2, r1) | |
807 | ||
808 | mfmsr r10 | |
809 | clrrdi r10,r10,2 /* clear RI (LE is 0 already) */ | |
810 | mtmsrd r10,1 | |
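/* With RI clear, an exception taken between here and the rfid (which would
 * clobber the SRR0/SRR1 we are about to load) is reported as unrecoverable
 * instead of silently corrupting the return state.
 */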
811 | ||
812 | mtspr SRR1,r12 | |
813 | mtspr SRR0,r11 | |
814 | REST_4GPRS(10, r1) | |
815 | ld r1,GPR1(r1) | |
816 | rfid | |
817 | b . /* prevent speculative execution */ | |
818 | ||
819 | unrecov_fer: | |
820 | bl .save_nvgprs | |
821 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
822 | bl .unrecoverable_exception | |
823 | b 1b | |
824 | ||
825 | /* | |
826 | * Here r13 points to the paca, r9 contains the saved CR, | |
827 | * SRR0 and SRR1 are saved in r11 and r12, | |
828 | * r9 - r13 are saved in paca->exgen. | |
829 | */ | |
830 | .align 7 | |
831 | .globl data_access_common | |
832 | data_access_common: | |
833 | RUNLATCH_ON(r10) /* It won't fit in the 0x300 handler */
834 | mfspr r10,DAR
835 | std r10,PACA_EXGEN+EX_DAR(r13) | |
836 | mfspr r10,DSISR | |
837 | stw r10,PACA_EXGEN+EX_DSISR(r13) | |
838 | EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) | |
839 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
840 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
841 | li r5,0x300 | |
842 | b .do_hash_page /* Try to handle as hpte fault */ | |
843 | ||
844 | .align 7 | |
845 | .globl instruction_access_common | |
846 | instruction_access_common: | |
847 | EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) | |
848 | ld r3,_NIP(r1) | |
849 | andis. r4,r12,0x5820 | |
850 | li r5,0x400 | |
851 | b .do_hash_page /* Try to handle as hpte fault */ | |
852 | ||
853 | .align 7 | |
854 | .globl hardware_interrupt_common | |
855 | .globl hardware_interrupt_entry | |
856 | hardware_interrupt_common: | |
857 | EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN) | |
858 | hardware_interrupt_entry: | |
859 | DISABLE_INTS | |
860 | addi r3,r1,STACK_FRAME_OVERHEAD | |
861 | bl .do_IRQ | |
862 | b .ret_from_except_lite | |
863 | ||
864 | .align 7 | |
865 | .globl alignment_common | |
866 | alignment_common: | |
867 | mfspr r10,DAR | |
868 | std r10,PACA_EXGEN+EX_DAR(r13) | |
869 | mfspr r10,DSISR | |
870 | stw r10,PACA_EXGEN+EX_DSISR(r13) | |
871 | EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN) | |
872 | ld r3,PACA_EXGEN+EX_DAR(r13) | |
873 | lwz r4,PACA_EXGEN+EX_DSISR(r13) | |
874 | std r3,_DAR(r1) | |
875 | std r4,_DSISR(r1) | |
876 | bl .save_nvgprs | |
877 | addi r3,r1,STACK_FRAME_OVERHEAD | |
878 | ENABLE_INTS | |
879 | bl .alignment_exception | |
880 | b .ret_from_except | |
881 | ||
882 | .align 7 | |
883 | .globl program_check_common | |
884 | program_check_common: | |
885 | EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) | |
886 | bl .save_nvgprs | |
887 | addi r3,r1,STACK_FRAME_OVERHEAD | |
888 | ENABLE_INTS | |
889 | bl .program_check_exception | |
890 | b .ret_from_except | |
891 | ||
892 | .align 7 | |
893 | .globl fp_unavailable_common | |
894 | fp_unavailable_common: | |
895 | EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) | |
896 | bne .load_up_fpu /* if from user, just load it up */ | |
897 | bl .save_nvgprs | |
898 | addi r3,r1,STACK_FRAME_OVERHEAD | |
899 | ENABLE_INTS | |
900 | bl .kernel_fp_unavailable_exception | |
901 | BUG_OPCODE | |
902 | ||
903 | .align 7 | |
904 | .globl altivec_unavailable_common | |
905 | altivec_unavailable_common: | |
906 | EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN) | |
907 | #ifdef CONFIG_ALTIVEC | |
908 | BEGIN_FTR_SECTION
909 | bne .load_up_altivec /* if from user, just load it up */
910 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
911 | #endif
912 | bl .save_nvgprs | |
913 | addi r3,r1,STACK_FRAME_OVERHEAD | |
914 | ENABLE_INTS | |
915 | bl .altivec_unavailable_exception | |
916 | b .ret_from_except | |
917 | ||
918 | /* | |
919 | * Hash table stuff | |
920 | */ | |
921 | .align 7 | |
922 | _GLOBAL(do_hash_page) | |
923 | std r3,_DAR(r1) | |
924 | std r4,_DSISR(r1) | |
925 | ||
926 | andis. r0,r4,0xa450 /* weird error? */ | |
927 | bne- .handle_page_fault /* if not, try to insert a HPTE */ | |
928 | BEGIN_FTR_SECTION | |
929 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | |
930 | bne- .do_ste_alloc /* If so handle it */ | |
931 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | |
932 | ||
933 | /* | |
934 | * We need to set the _PAGE_USER bit if MSR_PR is set or if we are | |
935 | * accessing a userspace segment (even from the kernel). We assume | |
936 | * kernel addresses always have the high bit set. | |
937 | */ | |
938 | rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */ | |
939 | rotldi r0,r3,15 /* Move high bit into MSR_PR posn */ | |
940 | orc r0,r12,r0 /* MSR_PR | ~high_bit */ | |
941 | rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */ | |
942 | ori r4,r4,1 /* add _PAGE_PRESENT */ | |
943 | rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */ | |
944 | ||
945 | /* | |
946 | * On iSeries, we soft-disable interrupts here, then | |
947 | * hard-enable interrupts so that the hash_page code can spin on | |
948 | * the hash_table_lock without problems on a shared processor. | |
949 | */ | |
950 | DISABLE_INTS | |
951 | ||
952 | /* | |
953 | * r3 contains the faulting address | |
954 | * r4 contains the required access permissions | |
955 | * r5 contains the trap number | |
956 | * | |
957 | * at return r3 = 0 for success | |
958 | */ | |
959 | bl .hash_page /* build HPTE if possible */ | |
960 | cmpdi r3,0 /* see if hash_page succeeded */ | |
961 | ||
962 | #ifdef DO_SOFT_DISABLE | |
963 | /* | |
964 | * If we had interrupts soft-enabled at the point where the | |
965 | * DSI/ISI occurred, and an interrupt came in during hash_page, | |
966 | * handle it now. | |
967 | * We jump to ret_from_except_lite rather than fast_exception_return | |
968 | * because ret_from_except_lite will check for and handle pending | |
969 | * interrupts if necessary. | |
970 | */ | |
971 | beq .ret_from_except_lite | |
972 | /* For a hash failure, we don't bother re-enabling interrupts */ | |
973 | ble- 12f | |
974 | ||
975 | /* | |
976 | * hash_page couldn't handle it, set soft interrupt enable back | |
977 | * to what it was before the trap. Note that .local_irq_restore | |
978 | * handles any interrupts pending at this point. | |
979 | */ | |
980 | ld r3,SOFTE(r1) | |
981 | bl .local_irq_restore | |
982 | b 11f | |
983 | #else | |
984 | beq fast_exception_return /* Return from exception on success */ | |
985 | ble- 12f /* Failure return from hash_page */ | |
986 | ||
987 | /* fall through */ | |
988 | #endif | |
989 | ||
990 | /* Here we have a page fault that hash_page can't handle. */ | |
991 | _GLOBAL(handle_page_fault) | |
992 | ENABLE_INTS | |
993 | 11: ld r4,_DAR(r1) | |
994 | ld r5,_DSISR(r1) | |
995 | addi r3,r1,STACK_FRAME_OVERHEAD | |
996 | bl .do_page_fault | |
997 | cmpdi r3,0 | |
998 | beq+ .ret_from_except_lite | |
999 | bl .save_nvgprs | |
1000 | mr r5,r3 | |
1001 | addi r3,r1,STACK_FRAME_OVERHEAD | |
1002 | lwz r4,_DAR(r1) | |
1003 | bl .bad_page_fault | |
1004 | b .ret_from_except | |
1005 | ||
1006 | /* We have a page fault that hash_page could handle but HV refused | |
1007 | * the PTE insertion | |
1008 | */ | |
1009 | 12: bl .save_nvgprs | |
1010 | addi r3,r1,STACK_FRAME_OVERHEAD | |
1011 | lwz r4,_DAR(r1) | |
1012 | bl .low_hash_fault | |
1013 | b .ret_from_except | |
1014 | ||
1015 | /* here we have a segment miss */ | |
1016 | _GLOBAL(do_ste_alloc) | |
1017 | bl .ste_allocate /* try to insert stab entry */ | |
1018 | cmpdi r3,0 | |
1019 | beq+ fast_exception_return | |
1020 | b .handle_page_fault | |
1021 | ||
1022 | /* | |
1023 | * r13 points to the PACA, r9 contains the saved CR, | |
1024 | * r11 and r12 contain the saved SRR0 and SRR1. | |
1025 | * r9 - r13 are saved in paca->exslb. | |
1026 | * We assume we aren't going to take any exceptions during this procedure. | |
1027 | * We assume (DAR >> 60) == 0xc. | |
1028 | */ | |
1029 | .align 7 | |
1030 | _GLOBAL(do_stab_bolted) | |
1031 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
1032 | std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ | |
1033 | ||
1034 | /* Hash to the primary group */ | |
1035 | ld r10,PACASTABVIRT(r13) | |
1036 | mfspr r11,DAR | |
1037 | srdi r11,r11,28 | |
1038 | rldimi r10,r11,7,52 /* r10 = first ste of the group */ | |
1039 | ||
1040 | /* Calculate VSID */ | |
1041 | /* This is a kernel address, so protovsid = ESID */ | |
1042 | ASM_VSID_SCRAMBLE(r11, r9) | |
1043 | rldic r9,r11,12,16 /* r9 = vsid << 12 */ | |
1044 | ||
1045 | /* Search the primary group for a free entry */ | |
1046 | 1: ld r11,0(r10) /* Test valid bit of the current ste */ | |
1047 | andi. r11,r11,0x80 | |
1048 | beq 2f | |
1049 | addi r10,r10,16 | |
1050 | andi. r11,r10,0x70 | |
1051 | bne 1b | |
1052 | ||
1053 | /* Stick to only searching the primary group for now. */
1054 | /* At least for now, we use a very simple random castout scheme */ | |
1055 | /* Use the TB as a random number ; OR in 1 to avoid entry 0 */ | |
1056 | mftb r11 | |
1057 | rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */ | |
1058 | ori r11,r11,0x10 | |
1059 | ||
1060 | /* r10 currently points to an ste one past the group of interest */ | |
1061 | /* make it point to the randomly selected entry */ | |
1062 | subi r10,r10,128 | |
1063 | or r10,r10,r11 /* r10 is the entry to invalidate */ | |
1064 | ||
1065 | isync /* mark the entry invalid */ | |
1066 | ld r11,0(r10) | |
1067 | rldicl r11,r11,56,1 /* clear the valid bit */ | |
1068 | rotldi r11,r11,8 | |
1069 | std r11,0(r10) | |
1070 | sync | |
1071 | ||
1072 | clrrdi r11,r11,28 /* Get the esid part of the ste */ | |
1073 | slbie r11 | |
1074 | ||
1075 | 2: std r9,8(r10) /* Store the vsid part of the ste */ | |
1076 | eieio | |
1077 | ||
1078 | mfspr r11,DAR /* Get the new esid */ | |
1079 | clrrdi r11,r11,28 /* Permits a full 32b of ESID */ | |
1080 | ori r11,r11,0x90 /* Turn on valid and kp */ | |
1081 | std r11,0(r10) /* Put new entry back into the stab */ | |
1082 | ||
1083 | sync | |
1084 | ||
1085 | /* All done -- return from exception. */ | |
1086 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
1087 | ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */ | |
1088 | ||
1089 | andi. r10,r12,MSR_RI | |
1090 | beq- unrecov_slb | |
1091 | ||
1092 | mtcrf 0x80,r9 /* restore CR */ | |
1093 | ||
1094 | mfmsr r10 | |
1095 | clrrdi r10,r10,2 | |
1096 | mtmsrd r10,1 | |
1097 | ||
1098 | mtspr SRR0,r11 | |
1099 | mtspr SRR1,r12 | |
1100 | ld r9,PACA_EXSLB+EX_R9(r13) | |
1101 | ld r10,PACA_EXSLB+EX_R10(r13) | |
1102 | ld r11,PACA_EXSLB+EX_R11(r13) | |
1103 | ld r12,PACA_EXSLB+EX_R12(r13) | |
1104 | ld r13,PACA_EXSLB+EX_R13(r13) | |
1105 | rfid | |
1106 | b . /* prevent speculative execution */ | |
1107 | ||
1108 | /* | |
1109 | * r13 points to the PACA, r9 contains the saved CR, | |
1110 | * r11 and r12 contain the saved SRR0 and SRR1. | |
1111 | * r3 has the faulting address | |
1112 | * r9 - r13 are saved in paca->exslb. | |
1113 | * r3 is saved in paca->slb_r3 | |
1114 | * We assume we aren't going to take any exceptions during this procedure. | |
1115 | */ | |
1116 | _GLOBAL(do_slb_miss) | |
1117 | mflr r10 | |
1118 | ||
1119 | stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ | |
1120 | std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ | |
1121 | ||
1122 | bl .slb_allocate /* handle it */ | |
1123 | ||
1124 | /* All done -- return from exception. */ | |
1125 | ||
1126 | ld r10,PACA_EXSLB+EX_LR(r13) | |
1127 | ld r3,PACA_EXSLB+EX_R3(r13) | |
1128 | lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ | |
1129 | #ifdef CONFIG_PPC_ISERIES | |
1130 | ld r11,PACALPPACA+LPPACASRR0(r13) /* get SRR0 value */ | |
1131 | #endif /* CONFIG_PPC_ISERIES */ | |
1132 | ||
1133 | mtlr r10 | |
1134 | ||
1135 | andi. r10,r12,MSR_RI /* check for unrecoverable exception */ | |
1136 | beq- unrecov_slb | |
1137 | ||
1138 | .machine push | |
1139 | .machine "power4" | |
1140 | mtcrf 0x80,r9 | |
1141 | mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ | |
1142 | .machine pop | |
1143 | ||
1144 | #ifdef CONFIG_PPC_ISERIES | |
1145 | mtspr SRR0,r11 | |
1146 | mtspr SRR1,r12 | |
1147 | #endif /* CONFIG_PPC_ISERIES */ | |
1148 | ld r9,PACA_EXSLB+EX_R9(r13) | |
1149 | ld r10,PACA_EXSLB+EX_R10(r13) | |
1150 | ld r11,PACA_EXSLB+EX_R11(r13) | |
1151 | ld r12,PACA_EXSLB+EX_R12(r13) | |
1152 | ld r13,PACA_EXSLB+EX_R13(r13) | |
1153 | rfid | |
1154 | b . /* prevent speculative execution */ | |
1155 | ||
1156 | unrecov_slb: | |
1157 | EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) | |
1158 | DISABLE_INTS | |
1159 | bl .save_nvgprs | |
1160 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | |
1161 | bl .unrecoverable_exception | |
1162 | b 1b | |
1163 | ||
1164 | ||
1165 | /* | |
1166 | * On pSeries, secondary processors spin in the following code. | |
1167 | * At entry, r3 = this processor's number (physical cpu id) | |
1168 | */ | |
1169 | _GLOBAL(pSeries_secondary_smp_init) | |
1170 | mr r24,r3 | |
1171 | ||
1172 | /* turn on 64-bit mode */ | |
1173 | bl .enable_64b_mode | |
1174 | isync | |
1175 | ||
1176 | /* Copy some CPU settings from CPU 0 */ | |
1177 | bl .__restore_cpu_setup | |
1178 | ||
1179 | /* Set up a paca value for this processor. Since we have the | |
1180 | * physical cpu id in r24, we need to search the pacas to find
1181 | * which logical id maps to our physical one.
1182 | */ | |
1183 | LOADADDR(r13, paca) /* Get base vaddr of paca array */ | |
1184 | li r5,0 /* logical cpu id */ | |
1185 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | |
1186 | cmpw r6,r24 /* Compare to our id */ | |
1187 | beq 2f | |
1188 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | |
1189 | addi r5,r5,1 | |
1190 | cmpwi r5,NR_CPUS | |
1191 | blt 1b | |
1192 | ||
1193 | mr r3,r24 /* not found, copy phys to r3 */
1194 | b .kexec_wait /* next kernel might do better */
1195 | |
1196 | 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | |
1197 | /* From now on, r24 is expected to be the logical cpuid */
1198 | mr r24,r5 | |
1199 | 3: HMT_LOW | |
1200 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | |
1201 | /* start. */ | |
1202 | sync | |
1203 | ||
1204 | /* Create a temp kernel stack for use before relocation is on. */ | |
1205 | ld r1,PACAEMERGSP(r13) | |
1206 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1207 | ||
1208 | cmpwi 0,r23,0 | |
1209 | #ifdef CONFIG_SMP | |
1210 | #ifdef SECONDARY_PROCESSORS | |
1211 | bne .__secondary_start | |
1212 | #endif | |
1213 | #endif | |
1214 | b 3b /* Loop until told to go */ | |
1215 | ||
1216 | #ifdef CONFIG_PPC_ISERIES | |
1217 | _STATIC(__start_initialization_iSeries) | |
1218 | /* Clear out the BSS */ | |
1219 | LOADADDR(r11,__bss_stop) | |
1220 | LOADADDR(r8,__bss_start) | |
1221 | sub r11,r11,r8 /* bss size */ | |
1222 | addi r11,r11,7 /* round up to an even double word */ | |
1223 | rldicl. r11,r11,61,3 /* shift right by 3 */ | |
1224 | beq 4f | |
1225 | addi r8,r8,-8 | |
1226 | li r0,0 | |
1227 | mtctr r11 /* zero this many doublewords */ | |
1228 | 3: stdu r0,8(r8) | |
1229 | bdnz 3b | |
1230 | 4: | |
1231 | LOADADDR(r1,init_thread_union) | |
1232 | addi r1,r1,THREAD_SIZE | |
1233 | li r0,0 | |
1234 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1235 | ||
1236 | LOADADDR(r3,cpu_specs) | |
1237 | LOADADDR(r4,cur_cpu_spec) | |
1238 | li r5,0 | |
1239 | bl .identify_cpu | |
1240 | ||
1241 | LOADADDR(r2,__toc_start) | |
1242 | addi r2,r2,0x4000 | |
1243 | addi r2,r2,0x4000 | |
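/* r2 ends up pointing 0x8000 into the TOC so the usual signed 16-bit
 * offsets can reach the whole 64K; it is added in two halves because
 * 0x8000 does not fit in a signed 16-bit immediate.
 */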
1244 | ||
1245 | bl .iSeries_early_setup | |
1246 | ||
1247 | /* relocation is on at this point */ | |
1248 | ||
1249 | b .start_here_common | |
1250 | #endif /* CONFIG_PPC_ISERIES */ | |
1251 | ||
1252 | #ifdef CONFIG_PPC_MULTIPLATFORM | |
1253 | ||
1254 | _STATIC(__mmu_off) | |
1255 | mfmsr r3 | |
1256 | andi. r0,r3,MSR_IR|MSR_DR | |
1257 | beqlr | |
1258 | andc r3,r3,r0 | |
1259 | mtspr SPRN_SRR0,r4 | |
1260 | mtspr SPRN_SRR1,r3 | |
1261 | sync | |
1262 | rfid | |
1263 | b . /* prevent speculative execution */ | |
1264 | ||
1265 | ||
1266 | /* | |
1267 | * Here is our main kernel entry point. We support currently 2 kind of entries | |
1268 | * depending on the value of r5. | |
1269 | * | |
1270 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content | |
1271 | * in r3...r7 | |
1272 | * | |
1273 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the | |
1274 | * DT block, r4 is a physical pointer to the kernel itself | |
1275 | * | |
1276 | */ | |
1277 | _GLOBAL(__start_initialization_multiplatform) | |
1278 | /* | |
1279 | * Are we booted from a PROM OF-type client interface?
1280 | */ | |
1281 | cmpldi cr0,r5,0 | |
1282 | bne .__boot_from_prom /* yes -> prom */ | |
1283 | ||
1284 | /* Save parameters */ | |
1285 | mr r31,r3 | |
1286 | mr r30,r4 | |
1287 | ||
1288 | /* Make sure we are running in 64 bits mode */ | |
1289 | bl .enable_64b_mode | |
1290 | ||
1291 | /* Setup some critical 970 SPRs before switching MMU off */ | |
1292 | bl .__970_cpu_preinit | |
1293 | ||
1294 | /* cpu # */ | |
1295 | li r24,0 | |
1296 | ||
1297 | /* Switch off MMU if not already */ | |
1298 | LOADADDR(r4, .__after_prom_start - KERNELBASE) | |
1299 | add r4,r4,r30 | |
1300 | bl .__mmu_off | |
1301 | b .__after_prom_start | |
1302 | ||
1303 | _STATIC(__boot_from_prom) | |
1304 | /* Save parameters */ | |
1305 | mr r31,r3 | |
1306 | mr r30,r4 | |
1307 | mr r29,r5 | |
1308 | mr r28,r6 | |
1309 | mr r27,r7 | |
1310 | ||
1311 | /* Make sure we are running in 64 bits mode */ | |
1312 | bl .enable_64b_mode | |
1313 | ||
1314 | /* put a relocation offset into r3 */ | |
1315 | bl .reloc_offset | |
1316 | ||
1317 | LOADADDR(r2,__toc_start) | |
1318 | addi r2,r2,0x4000 | |
1319 | addi r2,r2,0x4000 | |
1320 | ||
1321 | /* Relocate the TOC from a virt addr to a real addr */ | |
1322 | sub r2,r2,r3 | |
1323 | ||
1324 | /* Restore parameters */ | |
1325 | mr r3,r31 | |
1326 | mr r4,r30 | |
1327 | mr r5,r29 | |
1328 | mr r6,r28 | |
1329 | mr r7,r27 | |
1330 | ||
1331 | /* Do all of the interaction with OF client interface */ | |
1332 | bl .prom_init | |
1333 | /* We never return */ | |
1334 | trap | |
1335 | ||
1336 | /* | |
1337 | * At this point, r3 contains the physical address we are running at, | |
1338 | * returned by prom_init() | |
1339 | */ | |
1340 | _STATIC(__after_prom_start) | |
1341 | ||
1342 | /* | |
1343 | * We need to run with __start at physical address 0. | |
1344 | * This will leave some code in the first 256B of | |
1345 | * real memory, which are reserved for software use. | |
1346 | * The remainder of the first page is loaded with the fixed | |
1347 | * interrupt vectors. The next two pages are filled with | |
1348 | * unknown exception placeholders. | |
1349 | * | |
1350 | * Note: This process overwrites the OF exception vectors. | |
1351 | * r26 == relocation offset | |
1352 | * r27 == KERNELBASE | |
1353 | */ | |
1354 | bl .reloc_offset | |
1355 | mr r26,r3 | |
1356 | SET_REG_TO_CONST(r27,KERNELBASE) | |
1357 | ||
1358 | li r3,0 /* target addr */ | |
1359 | ||
1360 | // XXX FIXME: Use phys returned by OF (r30) | |
1361 | sub r4,r27,r26 /* source addr */ | |
1362 | /* current address of _start */ | |
1363 | /* i.e. where we are running */ | |
1364 | /* the source addr */ | |
1365 | ||
1366 | LOADADDR(r5,copy_to_here) /* # bytes of memory to copy */ | |
1367 | sub r5,r5,r27 | |
1368 | ||
1369 | li r6,0x100 /* Start offset, the first 0x100 */ | |
1370 | /* bytes were copied earlier. */ | |
1371 | ||
1372 | bl .copy_and_flush /* copy the first n bytes */ | |
1373 | /* this includes the code being */ | |
1374 | /* executed here. */ | |
1375 | ||
1376 | LOADADDR(r0, 4f) /* Jump to the copy of this code */ | |
1377 | mtctr r0 /* that we just made/relocated */ | |
1378 | bctr | |
1379 | ||
1380 | 4: LOADADDR(r5,klimit) | |
1381 | sub r5,r5,r26 | |
1382 | ld r5,0(r5) /* get the value of klimit */ | |
1383 | sub r5,r5,r27 | |
1384 | bl .copy_and_flush /* copy the rest */ | |
1385 | b .start_here_multiplatform | |
1386 | ||
1387 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | |
1388 | ||
1389 | /* | |
1390 | * Copy routine used to copy the kernel to start at physical address 0 | |
1391 | * and flush and invalidate the caches as needed. | |
1392 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset | |
1393 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. | |
1394 | * | |
1395 | * Note: this routine *only* clobbers r0, r6 and lr | |
1396 | */ | |
1397 | _GLOBAL(copy_and_flush) | |
1398 | addi r5,r5,-8 | |
1399 | addi r6,r6,-8 | |
1400 | 4: li r0,16 /* Use the least common */ | |
1401 | /* denominator cache line */ | |
1402 | /* size. This results in */ | |
1403 | /* extra cache line flushes */ | |
1404 | /* but operation is correct. */ | |
1405 | /* Can't get cache line size */ | |
1406 | /* from NACA as it is being */ | |
1407 | /* moved too. */ | |
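/* (16 doublewords per inner loop, i.e. one dcbst/icbi per 128 bytes.) */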
1408 | ||
1409 | mtctr r0 /* put # words/line in ctr */ | |
1410 | 3: addi r6,r6,8 /* copy a cache line */ | |
1411 | ldx r0,r6,r4 | |
1412 | stdx r0,r6,r3 | |
1413 | bdnz 3b | |
1414 | dcbst r6,r3 /* write it to memory */ | |
1415 | sync | |
1416 | icbi r6,r3 /* flush the icache line */ | |
1417 | cmpld 0,r6,r5 | |
1418 | blt 4b | |
1419 | sync | |
1420 | addi r5,r5,8 | |
1421 | addi r6,r6,8 | |
1422 | blr | |
1423 | ||
1424 | .align 8 | |
1425 | copy_to_here: | |
1426 | ||
1427 | /* | |
1428 | * load_up_fpu(unused, unused, tsk) | |
1429 | * Disable FP for the task which had the FPU previously, | |
1430 | * and save its floating-point registers in its thread_struct. | |
1431 | * Enables the FPU for use in the kernel on return. | |
1432 | * On SMP we know the fpu is free, since we give it up every | |
1433 | * switch (ie, no lazy save of the FP registers). | |
1434 | * On entry: r13 == 'current' && last_task_used_math != 'current' | |
1435 | */ | |
1436 | _STATIC(load_up_fpu) | |
1437 | mfmsr r5 /* grab the current MSR */ | |
1438 | ori r5,r5,MSR_FP | |
1439 | mtmsrd r5 /* enable use of fpu now */ | |
1440 | isync | |
1441 | /* | |
1442 | * For SMP, we don't do lazy FPU switching because it just gets too | |
1443 | * horrendously complex, especially when a task switches from one CPU | |
1444 | * to another. Instead we call giveup_fpu in switch_to. | |
1445 | * | |
1446 | */ | |
1447 | #ifndef CONFIG_SMP | |
1448 | ld r3,last_task_used_math@got(r2) | |
1449 | ld r4,0(r3) | |
1450 | cmpdi 0,r4,0 | |
1451 | beq 1f | |
1452 | /* Save FP state to last_task_used_math's THREAD struct */ | |
1453 | addi r4,r4,THREAD | |
1454 | SAVE_32FPRS(0, r4) | |
1455 | mffs fr0 | |
1456 | stfd fr0,THREAD_FPSCR(r4) | |
1457 | /* Disable FP for last_task_used_math */ | |
1458 | ld r5,PT_REGS(r4) | |
1459 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1460 | li r6,MSR_FP|MSR_FE0|MSR_FE1 | |
1461 | andc r4,r4,r6 | |
1462 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1463 | 1: | |
1464 | #endif /* CONFIG_SMP */ | |
1465 | /* enable use of FP after return */ | |
1466 | ld r4,PACACURRENT(r13) | |
1467 | addi r5,r4,THREAD /* Get THREAD */ | |
1468 | ld r4,THREAD_FPEXC_MODE(r5) | |
1469 | ori r12,r12,MSR_FP | |
1470 | or r12,r12,r4 | |
1471 | std r12,_MSR(r1) | |
1472 | lfd fr0,THREAD_FPSCR(r5) | |
1473 | mtfsf 0xff,fr0 | |
1474 | REST_32FPRS(0, r5) | |
1475 | #ifndef CONFIG_SMP | |
1476 | /* Update last_task_used_math to 'current' */ | |
1477 | subi r4,r5,THREAD /* Back to 'current' */ | |
1478 | std r4,0(r3) | |
1479 | #endif /* CONFIG_SMP */ | |
1480 | /* restore registers and return */ | |
1481 | b fast_exception_return | |
1482 | ||
1483 | /* | |
1484 | * disable_kernel_fp() | |
1485 | * Disable the FPU. | |
1486 | */ | |
1487 | _GLOBAL(disable_kernel_fp) | |
1488 | mfmsr r3 | |
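/* The two rotates below clear MSR_FP without loading a mask constant:
 * rotate the FP bit up to bit 0, mask it off, then rotate back.
 */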
1489 | rldicl r0,r3,(63-MSR_FP_LG),1 | |
1490 | rldicl r3,r0,(MSR_FP_LG+1),0 | |
1491 | mtmsrd r3 /* disable use of fpu now */ | |
1492 | isync | |
1493 | blr | |
1494 | ||
1495 | /* | |
1496 | * giveup_fpu(tsk) | |
1497 | * Disable FP for the task given as the argument, | |
1498 | * and save the floating-point registers in its thread_struct. | |
1499 | * Enables the FPU for use in the kernel on return. | |
1500 | */ | |
1501 | _GLOBAL(giveup_fpu) | |
1502 | mfmsr r5 | |
1503 | ori r5,r5,MSR_FP | |
1504 | mtmsrd r5 /* enable use of fpu now */ | |
1505 | isync | |
1506 | cmpdi 0,r3,0 | |
1507 | beqlr- /* if no previous owner, done */ | |
1508 | addi r3,r3,THREAD /* want THREAD of task */ | |
1509 | ld r5,PT_REGS(r3) | |
1510 | cmpdi 0,r5,0 | |
1511 | SAVE_32FPRS(0, r3) | |
1512 | mffs fr0 | |
1513 | stfd fr0,THREAD_FPSCR(r3) | |
1514 | beq 1f | |
1515 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1516 | li r3,MSR_FP|MSR_FE0|MSR_FE1 | |
1517 | andc r4,r4,r3 /* disable FP for previous task */ | |
1518 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1519 | 1: | |
1520 | #ifndef CONFIG_SMP | |
1521 | li r5,0 | |
1522 | ld r4,last_task_used_math@got(r2) | |
1523 | std r5,0(r4) | |
1524 | #endif /* CONFIG_SMP */ | |
1525 | blr | |
1526 | ||
1527 | ||
1528 | #ifdef CONFIG_ALTIVEC | |
1529 | ||
1530 | /* | |
1531 | * load_up_altivec(unused, unused, tsk) | |
1532 | * Disable VMX for the task which had it previously, | |
1533 | * and save its vector registers in its thread_struct. | |
1534 | * Enables the VMX for use in the kernel on return. | |
1535 | * On SMP we know the VMX is free, since we give it up every | |
1536 | * switch (ie, no lazy save of the vector registers). | |
1537 | * On entry: r13 == 'current' && last_task_used_altivec != 'current' | |
1538 | */ | |
1539 | _STATIC(load_up_altivec) | |
1540 | mfmsr r5 /* grab the current MSR */ | |
1541 | oris r5,r5,MSR_VEC@h | |
1542 | mtmsrd r5 /* enable use of VMX now */ | |
1543 | isync | |
1544 | ||
1545 | /* | |
1546 | * For SMP, we don't do lazy VMX switching because it just gets too | |
1547 | * horrendously complex, especially when a task switches from one CPU | |
1548 | * to another. Instead we call giveup_altivec in switch_to.
1549 | * VRSAVE isn't dealt with here, that is done in the normal context | |
1550 | * switch code. Note that we could rely on vrsave value to eventually | |
1551 | * avoid saving all of the VREGs here... | |
1552 | */ | |
1553 | #ifndef CONFIG_SMP | |
1554 | ld r3,last_task_used_altivec@got(r2) | |
1555 | ld r4,0(r3) | |
1556 | cmpdi 0,r4,0 | |
1557 | beq 1f | |
1558 | /* Save VMX state to last_task_used_altivec's THREAD struct */ | |
1559 | addi r4,r4,THREAD | |
1560 | SAVE_32VRS(0,r5,r4) | |
1561 | mfvscr vr0 | |
1562 | li r10,THREAD_VSCR | |
1563 | stvx vr0,r10,r4 | |
1564 | /* Disable VMX for last_task_used_altivec */ | |
1565 | ld r5,PT_REGS(r4) | |
1566 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1567 | lis r6,MSR_VEC@h | |
1568 | andc r4,r4,r6 | |
1569 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1570 | 1: | |
1571 | #endif /* CONFIG_SMP */ | |
1572 | /* Hack: if we get an altivec unavailable trap with VRSAVE | |
1573 | * set to all zeros, we assume this is a broken application | |
1574 | * that fails to set it properly, and thus we switch it to | |
1575 | * all 1's | |
1576 | */ | |
1577 | mfspr r4,SPRN_VRSAVE | |
1578 | cmpdi 0,r4,0 | |
1579 | bne+ 1f | |
1580 | li r4,-1 | |
1581 | mtspr SPRN_VRSAVE,r4 | |
1582 | 1: | |
1583 | /* enable use of VMX after return */ | |
1584 | ld r4,PACACURRENT(r13) | |
1585 | addi r5,r4,THREAD /* Get THREAD */ | |
1586 | oris r12,r12,MSR_VEC@h | |
1587 | std r12,_MSR(r1) | |
1588 | li r4,1 | |
1589 | li r10,THREAD_VSCR | |
1590 | stw r4,THREAD_USED_VR(r5) | |
1591 | lvx vr0,r10,r5 | |
1592 | mtvscr vr0 | |
1593 | REST_32VRS(0,r4,r5) | |
1594 | #ifndef CONFIG_SMP | |
1595 | /* Update last_task_used_altivec to 'current' */
1596 | subi r4,r5,THREAD /* Back to 'current' */ | |
1597 | std r4,0(r3) | |
1598 | #endif /* CONFIG_SMP */ | |
1599 | /* restore registers and return */ | |
1600 | b fast_exception_return | |
1601 | ||
1602 | /* | |
1603 | * disable_kernel_altivec() | |
1604 | * Disable the VMX. | |
1605 | */ | |
1606 | _GLOBAL(disable_kernel_altivec) | |
1607 | mfmsr r3 | |
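/* Same two-rotate trick as disable_kernel_fp above, clearing MSR_VEC. */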
1608 | rldicl r0,r3,(63-MSR_VEC_LG),1 | |
1609 | rldicl r3,r0,(MSR_VEC_LG+1),0 | |
1610 | mtmsrd r3 /* disable use of VMX now */ | |
1611 | isync | |
1612 | blr | |
1613 | ||
1614 | /* | |
1615 | * giveup_altivec(tsk) | |
1616 | * Disable VMX for the task given as the argument, | |
1617 | * and save the vector registers in its thread_struct. | |
1618 | * Enables the VMX for use in the kernel on return. | |
1619 | */ | |
1620 | _GLOBAL(giveup_altivec) | |
1621 | mfmsr r5 | |
1622 | oris r5,r5,MSR_VEC@h | |
1623 | mtmsrd r5 /* enable use of VMX now */ | |
1624 | isync | |
1625 | cmpdi 0,r3,0 | |
1626 | beqlr- /* if no previous owner, done */ | |
1627 | addi r3,r3,THREAD /* want THREAD of task */ | |
1628 | ld r5,PT_REGS(r3) | |
1629 | cmpdi 0,r5,0 | |
1630 | SAVE_32VRS(0,r4,r3) | |
1631 | mfvscr vr0 | |
1632 | li r4,THREAD_VSCR | |
1633 | stvx vr0,r4,r3 | |
1634 | beq 1f | |
1635 | ld r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1636 | lis r3,MSR_VEC@h | |
1637 | andc r4,r4,r3 /* disable VMX for previous task */ | |
1638 | std r4,_MSR-STACK_FRAME_OVERHEAD(r5) | |
1639 | 1: | |
1640 | #ifndef CONFIG_SMP | |
1641 | li r5,0 | |
1642 | ld r4,last_task_used_altivec@got(r2) | |
1643 | std r5,0(r4) | |
1644 | #endif /* CONFIG_SMP */ | |
1645 | blr | |
1646 | ||
1647 | #endif /* CONFIG_ALTIVEC */ | |
1648 | ||
1649 | #ifdef CONFIG_SMP | |
1650 | #ifdef CONFIG_PPC_PMAC | |
1651 | /* | |
1652 | * On PowerMac, secondary processors start from the reset vector, which | |
1653 | * is temporarily turned into a call to one of the functions below. | |
1654 | */ | |
1655 | .section ".text"; | |
1656 | .align 2 ; | |
1657 | ||
1658 | .globl pmac_secondary_start_1 | |
1659 | pmac_secondary_start_1: | |
1660 | li r24, 1 | |
1661 | b .pmac_secondary_start | |
1662 | ||
1663 | .globl pmac_secondary_start_2 | |
1664 | pmac_secondary_start_2: | |
1665 | li r24, 2 | |
1666 | b .pmac_secondary_start | |
1667 | ||
1668 | .globl pmac_secondary_start_3 | |
1669 | pmac_secondary_start_3: | |
1670 | li r24, 3 | |
1671 | b .pmac_secondary_start | |
1672 | ||
1673 | _GLOBAL(pmac_secondary_start) | |
1674 | /* turn on 64-bit mode */ | |
1675 | bl .enable_64b_mode | |
1676 | isync | |
1677 | ||
1678 | /* Copy some CPU settings from CPU 0 */ | |
1679 | bl .__restore_cpu_setup | |
1680 | ||
1681 | /* pSeries does this early, though we probably don't really need it */ | |
1682 | mfmsr r3 | |
1683 | ori r3,r3,MSR_RI | |
1684 | mtmsrd r3 /* RI on */ | |
1685 | ||
1686 | /* Set up a paca value for this processor. */ | |
1687 | LOADADDR(r4, paca) /* Get base vaddr of paca array */ | |
1688 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | |
1689 | add r13,r13,r4 /* for this processor. */ | |
1690 | mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | |
1691 | ||
1692 | /* Create a temp kernel stack for use before relocation is on. */ | |
1693 | ld r1,PACAEMERGSP(r13) | |
1694 | subi r1,r1,STACK_FRAME_OVERHEAD | |
1695 | ||
1696 | b .__secondary_start | |
1697 | ||
1698 | #endif /* CONFIG_PPC_PMAC */ | |
1699 | ||
1700 | /* | |
1701 | * This function is called after the master CPU has released the | |
1702 | * secondary processors. Execution here is still with relocation off. | |
1703 | * The paca for this processor has the following fields initialized at | |
1704 | * this point: | |
1705 | * 1. Processor number | |
1706 | * 2. Segment table pointer (virtual address) | |
1707 | * On entry the following are set: | |
1708 | * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries | |
1709 | * r24 = cpu# (in Linux terms) | |
1710 | * r13 = paca virtual address | |
1711 | * SPRG3 = paca virtual address | |
1712 | */ | |
1713 | _GLOBAL(__secondary_start) | |
1714 | ||
1715 | HMT_MEDIUM /* Set thread priority to MEDIUM */ | |
1716 | ||
1717 | ld r2,PACATOC(r13) | |
1718 | li r6,0 | |
1719 | stb r6,PACAPROCENABLED(r13) | |
1720 | ||
1721 | #ifndef CONFIG_PPC_ISERIES | |
1722 | /* Initialize the page table pointer register. */ | |
1723 | LOADADDR(r6,_SDR1) | |
1724 | ld r6,0(r6) /* get the value of _SDR1 */ | |
1725 | mtspr SDR1,r6 /* set the htab location */ | |
1726 | #endif | |
1727 | /* Initialize the first segment table (or SLB) entry */ | |
1728 | ld r3,PACASTABVIRT(r13) /* get addr of segment table */ | |
1729 | bl .stab_initialize | |
1730 | ||
1731 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | |
1732 | LOADADDR(r3,current_set) | |
1733 | sldi r28,r24,3 /* get current_set[cpu#] */ | |
1734 | ldx r1,r3,r28 | |
1735 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
1736 | std r1,PACAKSAVE(r13) | |
1737 | ||
1738 | ld r3,PACASTABREAL(r13) /* get raddr of segment table */ | |
1739 | ori r4,r3,1 /* turn on valid bit */ | |
1740 | ||
1741 | #ifdef CONFIG_PPC_ISERIES | |
1742 | li r0,-1 /* hypervisor call */ | |
1743 | li r3,1 | |
1744 | sldi r3,r3,63 /* 0x8000000000000000 */ | |
1745 | ori r3,r3,4 /* 0x8000000000000004 */ | |
1746 | sc /* HvCall_setASR */ | |
1747 | #else | |
1748 | /* set the ASR */ | |
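| /* In a pSeries LPAR on the segment-table CPUs matched by PVR below | |
| * (SStar, IStar, Pulsar) the ASR is set via the H_SET_ASR hcall; | |
| * in every other case we can just write the ASR with mtasr. | |
| */ | |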
1749 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | |
1750 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | |
1751 | cmpldi r3,PLATFORM_PSERIES_LPAR | |
1752 | bne 98f | |
1753 | mfspr r3,PVR | |
1754 | srwi r3,r3,16 | |
1755 | cmpwi r3,0x37 /* SStar */ | |
1756 | beq 97f | |
1757 | cmpwi r3,0x36 /* IStar */ | |
1758 | beq 97f | |
1759 | cmpwi r3,0x34 /* Pulsar */ | |
1760 | bne 98f | |
1761 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | |
1762 | HVSC /* Invoking hcall */ | |
1763 | b 99f | |
1764 | 98: /* !(rpa hypervisor) || !(star) */ | |
1765 | mtasr r4 /* set the stab location */ | |
1766 | 99: | |
1767 | #endif | |
1768 | li r7,0 | |
1769 | mtlr r7 | |
1770 | ||
1771 | /* enable MMU and jump to start_secondary */ | |
1772 | LOADADDR(r3,.start_secondary_prolog) | |
1773 | SET_REG_TO_CONST(r4, MSR_KERNEL) | |
1774 | #ifdef DO_SOFT_DISABLE | |
1775 | ori r4,r4,MSR_EE | |
1776 | #endif | |
1777 | mtspr SRR0,r3 | |
1778 | mtspr SRR1,r4 | |
1779 | rfid | |
1780 | b . /* prevent speculative execution */ | |
1781 | ||
1782 | /* | |
1783 | * Running with relocation on at this point. All we want to do is | |
1784 | * zero the stack back-chain pointer before going into C code. | |
1785 | */ | |
1786 | _GLOBAL(start_secondary_prolog) | |
1787 | li r3,0 | |
1788 | std r3,0(r1) /* Zero the stack frame pointer */ | |
1789 | bl .start_secondary | |
1790 | #endif | |
1791 | ||
1792 | /* | |
1793 | * This subroutine clobbers r11 and r12 | |
1794 | */ | |
1795 | _GLOBAL(enable_64b_mode) | |
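| /* Turn on MSR_SF (64-bit mode) and MSR_ISF (64-bit mode for | |
| * exception entry, on CPUs that implement it) in the current MSR. | |
| */ | |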
1796 | mfmsr r11 /* grab the current MSR */ | |
1797 | li r12,1 | |
1798 | rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG) | |
1799 | or r11,r11,r12 | |
1800 | li r12,1 | |
1801 | rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG) | |
1802 | or r11,r11,r12 | |
1803 | mtmsrd r11 | |
1804 | isync | |
1805 | blr | |
1806 | ||
1807 | #ifdef CONFIG_PPC_MULTIPLATFORM | |
1808 | /* | |
1809 | * This is where the main kernel code starts. | |
1810 | */ | |
1811 | _STATIC(start_here_multiplatform) | |
1812 | /* get a new offset, now that the kernel has moved. */ | |
1813 | bl .reloc_offset | |
1814 | mr r26,r3 | |
1815 | ||
1816 | /* Clear out the BSS. It may have been done in prom_init | |
1817 | * already, but that's irrelevant since prom_init will soon | |
1818 | * be detached from the kernel completely. Besides, we need | |
1819 | * to clear it now for kexec-style entry. | |
1820 | */ | |
1821 | LOADADDR(r11,__bss_stop) | |
1822 | LOADADDR(r8,__bss_start) | |
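| /* Compute the BSS size in doublewords, rounding up; the record | |
| * form of rldicl sets CR0 so an empty BSS skips the clear loop. | |
| */ | |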
1823 | sub r11,r11,r8 /* bss size */ | |
1824 | addi r11,r11,7 /* round up to a multiple of 8 bytes */ | |
1825 | rldicl. r11,r11,61,3 /* shift right by 3 */ | |
1826 | beq 4f | |
1827 | addi r8,r8,-8 | |
1828 | li r0,0 | |
1829 | mtctr r11 /* zero this many doublewords */ | |
1830 | 3: stdu r0,8(r8) | |
1831 | bdnz 3b | |
1832 | 4: | |
1833 | ||
1834 | mfmsr r6 | |
1835 | ori r6,r6,MSR_RI | |
1836 | mtmsrd r6 /* RI on */ | |
1837 | ||
1838 | #ifdef CONFIG_HMT | |
1839 | /* Start up the second thread on cpu 0 */ | |
1840 | mfspr r3,PVR | |
1841 | srwi r3,r3,16 | |
1842 | cmpwi r3,0x34 /* Pulsar */ | |
1843 | beq 90f | |
1844 | cmpwi r3,0x36 /* IStar */ | |
1845 | beq 90f | |
1846 | cmpwi r3,0x37 /* SStar */ | |
1847 | beq 90f | |
1848 | b 91f /* HMT not supported */ | |
1849 | 90: li r3,0 | |
1850 | bl .hmt_start_secondary | |
1851 | 91: | |
1852 | #endif | |
1853 | ||
1854 | /* The following gets the stack and TOC set up with the regs */ | |
1855 | /* pointing to the real addr of the kernel stack. This is */ | |
1856 | /* all done to support the C function call below which sets */ | |
1857 | /* up the htab. This is done because we have relocated the */ | |
1858 | /* kernel but are still running in real mode. */ | |
1859 | ||
1860 | LOADADDR(r3,init_thread_union) | |
1861 | sub r3,r3,r26 | |
1862 | ||
1863 | /* set up a stack pointer (physical address) */ | |
1864 | addi r1,r3,THREAD_SIZE | |
1865 | li r0,0 | |
1866 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1867 | ||
1868 | /* set up the TOC (physical address) */ | |
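| /* The TOC pointer conventionally points 0x8000 bytes past | |
| * __toc_start so the whole signed 16-bit offset range is usable; | |
| * addi only takes a signed 16-bit immediate, hence two 0x4000 adds. | |
| */ | |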
1869 | LOADADDR(r2,__toc_start) | |
1870 | addi r2,r2,0x4000 | |
1871 | addi r2,r2,0x4000 | |
1872 | sub r2,r2,r26 | |
1873 | ||
1874 | LOADADDR(r3,cpu_specs) | |
1875 | sub r3,r3,r26 | |
1876 | LOADADDR(r4,cur_cpu_spec) | |
1877 | sub r4,r4,r26 | |
1878 | mr r5,r26 | |
1879 | bl .identify_cpu | |
1880 | ||
1881 | /* Save some low level config HIDs of CPU0 to be copied to | |
1882 | * other CPUs later on, or used for suspend/resume | |
1883 | */ | |
1884 | bl .__save_cpu_setup | |
1885 | sync | |
1886 | ||
1887 | /* Set up a valid physical PACA pointer in SPRG3 for early_setup. | |
1888 | * Note that boot_cpuid can always be 0 nowadays, since there is | |
1889 | * nowhere it can be initialized differently before we reach this | |
1890 | * code. | |
1891 | */ | |
1892 | LOADADDR(r27, boot_cpuid) | |
1893 | sub r27,r27,r26 | |
1894 | lwz r27,0(r27) | |
1895 | ||
1896 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | |
1897 | mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ | |
1898 | add r13,r13,r24 /* for this processor. */ | |
1899 | sub r13,r13,r26 /* convert to physical addr */ | |
1900 | mtspr SPRG3,r13 /* PPPBBB: Temp... -Peter */ | |
1901 | ||
1902 | /* Do very early kernel initializations, including initial hash table, | |
1903 | * stab and slb setup before we turn on relocation. */ | |
1904 | ||
1905 | /* Restore parameters passed from prom_init/kexec */ | |
1906 | mr r3,r31 | |
1907 | bl .early_setup | |
1908 | ||
1909 | /* set the ASR */ | |
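| /* Same dance as in __secondary_start: use the H_SET_ASR hcall for | |
| * the SStar/IStar/Pulsar processors when running in an LPAR, and | |
| * write the ASR directly otherwise. | |
| */ | |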
1910 | ld r3,PACASTABREAL(r13) | |
1911 | ori r4,r3,1 /* turn on valid bit */ | |
1912 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | |
1913 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | |
1914 | cmpldi r3,PLATFORM_PSERIES_LPAR | |
1915 | bne 98f | |
1916 | mfspr r3,PVR | |
1917 | srwi r3,r3,16 | |
1918 | cmpwi r3,0x37 /* SStar */ | |
1919 | beq 97f | |
1920 | cmpwi r3,0x36 /* IStar */ | |
1921 | beq 97f | |
1922 | cmpwi r3,0x34 /* Pulsar */ | |
1923 | bne 98f | |
1924 | 97: li r3,H_SET_ASR /* hcall = H_SET_ASR */ | |
1925 | HVSC /* Invoking hcall */ | |
1926 | b 99f | |
1927 | 98: /* !(rpa hypervisor) || !(star) */ | |
1928 | mtasr r4 /* set the stab location */ | |
1929 | 99: | |
1930 | /* Set SDR1 (hash table pointer) */ | |
1931 | ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */ | |
1932 | lwz r3,PLATFORM(r3) /* r3 = platform flags */ | |
1933 | /* Test if bit 0 is set (LPAR bit) */ | |
1934 | andi. r3,r3,0x1 | |
1935 | bne 98f | |
1936 | LOADADDR(r6,_SDR1) /* Only if NOT LPAR */ | |
1937 | sub r6,r6,r26 | |
1938 | ld r6,0(r6) /* get the value of _SDR1 */ | |
1939 | mtspr SDR1,r6 /* set the htab location */ | |
1940 | 98: | |
1941 | LOADADDR(r3,.start_here_common) | |
1942 | SET_REG_TO_CONST(r4, MSR_KERNEL) | |
1943 | mtspr SRR0,r3 | |
1944 | mtspr SRR1,r4 | |
1945 | rfid | |
1946 | b . /* prevent speculative execution */ | |
1947 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | |
1948 | ||
1949 | /* This is where all platforms converge execution */ | |
1950 | _STATIC(start_here_common) | |
1951 | /* relocation is on at this point */ | |
1952 | ||
1953 | /* The following code sets up the SP and TOC now that we are */ | |
1954 | /* running with translation enabled. */ | |
1955 | ||
1956 | LOADADDR(r3,init_thread_union) | |
1957 | ||
1958 | /* set up the stack */ | |
1959 | addi r1,r3,THREAD_SIZE | |
1960 | li r0,0 | |
1961 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
1962 | ||
1963 | /* Apply the CPU-specific fixups (nop out sections not relevant | |
1964 | * to this CPU) | |
1965 | */ | |
1966 | li r3,0 | |
1967 | bl .do_cpu_ftr_fixups | |
1968 | ||
1969 | LOADADDR(r26, boot_cpuid) | |
1970 | lwz r26,0(r26) | |
1971 | ||
1972 | LOADADDR(r24, paca) /* Get base vaddr of paca array */ | |
1973 | mulli r13,r26,PACA_SIZE /* Calculate vaddr of right paca */ | |
1974 | add r13,r13,r24 /* for this processor. */ | |
1975 | mtspr SPRG3,r13 | |
1976 | ||
1977 | /* ptr to current */ | |
1978 | LOADADDR(r4,init_task) | |
1979 | std r4,PACACURRENT(r13) | |
1980 | ||
1981 | /* Load the TOC */ | |
1982 | ld r2,PACATOC(r13) | |
1983 | std r1,PACAKSAVE(r13) | |
1984 | ||
1985 | bl .setup_system | |
1986 | ||
1987 | /* Load up the kernel context */ | |
1988 | 5: | |
1989 | #ifdef DO_SOFT_DISABLE | |
1990 | li r5,0 | |
1991 | stb r5,PACAPROCENABLED(r13) /* Soft Disabled */ | |
1992 | mfmsr r5 | |
1993 | ori r5,r5,MSR_EE /* Hard Enabled */ | |
1994 | mtmsrd r5 | |
1995 | #endif | |
1996 | ||
1997 | bl .start_kernel | |
1998 | ||
1999 | _GLOBAL(__setup_cpu_power3) | |
2000 | blr | |
2001 | ||
2002 | _GLOBAL(hmt_init) | |
2003 | #ifdef CONFIG_HMT | |
2004 | LOADADDR(r5, hmt_thread_data) | |
2005 | mfspr r7,PVR | |
2006 | srwi r7,r7,16 | |
2007 | cmpwi r7,0x34 /* Pulsar */ | |
2008 | beq 90f | |
2009 | cmpwi r7,0x36 /* IStar */ | |
2010 | beq 91f | |
2011 | cmpwi r7,0x37 /* SStar */ | |
2012 | beq 91f | |
2013 | b 101f | |
2014 | 90: mfspr r6,PIR | |
2015 | andi. r6,r6,0x1f | |
2016 | b 92f | |
2017 | 91: mfspr r6,PIR | |
2018 | andi. r6,r6,0x3ff | |
2019 | 92: sldi r4,r24,3 | |
2020 | stwx r6,r5,r4 | |
2021 | bl .hmt_start_secondary | |
2022 | b 101f | |
2023 | ||
2024 | __hmt_secondary_hold: | |
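| /* hmt_thread_data is an array of word pairs (PIR value, logical | |
| * cpu number); the clrldi turns its address into a real address | |
| * since we run with relocation off. Walk the array until the first | |
| * word matches our PIR, then load the paired logical cpu into r24. | |
| */ | |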
2025 | LOADADDR(r5, hmt_thread_data) | |
2026 | clrldi r5,r5,4 | |
2027 | li r7,0 | |
2028 | mfspr r6,PIR | |
2029 | mfspr r8,PVR | |
2030 | srwi r8,r8,16 | |
2031 | cmpwi r8,0x34 | |
2032 | bne 93f | |
2033 | andi. r6,r6,0x1f | |
2034 | b 103f | |
2035 | 93: andi. r6,r6,0x3f | |
2036 | ||
2037 | 103: lwzx r8,r5,r7 | |
2038 | cmpw r8,r6 | |
2039 | beq 104f | |
2040 | addi r7,r7,8 | |
2041 | b 103b | |
2042 | ||
2043 | 104: addi r7,r7,4 | |
2044 | lwzx r9,r5,r7 | |
2045 | mr r24,r9 | |
2046 | 101: | |
2047 | #endif | |
2048 | mr r3,r24 | |
2049 | b .pSeries_secondary_smp_init | |
2050 | ||
2051 | #ifdef CONFIG_HMT | |
2052 | _GLOBAL(hmt_start_secondary) | |
2053 | LOADADDR(r4,__hmt_secondary_hold) | |
2054 | clrldi r4,r4,4 | |
2055 | mtspr NIADORM, r4 | |
2056 | mfspr r4, MSRDORM | |
2057 | li r5, -65 | |
2058 | and r4, r4, r5 | |
2059 | mtspr MSRDORM, r4 | |
2060 | lis r4,0xffef | |
2061 | ori r4,r4,0x7403 | |
2062 | mtspr TSC, r4 | |
2063 | li r4,0x1f4 | |
2064 | mtspr TST, r4 | |
2065 | mfspr r4, HID0 | |
2066 | ori r4, r4, 0x1 | |
2067 | mtspr HID0, r4 | |
6dc2f0c7 | 2068 | mfspr r4, SPRN_CTRLF |
1da177e4 | 2069 | oris r4, r4, 0x40 |
6dc2f0c7 | 2070 | mtspr SPRN_CTRLT, r4 |
1da177e4 LT |
2071 | blr |
2072 | #endif | |
2073 | ||
2074 | #if defined(CONFIG_SMP) && !defined(CONFIG_PPC_ISERIES) | |
2075 | _GLOBAL(smp_release_cpus) | |
2076 | /* All secondary cpus are spinning on a common | |
2077 | * spinloop, release them all now so they can start | |
2078 | * to spin on their individual paca spinloops. | |
2080 | * For non-SMP kernels, the secondary cpus never | |
2080 | * get out of the common spinloop. | |
2081 | */ | |
2082 | li r3,1 | |
2083 | LOADADDR(r5,__secondary_hold_spinloop) | |
2084 | std r3,0(r5) | |
2085 | sync | |
2086 | blr | |
2087 | #endif /* CONFIG_SMP && !CONFIG_PPC_ISERIES */ | |
2088 | ||
2089 | ||
2090 | /* | |
2091 | * We put a few things here that have to be page-aligned. | |
2092 | * This stuff goes at the beginning of the data segment, | |
2093 | * which is page-aligned. | |
2094 | */ | |
2095 | .data | |
2096 | .align 12 | |
2097 | .globl sdata | |
2098 | sdata: | |
2099 | .globl empty_zero_page | |
2100 | empty_zero_page: | |
2101 | .space 4096 | |
2102 | ||
2103 | .globl swapper_pg_dir | |
2104 | swapper_pg_dir: | |
2105 | .space 4096 | |
2106 | ||
1da177e4 LT |
2107 | /* |
2108 | * This space gets a copy of optional info passed to us by the bootstrap. | |
2109 | * Used to pass parameters into the kernel like root=/dev/sda1, etc. | |
2110 | */ | |
2111 | .globl cmd_line | |
2112 | cmd_line: | |
2113 | .space COMMAND_LINE_SIZE |