/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 * Adapted for Power Macintosh by Paul Mackerras.
 * Low-level exception handlers and MMU support
 * rewritten by Paul Mackerras.
 * Copyright (C) 1996 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 * This file contains the low-level support and setup for the
 * PowerPC-64 platform, including trap and interrupt dispatch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#include <asm/firmware.h>
#include <asm/page_64.h>
#include <asm/exception.h>
#include <asm/irqflags.h>

/*
 * We lay out physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */

/*
 * SPRG Usage
 *
 * Register     Definition
 *
 * SPRG0        reserved for hypervisor
 * SPRG1        temp - used to save gpr
 * SPRG2        temp - used to save gpr
 * SPRG3        virt addr of paca
 */

/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */

        .text
        .globl  _stext
_stext:
_GLOBAL(__start)
        /* NOP this out unconditionally */
BEGIN_FTR_SECTION
        b       .__start_initialization_multiplatform
END_FTR_SECTION(0, 1)

        /* Catch branch to 0 in real mode */
        trap

/* Secondary processors spin on this value until it becomes nonzero.
 * When it does it contains the real address of the descriptor
 * of the function that the cpu should jump to in order to continue
 * initialization.
 */
        .globl  __secondary_hold_spinloop
__secondary_hold_spinloop:
        .llong  0x0

/* Secondary processors write this value with their cpu # */
/* after they enter the spin loop immediately below.      */
        .globl  __secondary_hold_acknowledge
__secondary_hold_acknowledge:
        .llong  0x0

#ifdef CONFIG_PPC_ISERIES
        /*
         * At offset 0x20, there is a pointer to iSeries LPAR data.
         * This is required by the hypervisor.
         */
        . = 0x20
        .llong  hvReleaseData-KERNELBASE
#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_CRASH_DUMP
        /* This flag is set to 1 by a loader if the kernel should run
         * at the loaded address instead of the linked address.  This
         * is used by kexec-tools to keep the kdump kernel in the
         * crash_kernel region.  The loader is responsible for
         * observing the alignment requirement.
         */
        /* Do not move this variable as kexec-tools knows about it. */
        . = 0x5c
        .globl  __run_at_load
__run_at_load:
        .long   0x72756e30      /* "run0" -- relocate to 0 by default */
#endif

        . = 0x60
/*
 * The following code is used to hold secondary processors
 * in a spin loop after they have entered the kernel, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 * Use .globl here not _GLOBAL because we want __secondary_hold
 * to be the actual text address, not a descriptor.
 */
        .globl  __secondary_hold
__secondary_hold:
        mfmsr   r24
        ori     r24,r24,MSR_RI
        mtmsrd  r24                     /* RI on */

        /* Grab our physical cpu number */
        mr      r24,r3

        /* Tell the master cpu we're here */
        /* Relocation is off & we are located at an address less */
        /* than 0x100, so only need to grab low order offset.    */
        std     r24,__secondary_hold_acknowledge-_stext(0)
        sync

        /* All secondary cpus wait here until told to start. */
100:    ld      r4,__secondary_hold_spinloop-_stext(0)
        cmpdi   0,r4,0
        beq     100b

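        /* A note on the load below, assuming the standard 64-bit ELF
         * ABI: the spinloop value is the address of a function
         * descriptor, whose first doubleword is the actual entry
         * point -- hence the extra dereference before the bctr.
         */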
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
        ld      r4,0(r4)                /* deref function descriptor */
        mtctr   r4
        mr      r3,r24
        bctr
#else
        BUG_OPCODE
#endif

        /* This value is used to mark exception frames on the stack. */
        .section ".toc","aw"
exception_marker:
        .tc     ID_72656773_68657265[TC],0x7265677368657265
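        /* An aside: 0x7265677368657265 is ASCII for "regshere", which
         * makes these marked frames easy to spot in a memory dump. */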
        .text

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 * Code from here to __end_interrupts gets copied down to real
 * address 0x100 when we are running a relocatable kernel.
 * Therefore any relative branches in this section must only
 * branch to labels in this section.
 */
        . = 0x100
        .globl  __start_interrupts
__start_interrupts:

        STD_EXCEPTION_PSERIES(0x100, system_reset)

        . = 0x200
_machine_check_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

        . = 0x300
        .globl  data_access_pSeries
data_access_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
BEGIN_FTR_SECTION
        mtspr   SPRN_SPRG2,r12
        mfspr   r13,SPRN_DAR
        mfspr   r12,SPRN_DSISR
        srdi    r13,r13,60
        rlwimi  r13,r12,16,0x20
        mfcr    r12
        cmpwi   r13,0x2c
        beq     do_stab_bolted_pSeries
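        /* The test above picks out a segment-table miss (DSISR bit
         * 0x00200000, folded into r13 as 0x20 by the rlwimi) on a
         * kernel linear-mapping address (DAR >> 60 == 0xc); together
         * those compare equal to 0x2c and go to the bolted-STE handler.
         */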
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)

        . = 0x380
        .globl  data_access_slb_pSeries
data_access_slb_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_DAR
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        mfcr    r9
#ifdef __DISABLED__
        /* Keep that around for when we re-implement dynamic VSIDs */
        cmpdi   r3,0
        bge     slb_miss_user_pseries
#endif /* __DISABLED__ */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
        mfspr   r12,SPRN_SRR1           /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
        b       .slb_miss_realmode
#else
        /*
         * We can't just use a direct branch to .slb_miss_realmode
         * because the distance from here to there depends on where
         * the kernel ends up being put.
         */
        mfctr   r11
        ld      r10,PACAKBASE(r13)
        LOAD_HANDLER(r10, .slb_miss_realmode)
        mtctr   r10
        bctr
#endif

        STD_EXCEPTION_PSERIES(0x400, instruction_access)

        . = 0x480
        .globl  instruction_access_slb_pSeries
instruction_access_slb_pSeries:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13
        mfspr   r13,SPRN_SPRG3          /* get paca address into r13 */
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
        std     r9,PACA_EXSLB+EX_R9(r13)        /* save r9 - r12 */
        mfcr    r9
#ifdef __DISABLED__
        /* Keep that around for when we re-implement dynamic VSIDs */
        cmpdi   r3,0
        bge     slb_miss_user_pseries
#endif /* __DISABLED__ */
        std     r10,PACA_EXSLB+EX_R10(r13)
        std     r11,PACA_EXSLB+EX_R11(r13)
        std     r12,PACA_EXSLB+EX_R12(r13)
        mfspr   r10,SPRN_SPRG1
        std     r10,PACA_EXSLB+EX_R13(r13)
        mfspr   r12,SPRN_SRR1           /* and SRR1 */
#ifndef CONFIG_RELOCATABLE
        b       .slb_miss_realmode
#else
        mfctr   r11
        ld      r10,PACAKBASE(r13)
        LOAD_HANDLER(r10, .slb_miss_realmode)
        mtctr   r10
        bctr
#endif

        MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
        STD_EXCEPTION_PSERIES(0x600, alignment)
        STD_EXCEPTION_PSERIES(0x700, program_check)
        STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
        MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
        STD_EXCEPTION_PSERIES(0xa00, trap_0a)
        STD_EXCEPTION_PSERIES(0xb00, trap_0b)

        . = 0xc00
        .globl  system_call_pSeries
system_call_pSeries:
        HMT_MEDIUM
BEGIN_FTR_SECTION
        cmpdi   r0,0x1ebe
        beq-    1f
END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
        mr      r9,r13
        mfspr   r13,SPRN_SPRG3
        mfspr   r11,SPRN_SRR0
        ld      r12,PACAKBASE(r13)
        ld      r10,PACAKMSR(r13)
        LOAD_HANDLER(r12, system_call_entry)
        mtspr   SPRN_SRR0,r12
        mfspr   r12,SPRN_SRR1
        mtspr   SPRN_SRR1,r10
        rfid
        b       .       /* prevent speculative execution */

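/* A syscall with r0 == 0x1ebe takes the path below: it simply flips
 * MSR_LE in SRR1 and returns, switching the process's endianness
 * without the cost of a full kernel entry.
 */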
/* Fast LE/BE switch system call */
1:      mfspr   r12,SPRN_SRR1
        xori    r12,r12,MSR_LE
        mtspr   SPRN_SRR1,r12
        rfid            /* return to userspace */
        b       .

        STD_EXCEPTION_PSERIES(0xd00, single_step)
        STD_EXCEPTION_PSERIES(0xe00, trap_0e)

        /* We need to deal with the Altivec unavailable exception here,
         * which is at 0xf20 and thus in the middle of the prolog code
         * of the performance monitor one.  A little trickery is thus
         * necessary: these vectors just branch to handlers placed out
         * of line (after 0x3000 below).
         */
        . = 0xf00
        b       performance_monitor_pSeries

        . = 0xf20
        b       altivec_unavailable_pSeries

        . = 0xf40
        b       vsx_unavailable_pSeries

#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
#endif /* CONFIG_CBE_RAS */
        STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
#endif /* CONFIG_CBE_RAS */
        STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
#ifdef CONFIG_CBE_RAS
        HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
#endif /* CONFIG_CBE_RAS */

        . = 0x3000

/*** pSeries interrupt support ***/

        /* moved from 0xf00 */
        STD_EXCEPTION_PSERIES(., performance_monitor)
        STD_EXCEPTION_PSERIES(., altivec_unavailable)
        STD_EXCEPTION_PSERIES(., vsx_unavailable)

/*
 * An interrupt came in while soft-disabled; clear EE in SRR1,
 * clear paca->hard_enabled and return.
 */
masked_interrupt:
        stb     r10,PACAHARDIRQEN(r13)
        mtcrf   0x80,r9
        ld      r9,PACA_EXGEN+EX_R9(r13)
        mfspr   r10,SPRN_SRR1
        rldicl  r10,r10,48,1            /* clear MSR_EE */
        rotldi  r10,r10,16
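        /* The rldicl/rotldi pair above is a branch-free way to clear
         * one MSR bit: rotating left by 48 brings MSR_EE (bit 48) to
         * the top, rldicl clears that top bit, and the rotldi by 16
         * completes a full 64-bit rotation, restoring all other bits.
         */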
        mtspr   SPRN_SRR1,r10
        ld      r10,PACA_EXGEN+EX_R10(r13)
        mfspr   r13,SPRN_SPRG1
        rfid
        b       .

        .align  7
do_stab_bolted_pSeries:
        mtcrf   0x80,r12
        mfspr   r12,SPRN_SPRG2
        EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)

#ifdef CONFIG_PPC_PSERIES
/*
 * Vectors for the FWNMI option.  Share common code.
 */
        .globl  system_reset_fwnmi
        .align  7
system_reset_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)

        .globl  machine_check_fwnmi
        .align  7
machine_check_fwnmi:
        HMT_MEDIUM
        mtspr   SPRN_SPRG1,r13          /* save r13 */
        EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)

#endif /* CONFIG_PPC_PSERIES */

#ifdef __DISABLED__
/*
 * This is used when the SLB miss handler has to go virtual, which
 * doesn't happen for now but will once we re-implement dynamic
 * VSIDs for shared page tables.
 */
slb_miss_user_pseries:
        std     r10,PACA_EXGEN+EX_R10(r13)
        std     r11,PACA_EXGEN+EX_R11(r13)
        std     r12,PACA_EXGEN+EX_R12(r13)
        mfspr   r10,SPRG1
        ld      r11,PACA_EXSLB+EX_R9(r13)
        ld      r12,PACA_EXSLB+EX_R3(r13)
        std     r10,PACA_EXGEN+EX_R13(r13)
        std     r11,PACA_EXGEN+EX_R9(r13)
        std     r12,PACA_EXGEN+EX_R3(r13)
        clrrdi  r12,r13,32
        mfmsr   r10
        mfspr   r11,SRR0                /* save SRR0 */
        ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
        mtspr   SRR0,r12
        mfspr   r12,SRR1                /* and SRR1 */
        mtspr   SRR1,r10
        rfid
        b       .                       /* prevent spec. execution */
#endif /* __DISABLED__ */

        .align  7
        .globl  __end_interrupts
__end_interrupts:

/*
 * Code from here down to __end_handlers is invoked from the
 * exception prologs above.  Because the prologs assemble the
 * addresses of these handlers using the LOAD_HANDLER macro,
 * which uses an addi instruction, these handlers must be in
 * the first 32k of the kernel image.
 */

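/* For reference, the prolog sequence used above is of the form
 * "ld rX,PACAKBASE(r13); LOAD_HANDLER(rX,label)": the handler's
 * offset from the start of the image is added to the runtime kernel
 * base, and it is that offset which must fit in addi's 16-bit
 * signed immediate -- the source of the 32k restriction.
 */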
/*** Common interrupt handlers ***/

        STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)

        /*
         * Machine check is different because we use a different
         * save area: PACA_EXMC instead of PACA_EXGEN.
         */
        .align  7
        .globl  machine_check_common
machine_check_common:
        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
        FINISH_NAP
        DISABLE_INTS
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .machine_check_exception
        b       .ret_from_except

        STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
        STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
        STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
        STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
        STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
        STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
        STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
#ifdef CONFIG_ALTIVEC
        STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
#else
        STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
#endif
#ifdef CONFIG_CBE_RAS
        STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
        STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
        STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
#endif /* CONFIG_CBE_RAS */

        .align  7
system_call_entry:
        b       system_call_common

/*
 * Here we have detected that the kernel stack pointer is bad.
 * r9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack:
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,64+INT_FRAME_SIZE
        std     r9,_CCR(r1)
        std     r10,GPR1(r1)
        std     r11,_NIP(r1)
        std     r12,_MSR(r1)
        mfspr   r11,SPRN_DAR
        mfspr   r12,SPRN_DSISR
        std     r11,_DAR(r1)
        std     r12,_DSISR(r1)
        mflr    r10
        mfctr   r11
        mfxer   r12
        std     r10,_LINK(r1)
        std     r11,_CTR(r1)
        std     r12,_XER(r1)
        SAVE_GPR(0,r1)
        SAVE_GPR(2,r1)
        SAVE_4GPRS(3,r1)
        SAVE_2GPRS(7,r1)
        SAVE_10GPRS(12,r1)
        SAVE_10GPRS(22,r1)
        lhz     r12,PACA_TRAP_SAVE(r13)
        std     r12,_TRAP(r1)
        addi    r11,r1,INT_FRAME_SIZE
        std     r11,0(r1)
        li      r12,0
        std     r12,0(r11)
        ld      r2,PACATOC(r13)
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .kernel_bad_stack
        b       1b

/*
 * Here r13 points to the paca, r9 contains the saved CR,
 * SRR0 and SRR1 are saved in r11 and r12,
 * r9 - r13 are saved in paca->exgen.
 */
        .align  7
        .globl  data_access_common
data_access_common:
        mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
        li      r5,0x300
        b       .do_hash_page           /* Try to handle as hpte fault */

        .align  7
        .globl  instruction_access_common
instruction_access_common:
        EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
        ld      r3,_NIP(r1)
        andis.  r4,r12,0x5820
        li      r5,0x400
        b       .do_hash_page           /* Try to handle as hpte fault */

/*
 * Here is the common SLB miss handler that is used when going to
 * virtual mode for SLB misses.  It is currently not used.
 */
#ifdef __DISABLED__
        .align  7
        .globl  slb_miss_user_common
slb_miss_user_common:
        mflr    r10
        std     r3,PACA_EXGEN+EX_DAR(r13)
        stw     r9,PACA_EXGEN+EX_CCR(r13)
        std     r10,PACA_EXGEN+EX_LR(r13)
        std     r11,PACA_EXGEN+EX_SRR0(r13)
        bl      .slb_allocate_user

        ld      r10,PACA_EXGEN+EX_LR(r13)
        ld      r3,PACA_EXGEN+EX_R3(r13)
        lwz     r9,PACA_EXGEN+EX_CCR(r13)
        ld      r11,PACA_EXGEN+EX_SRR0(r13)
        mtlr    r10
        beq-    slb_miss_fault

        andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
        beq-    unrecov_user_slb
        mfmsr   r10

.machine push
.machine "power4"
        mtcrf   0x80,r9
.machine pop

        clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
        mtmsrd  r10,1

        mtspr   SRR0,r11
        mtspr   SRR1,r12

        ld      r9,PACA_EXGEN+EX_R9(r13)
        ld      r10,PACA_EXGEN+EX_R10(r13)
        ld      r11,PACA_EXGEN+EX_R11(r13)
        ld      r12,PACA_EXGEN+EX_R12(r13)
        ld      r13,PACA_EXGEN+EX_R13(r13)
        rfid
        b       .

slb_miss_fault:
        EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
        ld      r4,PACA_EXGEN+EX_DAR(r13)
        li      r5,0
        std     r4,_DAR(r1)
        std     r5,_DSISR(r1)
        b       handle_page_fault

unrecov_user_slb:
        EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
        DISABLE_INTS
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

#endif /* __DISABLED__ */


/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL(slb_miss_realmode)
        mflr    r10
#ifdef CONFIG_RELOCATABLE
        mtctr   r11
#endif

        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */

        bl      .slb_allocate_realmode

        /* All done -- return from exception. */

        ld      r10,PACA_EXSLB+EX_LR(r13)
        ld      r3,PACA_EXSLB+EX_R3(r13)
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        ld      r11,PACALPPACAPTR(r13)
        ld      r11,LPPACASRR0(r11)             /* get SRR0 value */
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */

        mtlr    r10

        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
        beq-    2f

.machine        push
.machine        "power4"
        mtcrf   0x80,r9
        mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
.machine        pop

#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
        ld      r11,PACA_EXSLB+EX_R11(r13)
        ld      r12,PACA_EXSLB+EX_R12(r13)
        ld      r13,PACA_EXSLB+EX_R13(r13)
        rfid
        b       .       /* prevent speculative execution */

2:
#ifdef CONFIG_PPC_ISERIES
BEGIN_FW_FTR_SECTION
        b       unrecov_slb
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
#endif /* CONFIG_PPC_ISERIES */
        mfspr   r11,SPRN_SRR0
        ld      r10,PACAKBASE(r13)
        LOAD_HANDLER(r10,unrecov_slb)
        mtspr   SPRN_SRR0,r10
        ld      r10,PACAKMSR(r13)
        mtspr   SPRN_SRR1,r10
        rfid
        b       .

unrecov_slb:
        EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
        DISABLE_INTS
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

        .align  7
        .globl  hardware_interrupt_common
        .globl  hardware_interrupt_entry
hardware_interrupt_common:
        EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
        FINISH_NAP
hardware_interrupt_entry:
        DISABLE_INTS
BEGIN_FTR_SECTION
        bl      .ppc64_runlatch_on
END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_IRQ
        b       .ret_from_except_lite

#ifdef CONFIG_PPC_970_NAP
power4_fixup_nap:
        andc    r9,r9,r10
        std     r9,TI_LOCAL_FLAGS(r11)
        ld      r10,_LINK(r1)           /* make idle task do the */
        std     r10,_NIP(r1)            /* equivalent of a blr */
        blr
#endif

        .align  7
        .globl  alignment_common
alignment_common:
        mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .alignment_exception
        b       .ret_from_except

        .align  7
        .globl  program_check_common
program_check_common:
        EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .program_check_exception
        b       .ret_from_except

        .align  7
        .globl  fp_unavailable_common
fp_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
        bne     1f                      /* if from user, just load it up */
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .kernel_fp_unavailable_exception
        BUG_OPCODE
1:      bl      .load_up_fpu
        b       fast_exception_return

        .align  7
        .globl  altivec_unavailable_common
altivec_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        beq     1f
        bl      .load_up_altivec
        b       fast_exception_return
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .altivec_unavailable_exception
        b       .ret_from_except

        .align  7
        .globl  vsx_unavailable_common
vsx_unavailable_common:
        EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        bne     .load_up_vsx
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        bl      .save_nvgprs
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ENABLE_INTS
        bl      .vsx_unavailable_exception
        b       .ret_from_except

        .align  7
        .globl  __end_handlers
__end_handlers:

/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
fast_exc_return_irq:                    /* restores irq state too */
        ld      r3,SOFTE(r1)
        TRACE_AND_RESTORE_IRQ(r3);
        ld      r12,_MSR(r1)
        rldicl  r4,r12,49,63            /* get MSR_EE to LSB */
        stb     r4,PACAHARDIRQEN(r13)   /* restore paca->hard_enabled */
        b       1f

        .globl  fast_exception_return
fast_exception_return:
        ld      r12,_MSR(r1)
1:      ld      r11,_NIP(r1)
        andi.   r3,r12,MSR_RI           /* check if RI is set */
        beq-    unrecov_fer

#ifdef CONFIG_VIRT_CPU_ACCOUNTING
        andi.   r3,r12,MSR_PR
        beq     2f
        ACCOUNT_CPU_USER_EXIT(r3, r4)
2:
#endif

        ld      r3,_CCR(r1)
        ld      r4,_LINK(r1)
        ld      r5,_CTR(r1)
        ld      r6,_XER(r1)
        mtcr    r3
        mtlr    r4
        mtctr   r5
        mtxer   r6
        REST_GPR(0, r1)
        REST_8GPRS(2, r1)

        mfmsr   r10
        rldicl  r10,r10,48,1            /* clear EE */
        rldicr  r10,r10,16,61           /* clear RI (LE is 0 already) */
        mtmsrd  r10,1

        mtspr   SPRN_SRR1,r12
        mtspr   SPRN_SRR0,r11
        REST_4GPRS(10, r1)
        ld      r1,GPR1(r1)
        rfid
        b       .       /* prevent speculative execution */

unrecov_fer:
        bl      .save_nvgprs
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .unrecoverable_exception
        b       1b

#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC(load_up_altivec)
        mfmsr   r5                      /* grab the current MSR */
        oris    r5,r5,MSR_VEC@h
        mtmsrd  r5                      /* enable use of VMX now */
        isync

/*
 * For SMP, we don't do lazy VMX switching because it just gets too
 * horrendously complex, especially when a task switches from one CPU
 * to another.  Instead we call giveup_altivec in switch_to.
 * VRSAVE isn't dealt with here, that is done in the normal context
 * switch code. Note that we could rely on vrsave value to eventually
 * avoid saving all of the VREGs here...
 */
#ifndef CONFIG_SMP
        ld      r3,last_task_used_altivec@got(r2)
        ld      r4,0(r3)
        cmpdi   0,r4,0
        beq     1f
        /* Save VMX state to last_task_used_altivec's THREAD struct */
        addi    r4,r4,THREAD
        SAVE_32VRS(0,r5,r4)
        mfvscr  vr0
        li      r10,THREAD_VSCR
        stvx    vr0,r10,r4
        /* Disable VMX for last_task_used_altivec */
        ld      r5,PT_REGS(r4)
        ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        lis     r6,MSR_VEC@h
        andc    r4,r4,r6
        std     r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
        /* Hack: if we get an altivec unavailable trap with VRSAVE
         * set to all zeros, we assume this is a broken application
         * that fails to set it properly, and thus we switch it to
         * all 1's
         */
        mfspr   r4,SPRN_VRSAVE
        cmpdi   0,r4,0
        bne+    1f
        li      r4,-1
        mtspr   SPRN_VRSAVE,r4
1:
        /* enable use of VMX after return */
        ld      r4,PACACURRENT(r13)
        addi    r5,r4,THREAD            /* Get THREAD */
        oris    r12,r12,MSR_VEC@h
        std     r12,_MSR(r1)
        li      r4,1
        li      r10,THREAD_VSCR
        stw     r4,THREAD_USED_VR(r5)
        lvx     vr0,r10,r5
        mtvscr  vr0
        REST_32VRS(0,r4,r5)
#ifndef CONFIG_SMP
        /* Update last_task_used_altivec to 'current' */
        subi    r4,r5,THREAD            /* Back to 'current' */
        std     r4,0(r3)
#endif /* CONFIG_SMP */
        /* restore registers and return */
        blr
#endif /* CONFIG_ALTIVEC */

#ifdef CONFIG_VSX
/*
 * load_up_vsx(unused, unused, tsk)
 * Disable VSX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Reuse the fp and vsx saves, but first check to see if they have
 * been saved already.
 * On entry: r13 == 'current' && last_task_used_vsx != 'current'
 */
_STATIC(load_up_vsx)
/* Load FP and VSX registers if they haven't been done yet */
        andi.   r5,r12,MSR_FP
        beql+   load_up_fpu             /* skip if already loaded */
        andis.  r5,r12,MSR_VEC@h
        beql+   load_up_altivec         /* skip if already loaded */

#ifndef CONFIG_SMP
        ld      r3,last_task_used_vsx@got(r2)
        ld      r4,0(r3)
        cmpdi   0,r4,0
        beq     1f
        /* Disable VSX for last_task_used_vsx */
        addi    r4,r4,THREAD
        ld      r5,PT_REGS(r4)
        ld      r4,_MSR-STACK_FRAME_OVERHEAD(r5)
        lis     r6,MSR_VSX@h
        andc    r6,r4,r6
        std     r6,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
        ld      r4,PACACURRENT(r13)
        addi    r4,r4,THREAD            /* Get THREAD */
        li      r6,1
        stw     r6,THREAD_USED_VSR(r4)  /* ... also set thread used vsr */
        /* enable use of VSX after return */
        oris    r12,r12,MSR_VSX@h
        std     r12,_MSR(r1)
#ifndef CONFIG_SMP
        /* Update last_task_used_vsx to 'current' */
        ld      r4,PACACURRENT(r13)
        std     r4,0(r3)
#endif /* CONFIG_SMP */
        b       fast_exception_return
#endif /* CONFIG_VSX */

/*
 * Hash table stuff
 */
        .align  7
_STATIC(do_hash_page)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)

        andis.  r0,r4,0xa450            /* weird error? */
        bne-    handle_page_fault       /* if not, try to insert a HPTE */
BEGIN_FTR_SECTION
        andis.  r0,r4,0x0020            /* Is it a segment table fault? */
        bne-    do_ste_alloc            /* If so handle it */
END_FTR_SECTION_IFCLR(CPU_FTR_SLB)

        /*
         * On iSeries, we soft-disable interrupts here, then
         * hard-enable interrupts so that the hash_page code can spin on
         * the hash_table_lock without problems on a shared processor.
         */
        DISABLE_INTS

        /*
         * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
         * and will clobber volatile registers when irq tracing is enabled
         * so we need to reload them. It may be possible to be smarter here
         * and move the irq tracing elsewhere but let's keep it simple for
         * now
         */
#ifdef CONFIG_TRACE_IRQFLAGS
        ld      r3,_DAR(r1)
        ld      r4,_DSISR(r1)
        ld      r5,_TRAP(r1)
        ld      r12,_MSR(r1)
        clrrdi  r5,r5,4
#endif /* CONFIG_TRACE_IRQFLAGS */
        /*
         * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
         * accessing a userspace segment (even from the kernel). We assume
         * kernel addresses always have the high bit set.
         */
        rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
        rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
        orc     r0,r12,r0               /* MSR_PR | ~high_bit */
        rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
        ori     r4,r4,1                 /* add _PAGE_PRESENT */
        rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */

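        /* How the three middle instructions above compute _PAGE_USER,
         * for the curious: rotldi moves the address's high bit into
         * the MSR_PR bit position, orc then yields a 1 there whenever
         * MSR_PR is set or the high address bit is clear (a user
         * segment), and rlwimi inserts that single bit into r4 as
         * _PAGE_USER.
         */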
        /*
         * r3 contains the faulting address
         * r4 contains the required access permissions
         * r5 contains the trap number
         *
         * at return r3 = 0 for success
         */
        bl      .hash_page              /* build HPTE if possible */
        cmpdi   r3,0                    /* see if hash_page succeeded */

BEGIN_FW_FTR_SECTION
        /*
         * If we had interrupts soft-enabled at the point where the
         * DSI/ISI occurred, and an interrupt came in during hash_page,
         * handle it now.
         * We jump to ret_from_except_lite rather than fast_exception_return
         * because ret_from_except_lite will check for and handle pending
         * interrupts if necessary.
         */
        beq     13f
END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)

BEGIN_FW_FTR_SECTION
        /*
         * Here we have interrupts hard-disabled, so it is sufficient
         * to restore paca->{soft,hard}_enable and get out.
         */
        beq     fast_exc_return_irq     /* Return from exception on success */
END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)

        /* For a hash failure, we don't bother re-enabling interrupts */
        ble-    12f

        /*
         * hash_page couldn't handle it, set soft interrupt enable back
         * to what it was before the trap.  Note that .raw_local_irq_restore
         * handles any interrupts pending at this point.
         */
        ld      r3,SOFTE(r1)
        TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
        bl      .raw_local_irq_restore
        b       11f

/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
        ENABLE_INTS
11:     ld      r4,_DAR(r1)
        ld      r5,_DSISR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      .do_page_fault
        cmpdi   r3,0
        beq+    13f
        bl      .save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      .bad_page_fault
        b       .ret_from_except

13:     b       .ret_from_except_lite

/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
12:     bl      .save_nvgprs
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        ld      r4,_DAR(r1)
        bl      .low_hash_fault
        b       .ret_from_except

        /* here we have a segment miss */
do_ste_alloc:
        bl      .ste_allocate           /* try to insert stab entry */
        cmpdi   r3,0
        bne-    handle_page_fault
        b       fast_exception_return

/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * r9 - r13 are saved in paca->exslb.
 * We assume we aren't going to take any exceptions during this procedure.
 * We assume (DAR >> 60) == 0xc.
 */
        .align  7
_GLOBAL(do_stab_bolted)
        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
        std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */

        /* Hash to the primary group */
        ld      r10,PACASTABVIRT(r13)
        mfspr   r11,SPRN_DAR
        srdi    r11,r11,28
        rldimi  r10,r11,7,52            /* r10 = first ste of the group */

        /* Calculate VSID */
        /* This is a kernel address, so protovsid = ESID */
        ASM_VSID_SCRAMBLE(r11, r9, 256M)
        rldic   r9,r11,12,16            /* r9 = vsid << 12 */

        /* Search the primary group for a free entry */
1:      ld      r11,0(r10)              /* Test valid bit of the current ste */
        andi.   r11,r11,0x80
        beq     2f
        addi    r10,r10,16
        andi.   r11,r10,0x70
        bne     1b

        /* Stick to searching only the primary group for now. */
        /* At least for now, we use a very simple random castout scheme */
        /* Use the TB as a random number; OR in 1 to avoid entry 0 */
        mftb    r11
        rldic   r11,r11,4,57            /* r11 = (r11 << 4) & 0x70 */
        ori     r11,r11,0x10

        /* r10 currently points to an ste one past the group of interest */
        /* make it point to the randomly selected entry                  */
        subi    r10,r10,128
        or      r10,r10,r11             /* r10 is the entry to invalidate */

        isync                           /* mark the entry invalid */
        ld      r11,0(r10)
        rldicl  r11,r11,56,1            /* clear the valid bit */
        rotldi  r11,r11,8
        std     r11,0(r10)
        sync

        clrrdi  r11,r11,28              /* Get the esid part of the ste */
        slbie   r11

2:      std     r9,8(r10)               /* Store the vsid part of the ste */
        eieio

        mfspr   r11,SPRN_DAR            /* Get the new esid */
        clrrdi  r11,r11,28              /* Permits a full 32b of ESID */
        ori     r11,r11,0x90            /* Turn on valid and kp */
        std     r11,0(r10)              /* Put new entry back into the stab */

        sync

        /* All done -- return from exception. */
        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
        ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */

        andi.   r10,r12,MSR_RI
        beq-    unrecov_slb

        mtcrf   0x80,r9                 /* restore CR */

        mfmsr   r10
        clrrdi  r10,r10,2
        mtmsrd  r10,1

        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        ld      r9,PACA_EXSLB+EX_R9(r13)
        ld      r10,PACA_EXSLB+EX_R10(r13)
        ld      r11,PACA_EXSLB+EX_R11(r13)
        ld      r12,PACA_EXSLB+EX_R12(r13)
        ld      r13,PACA_EXSLB+EX_R13(r13)
        rfid
        b       .       /* prevent speculative execution */

/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap below), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
        . = STAB0_OFFSET        /* 0x6000 */
        .globl  initial_stab
initial_stab:
        .space  4096

#ifdef CONFIG_PPC_PSERIES
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
        . = 0x7000
        .globl  fwnmi_data_area
fwnmi_data_area:
#endif /* CONFIG_PPC_PSERIES */

        /* iSeries does not use the FWNMI stuff, so it is safe to put
         * this here, even if we later allow kernels that will boot on
         * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
        . = LPARMAP_PHYS
        .globl  xLparMap
xLparMap:
        .quad   HvEsidsToMap            /* xNumberEsids */
        .quad   HvRangesToMap           /* xNumberRanges */
        .quad   STAB0_PAGE              /* xSegmentTableOffs */
        .zero   40                      /* xRsvd */
        /* xEsids (HvEsidsToMap entries of 2 quads) */
        .quad   PAGE_OFFSET_ESID        /* xKernelEsid */
        .quad   PAGE_OFFSET_VSID        /* xKernelVsid */
        .quad   VMALLOC_START_ESID      /* xKernelEsid */
        .quad   VMALLOC_START_VSID      /* xKernelVsid */
        /* xRanges (HvRangesToMap entries of 3 quads) */
        .quad   HvPagesToMap            /* xPages */
        .quad   0                       /* xOffset */
        .quad   PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */

#endif /* CONFIG_PPC_ISERIES */

#ifdef CONFIG_PPC_PSERIES
        . = 0x8000
#endif /* CONFIG_PPC_PSERIES */

/*
 * On pSeries and most other platforms, secondary processors spin
 * in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL(generic_secondary_smp_init)
        mr      r24,r3

        /* turn on 64-bit mode */
        bl      .enable_64b_mode

        /* get the TOC pointer (real address) */
        bl      .relative_toc

        /* Set up a paca value for this processor. Since we have the
         * physical cpu id in r24, we need to search the pacas to find
         * which logical id maps to our physical one.
         */
        LOAD_REG_ADDR(r13, paca)        /* Get base vaddr of paca array  */
        li      r5,0                    /* logical cpu id                */
1:      lhz     r6,PACAHWCPUID(r13)     /* Load HW procid from paca      */
        cmpw    r6,r24                  /* Compare to our id             */
        beq     2f
        addi    r13,r13,PACA_SIZE       /* Loop to next PACA on miss     */
        addi    r5,r5,1
        cmpwi   r5,NR_CPUS
        blt     1b

        mr      r3,r24                  /* not found, copy phys to r3    */
        b       .kexec_wait             /* next kernel might do better   */

2:      mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */
        /* From now on, r24 is expected to be logical cpuid */
        mr      r24,r5
3:      HMT_LOW
        lbz     r23,PACAPROCSTART(r13)  /* Test if this processor should */
                                        /* start.                        */

#ifndef CONFIG_SMP
        b       3b                      /* Never go on non-SMP           */
#else
        cmpwi   0,r23,0
        beq     3b                      /* Loop until told to go         */

        sync                            /* order paca.run and cur_cpu_spec */

        /* See if we need to call a cpu state restore handler */
        LOAD_REG_ADDR(r23, cur_cpu_spec)
        ld      r23,0(r23)
        ld      r23,CPU_SPEC_RESTORE(r23)
        cmpdi   0,r23,0
        beq     4f
        ld      r23,0(r23)
        mtctr   r23
        bctrl

4:      /* Create a temp kernel stack for use before relocation is on. */
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD

        b       __secondary_start
#endif

/*
 * Turn the MMU off.
 * Assumes we're mapped EA == RA if the MMU is on.
 */
_STATIC(__mmu_off)
        mfmsr   r3
        andi.   r0,r3,MSR_IR|MSR_DR
        beqlr
        mflr    r4
        andc    r3,r3,r0
        mtspr   SPRN_SRR0,r4
        mtspr   SPRN_SRR1,r3
        sync
        rfid
        b       .       /* prevent speculative execution */

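/* Note that __mmu_off returns via rfid rather than blr: SRR0 is set
 * to the saved link register and SRR1 to the MSR with IR/DR cleared,
 * so execution resumes at the caller with translation off.
 */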

/*
 * Here is our main kernel entry point. We currently support two kinds
 * of entry, depending on the value of r5.
 *
 *  r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                in r3...r7
 *
 *  r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                DT block, r4 is a physical pointer to the kernel itself
 */
_GLOBAL(__start_initialization_multiplatform)
        /* Make sure we are running in 64 bits mode */
        bl      .enable_64b_mode

        /* Get TOC pointer (current runtime address) */
        bl      .relative_toc

        /* find out where we are now */
        bcl     20,31,$+4
0:      mflr    r26                     /* r26 = runtime addr here */
        addis   r26,r26,(_stext - 0b)@ha
        addi    r26,r26,(_stext - 0b)@l /* current runtime base addr */

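        /* The "bcl 20,31,$+4" above is the standard position-
         * independent trick for finding out where we are: it is an
         * unconditional branch-and-link to the very next instruction,
         * so mflr yields our runtime address, from which the offset
         * of _stext is then subtracted.
         */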
        /*
         * Are we booted from a PROM OF-type client interface?
         */
        cmpldi  cr0,r5,0
        beq     1f
        b       .__boot_from_prom       /* yes -> prom */
1:
        /* Save parameters */
        mr      r31,r3
        mr      r30,r4

        /* Setup some critical 970 SPRs before switching MMU off */
        mfspr   r0,SPRN_PVR
        srwi    r0,r0,16
        cmpwi   r0,0x39                 /* 970 */
        beq     1f
        cmpwi   r0,0x3c                 /* 970FX */
        beq     1f
        cmpwi   r0,0x44                 /* 970MP */
        beq     1f
        cmpwi   r0,0x45                 /* 970GX */
        bne     2f
1:      bl      .__cpu_preinit_ppc970
2:

        /* Switch off MMU if not already off */
        bl      .__mmu_off
        b       .__after_prom_start

_INIT_STATIC(__boot_from_prom)
        /* Save parameters */
        mr      r31,r3
        mr      r30,r4
        mr      r29,r5
        mr      r28,r6
        mr      r27,r7

        /*
         * Align the stack to 16-byte boundary
         * Depending on the size and layout of the ELF sections in the initial
         * boot binary, the stack pointer may be unaligned on PowerMac
         */
        rldicr  r1,r1,0,59

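        /* rldicr r1,r1,0,59 keeps bits 0-59 and clears the low four
         * bits, i.e. rounds the stack pointer down to a 16-byte
         * boundary. */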
#ifdef CONFIG_RELOCATABLE
        /* Relocate code for where we are now */
        mr      r3,r26
        bl      .relocate
#endif

        /* Restore parameters */
        mr      r3,r31
        mr      r4,r30
        mr      r5,r29
        mr      r6,r28
        mr      r7,r27

        /* Do all of the interaction with OF client interface */
        mr      r8,r26
        bl      .prom_init
        /* We never return */
        trap

_STATIC(__after_prom_start)
#ifdef CONFIG_RELOCATABLE
        /* process relocations for the final address of the kernel */
        lis     r25,PAGE_OFFSET@highest /* compute virtual base of kernel */
        sldi    r25,r25,32
#ifdef CONFIG_CRASH_DUMP
        lwz     r7,__run_at_load-_stext(r26)
        cmplwi  cr0,r7,1        /* kdump kernel ? - stay where we are */
        bne     1f
        add     r25,r25,r26
#endif
1:      mr      r3,r25
        bl      .relocate
#endif

/*
 * We need to run with _stext at physical address PHYSICAL_START.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 *
 * Note: This process overwrites the OF exception vectors.
 */
        li      r3,0                    /* target addr */
        mr.     r4,r26                  /* In some cases the loader may  */
        beq     9f                      /* have already put us at zero */
        li      r6,0x100                /* Start offset, the first 0x100 */
                                        /* bytes were copied earlier.    */

#ifdef CONFIG_CRASH_DUMP
/*
 * Check whether the kernel should run as a relocatable kernel, based
 * on the variable __run_at_load.  If it is set, the kernel is treated
 * as relocatable; otherwise it is moved to PHYSICAL_START.
 */
        lwz     r7,__run_at_load-_stext(r26)
        cmplwi  cr0,r7,1
        bne     3f

        li      r5,__end_interrupts - _stext    /* just copy interrupts */
        b       5f
3:
#endif
        lis     r5,(copy_to_here - _stext)@ha
        addi    r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */

        bl      .copy_and_flush         /* copy the first n bytes        */
                                        /* this includes the code being  */
                                        /* executed here.                */
        addis   r8,r3,(4f - _stext)@ha  /* Jump to the copy of this code */
        addi    r8,r8,(4f - _stext)@l   /* that we just made */
        mtctr   r8
        bctr

p_end:  .llong  _end - _stext

4:      /* Now copy the rest of the kernel up to _end */
        addis   r5,r26,(p_end - _stext)@ha
        ld      r5,(p_end - _stext)@l(r5)       /* get _end */
5:      bl      .copy_and_flush         /* copy the rest */

9:      b       .start_here_multiplatform

/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
_GLOBAL(copy_and_flush)
        addi    r5,r5,-8
        addi    r6,r6,-8
4:      li      r0,8                    /* Use the smallest common      */
                                        /* denominator cache line       */
                                        /* size.  This results in       */
                                        /* extra cache line flushes     */
                                        /* but operation is correct.    */
                                        /* Can't get cache line size    */
                                        /* from NACA as it is being     */
                                        /* moved too.                   */

        mtctr   r0                      /* put # words/line in ctr      */
3:      addi    r6,r6,8                 /* copy a cache line            */
        ldx     r0,r6,r4
        stdx    r0,r6,r3
        bdnz    3b
        dcbst   r6,r3                   /* write it to memory           */
        sync
        icbi    r6,r3                   /* flush the icache line        */
        cmpld   0,r6,r5
        blt     4b
        sync
        addi    r5,r5,8
        addi    r6,r6,8
        blr

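/* A short note on the flush sequence above: after each 64-byte block
 * (8 doublewords) is copied, dcbst pushes it from the data cache to
 * memory and icbi invalidates any stale instruction-cache copy, so
 * the copied kernel can safely be executed at its new address.
 */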
        .align  8
copy_to_here:

#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
        .section ".text";
        .align 2 ;

        .globl  __secondary_start_pmac_0
__secondary_start_pmac_0:
        /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */
        li      r24,0
        b       1f
        li      r24,1
        b       1f
        li      r24,2
        b       1f
        li      r24,3
1:

_GLOBAL(pmac_secondary_start)
        /* turn on 64-bit mode */
        bl      .enable_64b_mode

        /* get TOC pointer (real address) */
        bl      .relative_toc

        /* Copy some CPU settings from CPU 0 */
        bl      .__restore_cpu_ppc970

        /* pSeries does this early though I don't think we really need it */
        mfmsr   r3
        ori     r3,r3,MSR_RI
        mtmsrd  r3                      /* RI on */

        /* Set up a paca value for this processor. */
        LOAD_REG_ADDR(r4,paca)          /* Get base vaddr of paca array  */
        mulli   r13,r24,PACA_SIZE       /* Calculate vaddr of right paca */
        add     r13,r13,r4              /* for this processor.           */
        mtspr   SPRN_SPRG3,r13          /* Save vaddr of paca in SPRG3   */

        /* Create a temp kernel stack for use before relocation is on. */
        ld      r1,PACAEMERGSP(r13)
        subi    r1,r1,STACK_FRAME_OVERHEAD

        b       __secondary_start

#endif /* CONFIG_PPC_PMAC */

1546 | /* | |
1547 | * This function is called after the master CPU has released the | |
1548 | * secondary processors. It runs with relocation off. | |
1549 | * The paca for this processor has the following fields initialized at | |
1550 | * this point: | |
1551 | * 1. Processor number | |
1552 | * 2. Segment table pointer (virtual address) | |
1553 | * On entry the following are set: | |
1554 | * r1 = stack pointer (vaddr for iSeries, temp-stack raddr for pSeries) | |
1555 | * r24 = cpu# (in Linux terms) | |
1556 | * r13 = paca virtual address | |
1557 | * SPRG3 = paca virtual address | |
1558 | */ | |
fc68e869 | 1559 | .globl __secondary_start |
c705677e | 1560 | __secondary_start: |
799d6046 PM |
1561 | /* Set thread priority to MEDIUM */ |
1562 | HMT_MEDIUM | |
14cf11af | 1563 | |
799d6046 PM |
1564 | /* Do early setup for that CPU (stab, slb, hash table pointer) */ |
1565 | bl .early_setup_secondary | |
14cf11af PM |
1566 | |
1567 | /* Initialize the kernel stack. Just a repeat for iSeries. */ | |
e58c3495 | 1568 | LOAD_REG_ADDR(r3, current_set) |
14cf11af PM |
1569 | sldi r28,r24,3 /* get current_set[cpu#] */ |
1570 | ldx r1,r3,r28 | |
1571 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | |
1572 | std r1,PACAKSAVE(r13) | |
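| /* PACAKSAVE lets the exception entry code locate this | |
| * CPU's kernel stack */ | |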
1573 | ||
799d6046 | 1574 | /* Clear backchain so we get nice backtraces */ |
14cf11af PM |
1575 | li r7,0 |
1576 | mtlr r7 | |
1577 | ||
1578 | /* enable MMU and jump to start_secondary */ | |
e58c3495 DG |
1579 | LOAD_REG_ADDR(r3, .start_secondary_prolog) |
1580 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) | |
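| /* Track the lazy interrupt-disable state in the paca: iSeries | |
| * enters C code with EE on and hard irqs marked enabled, other | |
| * platforms leave hard irqs disabled; soft irqs start disabled | |
| * in both cases. */ | |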
d04c56f7 | 1581 | #ifdef CONFIG_PPC_ISERIES |
3f639ee8 | 1582 | BEGIN_FW_FTR_SECTION |
14cf11af | 1583 | ori r4,r4,MSR_EE |
ff3da2e0 BH |
1584 | li r8,1 |
1585 | stb r8,PACAHARDIRQEN(r13) | |
3f639ee8 | 1586 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1587 | #endif |
d04c56f7 | 1588 | BEGIN_FW_FTR_SECTION |
d04c56f7 PM |
1589 | stb r7,PACAHARDIRQEN(r13) |
1590 | END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |
ff3da2e0 | 1591 | stb r7,PACASOFTIRQEN(r13) |
d04c56f7 | 1592 | |
b5bbeb23 PM |
1593 | mtspr SPRN_SRR0,r3 |
1594 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1595 | rfid |
1596 | b . /* prevent speculative execution */ | |
1597 | ||
1598 | /* | |
1599 | * Running with relocation on at this point. All we want to do is | |
e31aa453 PM |
1600 | * zero the stack back-chain pointer and get the TOC virtual address |
1601 | * before going into C code. | |
14cf11af PM |
1602 | */ |
1603 | _GLOBAL(start_secondary_prolog) | |
e31aa453 | 1604 | ld r2,PACATOC(r13) |
14cf11af PM |
1605 | li r3,0 |
1606 | std r3,0(r1) /* Zero the stack back-chain pointer */ | |
1607 | bl .start_secondary | |
799d6046 | 1608 | b . |
14cf11af PM |
1609 | #endif |
1610 | ||
1611 | /* | |
1612 | * This subroutine clobbers r11 and r12 | |
1613 | */ | |
1614 | _GLOBAL(enable_64b_mode) | |
1615 | mfmsr r11 /* grab the current MSR */ | |
e31aa453 PM |
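| /* MSR_SF and MSR_ISF live in the top 16 bits of the MSR; | |
| * @highest extracts those bits of the mask and the sldi | |
| * shifts them back into place */ | |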
1616 | li r12,(MSR_SF | MSR_ISF)@highest |
1617 | sldi r12,r12,48 | |
14cf11af PM |
1618 | or r11,r11,r12 |
1619 | mtmsrd r11 | |
1620 | isync | |
1621 | blr | |
1622 | ||
e31aa453 PM |
1623 | /* |
1624 | * This puts the TOC pointer into r2, offset by 0x8000 (as expected | |
1625 | * by the toolchain). It computes the correct value for wherever we | |
1626 | * are running at the moment, using position-independent code. | |
1627 | */ | |
1628 | _GLOBAL(relative_toc) | |
1629 | mflr r0 | |
1630 | bcl 20,31,$+4 | |
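| /* 'bcl 20,31,$+4' branches to the next instruction, leaving | |
| * its address in LR; this form is commonly special-cased by | |
| * hardware so it does not unbalance the link-register | |
| * return-prediction stack */ | |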
1631 | 0: mflr r9 | |
1632 | ld r2,(p_toc - 0b)(r9) | |
1633 | add r2,r2,r9 | |
1634 | mtlr r0 | |
1635 | blr | |
1636 | ||
1637 | p_toc: .llong __toc_start + 0x8000 - 0b | |
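| /* p_toc is the link-time offset from label 0b to the TOC base | |
| * (biased by 0x8000); adding the runtime address of 0b yields | |
| * the runtime TOC pointer in r2 */ | |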
1638 | ||
14cf11af PM |
1639 | /* |
1640 | * This is where the main kernel code starts. | |
1641 | */ | |
939e60f6 | 1642 | _INIT_STATIC(start_here_multiplatform) |
e31aa453 PM |
1643 | /* set up the TOC (real address) */ |
1644 | bl .relative_toc | |
14cf11af PM |
1645 | |
1646 | /* Clear out the BSS. It may have been done in prom_init | |
1647 | * already, but that's irrelevant since prom_init will soon | |
1648 | * be detached from the kernel completely. Besides, we need | |
1649 | * to clear it now for kexec-style entry. | |
1650 | */ | |
e31aa453 PM |
1651 | LOAD_REG_ADDR(r11,__bss_stop) |
1652 | LOAD_REG_ADDR(r8,__bss_start) | |
14cf11af PM |
1653 | sub r11,r11,r8 /* bss size */ |
1654 | addi r11,r11,7 /* round up to a whole doubleword */ | |
e31aa453 | 1655 | srdi. r11,r11,3 /* shift right by 3 */ |
14cf11af PM |
1656 | beq 4f |
1657 | addi r8,r8,-8 | |
1658 | li r0,0 | |
1659 | mtctr r11 /* zero this many doublewords */ | |
1660 | 3: stdu r0,8(r8) | |
1661 | bdnz 3b | |
1662 | 4: | |
1663 | ||
1664 | mfmsr r6 | |
1665 | ori r6,r6,MSR_RI | |
1666 | mtmsrd r6 /* RI on */ | |
1667 | ||
549e8152 PM |
1668 | #ifdef CONFIG_RELOCATABLE |
1669 | /* Save the physical address we're running at in kernstart_addr */ | |
1670 | LOAD_REG_ADDR(r4, kernstart_addr) | |
1671 | clrldi r0,r25,2 | |
1672 | std r0,0(r4) | |
1673 | #endif | |
1674 | ||
e31aa453 | 1675 | /* The following gets the stack set up with r1 */ | |
14cf11af PM |
1676 | /* pointing to the real addr of the kernel stack. This is */ |
1677 | /* all done to support the C function call below which sets */ | |
1678 | /* up the htab; this is needed because we have relocated the */ | |
1679 | /* kernel but are still running in real mode. */ | |
1680 | ||
e31aa453 | 1681 | LOAD_REG_ADDR(r3,init_thread_union) |
14cf11af | 1682 | |
e31aa453 | 1683 | /* set up a stack pointer */ |
14cf11af PM |
1684 | addi r1,r3,THREAD_SIZE |
1685 | li r0,0 | |
1686 | stdu r0,-STACK_FRAME_OVERHEAD(r1) | |
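| /* store a NULL back-chain word and reserve the initial | |
| * stack frame in one stdu */ | |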
1687 | ||
14cf11af PM |
1688 | /* Do very early kernel initializations, including initial hash table, | |
1689 | * stab and slb setup, before we turn on relocation. */ | |
1690 | ||
1691 | /* Restore parameters passed from prom_init/kexec */ | |
1692 | mr r3,r31 | |
e31aa453 | 1693 | bl .early_setup /* also sets r13 and SPRG3 */ |
14cf11af | 1694 | |
e31aa453 PM |
1695 | LOAD_REG_ADDR(r3, .start_here_common) |
1696 | ld r4,PACAKMSR(r13) | |
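| /* PACAKMSR holds the MSR value for kernel mode; the rfid below | |
| * uses it to turn relocation on and enter start_here_common */ | |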
b5bbeb23 PM |
1697 | mtspr SPRN_SRR0,r3 |
1698 | mtspr SPRN_SRR1,r4 | |
14cf11af PM |
1699 | rfid |
1700 | b . /* prevent speculative execution */ | |
14cf11af PM |
1701 | |
1702 | /* This is where all platforms converge execution */ | |
fc68e869 | 1703 | _INIT_GLOBAL(start_here_common) |
14cf11af | 1704 | /* relocation is on at this point */ |
e31aa453 | 1705 | std r1,PACAKSAVE(r13) |
14cf11af | 1706 | |
e31aa453 | 1707 | /* Load the TOC (virtual address) */ |
14cf11af | 1708 | ld r2,PACATOC(r13) |
14cf11af PM |
1709 | |
1710 | bl .setup_system | |
1711 | ||
1712 | /* Load up the kernel context */ | |
1713 | 5: | |
14cf11af | 1714 | li r5,0 |
d04c56f7 PM |
1715 | stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ |
1716 | #ifdef CONFIG_PPC_ISERIES | |
1717 | BEGIN_FW_FTR_SECTION | |
14cf11af | 1718 | mfmsr r5 |
ff3da2e0 | 1719 | ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/ |
14cf11af | 1720 | mtmsrd r5 |
ff3da2e0 | 1721 | li r5,1 |
3f639ee8 | 1722 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES) |
14cf11af | 1723 | #endif |
ff3da2e0 | 1724 | stb r5,PACAHARDIRQEN(r13) /* 1 on iSeries (enabled), 0 elsewhere */ | |
14cf11af | 1725 | |
ff3da2e0 | 1726 | bl .start_kernel |
14cf11af | 1727 | |
f1870f77 AB |
1728 | /* Not reached */ |
1729 | BUG_OPCODE | |
14cf11af | 1730 | |
14cf11af PM |
1731 | /* |
1732 | * We put a few things here that have to be page-aligned. | |
1733 | * This stuff goes at the beginning of the bss, which is page-aligned. | |
1734 | */ | |
1735 | .section ".bss" | |
1736 | ||
1737 | .align PAGE_SHIFT | |
1738 | ||
1739 | .globl empty_zero_page | |
1740 | empty_zero_page: | |
1741 | .space PAGE_SIZE | |
1742 | ||
1743 | .globl swapper_pg_dir | |
1744 | swapper_pg_dir: | |
ee7a76da | 1745 | .space PGD_TABLE_SIZE |