[Scraped git-blame page header — not part of the source file.]
Merge tag 'for-4.1-rc' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon/linux
[deliverable/linux.git] / arch / powerpc / kernel / misc_64.S
CommitLineData (per-line blame annotations follow)
9994a338 1/*
9994a338
PM
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
127efeb2
SR
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
9994a338
PM
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 *
15 */
16
9994a338
PM
17#include <linux/sys.h>
18#include <asm/unistd.h>
19#include <asm/errno.h>
20#include <asm/processor.h>
21#include <asm/page.h>
22#include <asm/cache.h>
23#include <asm/ppc_asm.h>
24#include <asm/asm-offsets.h>
25#include <asm/cputable.h>
6cb7bfeb 26#include <asm/thread_info.h>
1fc711f7 27#include <asm/kexec.h>
46f52210 28#include <asm/ptrace.h>
9994a338
PM
29
30 .text
31
9994a338
PM
/*
 * NOTE(review): this file is a scraped git-blame view; the bare decimal
 * prefixes and hash/initial lines below are blame artifacts, not code.
 *
 * call_do_softirq(sp):  r3 = top of the softirq stack.
 * Saves LR, pivots r1 onto the new stack (stdu stores a back-chain to
 * the old r1 at 0(new r1)), calls __do_softirq, then unwinds through
 * the back chain and restores LR.  Terminal stack switch helper; does
 * not build a normal C frame beyond STACK_FRAME_OVERHEAD.
 */
32_GLOBAL(call_do_softirq)
33	mflr	r0
/* LR lives in the standard slot 16(r1) of the caller's frame */
34	std	r0,16(r1)
4ae2dcb6 35 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
9994a338 36 	mr	r1,r3
b1576fec 37 	bl	__do_softirq
9994a338
PM
/* 0(r1) holds the back-chain written by the stdu above */
38 	ld	r1,0(r1)
39 	ld	r0,16(r1)
40 	mtlr	r0
41 	blr
42
/*
 * call_do_irq(regs, sp):  r3 = pt_regs (passed through to __do_irq),
 * r4 = top of the hard-irq stack.  Same stack-pivot pattern as
 * call_do_softirq, but the new stack comes in via r4 so r3 stays live
 * as __do_irq's first argument.
 */
0366a1c7 43_GLOBAL(call_do_irq)
9994a338
PM
44 	mflr	r0
45 	std	r0,16(r1)
0366a1c7
BH
46 	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
47 	mr	r1,r4
b1576fec 48 	bl	__do_irq
9994a338
PM
49 	ld	r1,0(r1)
50 	ld	r0,16(r1)
51 	mtlr	r0
52 	blr
9994a338 53
9994a338
PM
/*
 * TOC entry so the cache-flush routines below can load the address of
 * the kernel's ppc64_caches info block (line sizes and log2 line sizes)
 * via r2-relative addressing.
 */
54	.section	".toc","aw"
55PPC64_CACHES:
56	.tc		ppc64_caches[TC],ppc64_caches
57	.section ".text"
58
59/*
60 * Write any modified data cache blocks out to memory
61 * and invalidate the corresponding instruction cache blocks.
62 *
63 * flush_icache_range(unsigned long start, unsigned long stop)
64 *
65 * flush all bytes from start through stop-1 inclusive
66 */
67
/*
 * In: r3 = start, r4 = stop.  Clobbers r5-r10, ctr.
 * On CPUs with a coherent icache the whole walk is patched out to a
 * blr (plus a prefetch purge where the CPU needs one).
 */
3b04c300 68_KPROBE(flush_icache_range)
abb29c3b 69BEGIN_FTR_SECTION
0ce63670 70 	PURGE_PREFETCHED_INS
abb29c3b
KH
71 	blr
72END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
9994a338
PM
73/*
74 * Flush the data cache to memory
75 *
76 * Different systems have different cache line sizes
77 * and in some cases i-cache and d-cache line sizes differ from
78 * each other.
79 */
/* r10 = &ppc64_caches; r7 = line size, r9 = log2(line size),
 * r6 = line-aligned cursor, r8 = number of lines to touch */
80 	ld	r10,PPC64_CACHES@toc(r2)
81 	lwz	r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
82 	addi	r5,r7,-1
83 	andc	r6,r3,r5		/* round low to line bdy */
84 	subf	r8,r6,r4		/* compute length */
85 	add	r8,r8,r5		/* ensure we get enough */
86 	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
87 	srw.	r8,r8,r9		/* compute line count */
88 	beqlr				/* nothing to do? */
89 	mtctr	r8
901:	dcbst	0,r6
91 	add	r6,r6,r7
92 	bdnz	1b
/* sync orders the dcbst stores before the icbi invalidations below */
93 	sync
94
95/* Now invalidate the instruction cache */
96
97 	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
98 	addi	r5,r7,-1
99 	andc	r6,r3,r5		/* round low to line bdy */
100 	subf	r8,r6,r4		/* compute length */
101 	add	r8,r8,r5
102 	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
103 	srw.	r8,r8,r9		/* compute line count */
104 	beqlr				/* nothing to do? */
105 	mtctr	r8
1062:	icbi	0,r6
107 	add	r6,r6,r7
108 	bdnz	2b
/* isync discards any instructions prefetched from the stale lines */
109 	isync
110 	blr
111 	.previous .text
112/*
113 * Like above, but only do the D-cache.
114 *
115 * flush_dcache_range(unsigned long start, unsigned long stop)
116 *
117 * flush all bytes from start to stop-1 inclusive
118 */
/* In: r3 = start, r4 = stop.  Clobbers r5-r10, ctr.
 * dcbst writes dirty lines back without invalidating them. */
119_GLOBAL(flush_dcache_range)
120
121/*
122 * Flush the data cache to memory
123 *
124 * Different systems have different cache line sizes
125 */
126 	ld	r10,PPC64_CACHES@toc(r2)
127 	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
128 	addi	r5,r7,-1
129 	andc	r6,r3,r5		/* round low to line bdy */
130 	subf	r8,r6,r4		/* compute length */
131 	add	r8,r8,r5		/* ensure we get enough */
132 	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
133 	srw.	r8,r8,r9		/* compute line count */
134 	beqlr				/* nothing to do? */
135 	mtctr	r8
1360:	dcbst	0,r6
137 	add	r6,r6,r7
138 	bdnz	0b
/* wait for all the writebacks to reach memory before returning */
139 	sync
140 	blr
141
141
142/*
143 * Like above, but works on non-mapped physical addresses.
144 * Use only for non-LPAR setups ! It also assumes real mode
145 * is cacheable. Used for flushing out the DART before using
146 * it as uncacheable memory
147 *
148 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
149 *
150 * flush all bytes from start to stop-1 inclusive
151 */
/* In: r3 = start (phys), r4 = stop (phys).  Clobbers r0, r5-r10, ctr.
 * Drops into real mode (MSR[DR]=0) around the loop so the dcbst
 * operands are treated as physical addresses. */
152_GLOBAL(flush_dcache_phys_range)
153 	ld	r10,PPC64_CACHES@toc(r2)
154 	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
155 	addi	r5,r7,-1
156 	andc	r6,r3,r5		/* round low to line bdy */
157 	subf	r8,r6,r4		/* compute length */
158 	add	r8,r8,r5		/* ensure we get enough */
159 	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
160 	srw.	r8,r8,r9		/* compute line count */
161 	beqlr				/* nothing to do? */
/* ori+xori clears the DR bit regardless of its prior state */
162 	mfmsr	r5			/* Disable MMU Data Relocation */
163 	ori	r0,r5,MSR_DR
164 	xori	r0,r0,MSR_DR
165 	sync
166 	mtmsr	r0
167 	sync
168 	isync
169 	mtctr	r8
1700:	dcbst	0,r6
171 	add	r6,r6,r7
172 	bdnz	0b
173 	sync
174 	isync
175 	mtmsr	r5			/* Re-enable MMU Data Relocation */
176 	sync
177 	isync
178 	blr
179
179
/*
 * flush_inval_dcache_range(unsigned long start, unsigned long stop)
 * In: r3 = start, r4 = stop.  Clobbers r5-r10, ctr.
 * Same walk as flush_dcache_range but uses dcbf, which writes dirty
 * lines back AND invalidates them (flush + invalidate).
 */
180_GLOBAL(flush_inval_dcache_range)
181 	ld	r10,PPC64_CACHES@toc(r2)
182 	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
183 	addi	r5,r7,-1
184 	andc	r6,r3,r5		/* round low to line bdy */
185 	subf	r8,r6,r4		/* compute length */
186 	add	r8,r8,r5		/* ensure we get enough */
187 	lwz	r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
188 	srw.	r8,r8,r9		/* compute line count */
189 	beqlr				/* nothing to do? */
190 	sync
191 	isync
192 	mtctr	r8
1930:	dcbf	0,r6
194 	add	r6,r6,r7
195 	bdnz	0b
196 	sync
197 	isync
198 	blr
199
200
201/*
202 * Flush a particular page from the data cache to RAM.
203 * Note: this is necessary because the instruction cache does *not*
204 * snoop from the data cache.
205 *
206 * void __flush_dcache_icache(void *page)
207 */
/* In: r3 = any address within the page.  Clobbers r4-r7, ctr.
 * Uses lines-per-page counts from ppc64_caches instead of computing
 * line counts from a byte range. */
208_GLOBAL(__flush_dcache_icache)
209/*
210 * Flush the data cache to memory
211 *
212 * Different systems have different cache line sizes
213 */
214
0ce63670
KH
/* Patched to a plain blr on coherent-icache CPUs */
215BEGIN_FTR_SECTION
216	PURGE_PREFETCHED_INS
217	blr
218END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
219
9994a338
PM
220/* Flush the dcache */
221 	ld	r7,PPC64_CACHES@toc(r2)
222 	clrrdi	r3,r3,PAGE_SHIFT		    /* Page align */
223 	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
224 	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
225 	mr	r6,r3
226 	mtctr	r4
2270:	dcbst	0,r6
228 	add	r6,r6,r5
229 	bdnz	0b
/* order the writebacks before the icbi pass */
230 	sync
231
232/* Now invalidate the icache */
233
234 	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
235 	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
236 	mtctr	r4
2371:	icbi	0,r3
238 	add	r3,r3,r5
239 	bdnz	1b
240 	isync
241 	blr
9994a338 242
ca9d7aea
DW
/*
 * long long __bswapdi2(long long x) -- byte-swap a 64-bit value.
 * In: r3 = x.  Out: r3 = byteswapped x.  Clobbers r7-r9.
 * Swaps each 32-bit half with the rlwinm/rlwimi byte-rotate idiom,
 * then recombines the halves in the opposite order.
 * (libgcc helper referenced by the compiler for __builtin_bswap64.)
 */
243_GLOBAL(__bswapdi2)
244	srdi	r8,r3,32
245	rlwinm	r7,r3,8,0xffffffff
246	rlwimi	r7,r3,24,0,7
247	rlwinm	r9,r8,8,0xffffffff
248	rlwimi	r7,r3,24,16,23
249	rlwimi	r9,r8,24,0,7
250	rlwimi	r9,r8,24,16,23
251	sldi	r7,r7,32
252	or	r3,r7,r9
253	blr
3f639ee8 254
7191b615 255
2d6f0c3a 256#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
7191b615
BH
/*
 * rmci_on / rmci_off -- toggle bit 0x100<<32 in HID4 (presumably the
 * 970's real-mode cache-inhibit control -- TODO confirm against the
 * 970FX manual), with full sync/isync fencing and an slbia to flush
 * stale SLB translations.  Clobbers r3, r5.  Early-boot debug only.
 */
257_GLOBAL(rmci_on)
258	sync
259	isync
/* build the HID4 mask: 0x100 rotated into the upper word */
260	li	r3,0x100
261	rldicl	r3,r3,32,0
262	mfspr	r5,SPRN_HID4
263	or	r5,r5,r3
264	sync
265	mtspr	SPRN_HID4,r5
266	isync
267	slbia
268	isync
269	sync
270	blr
271
/* Inverse of rmci_on: clear the same HID4 bit.  Clobbers r3, r5. */
272_GLOBAL(rmci_off)
273	sync
274	isync
275	li	r3,0x100
276	rldicl	r3,r3,32,0
277	mfspr	r5,SPRN_HID4
278	andc	r5,r5,r3
279	sync
280	mtspr	SPRN_HID4,r5
281	isync
282	slbia
283	isync
284	sync
285	blr
2d6f0c3a
ME
286#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
287
288#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
7191b615 289
9994a338
PM
290/*
291 * Do an IO access in real mode
292 */
/*
 * real_readb(addr): r3 = physical address; returns the byte in r3.
 * Clobbers r0, r5-r7.  Sequence: turn off MSR[DR], set the 0x100
 * HID4 bit (same bit rmci_on uses) around the access, slbia to drop
 * stale SLB entries, do the load, then restore HID4 and the MSR.
 * Non-LPAR PowerMac/Maple only.
 */
293_GLOBAL(real_readb)
294	mfmsr	r7
/* ori+xori clears MSR_DR regardless of its prior state */
295	ori	r0,r7,MSR_DR
296	xori	r0,r0,MSR_DR
297	sync
298	mtmsrd	r0
299	sync
300	isync
301	mfspr	r6,SPRN_HID4
302	rldicl	r5,r6,32,0
303	ori	r5,r5,0x100
304	rldicl	r5,r5,32,0
305	sync
306	mtspr	SPRN_HID4,r5
307	isync
308	slbia
309	isync
310	lbz	r3,0(r3)
311	sync
/* restore the saved HID4, then the saved MSR */
312	mtspr	SPRN_HID4,r6
313	isync
314	slbia
315	isync
316	mtmsrd	r7
317	sync
318	isync
319	blr
320
321	/*
322 * Do an IO access in real mode
323 */
/* real_writeb(val, addr): r3 = byte value, r4 = physical address.
 * Mirror image of real_readb with an stb instead of the lbz. */
324_GLOBAL(real_writeb)
325	mfmsr	r7
326	ori	r0,r7,MSR_DR
327	xori	r0,r0,MSR_DR
328	sync
329	mtmsrd	r0
330	sync
331	isync
332	mfspr	r6,SPRN_HID4
333	rldicl	r5,r6,32,0
334	ori	r5,r5,0x100
335	rldicl	r5,r5,32,0
336	sync
337	mtspr	SPRN_HID4,r5
338	isync
339	slbia
340	isync
341	stb	r3,0(r4)
342	sync
343	mtspr	SPRN_HID4,r6
344	isync
345	slbia
346	isync
347	mtmsrd	r7
348	sync
349	isync
350	blr
351#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
352
39c870d5
OJ
353#ifdef CONFIG_PPC_PASEMI
354
39c870d5
OJ
/*
 * real_205_readb(addr): r3 = physical address; returns the byte in r3.
 * Clobbers r0, r7.  PA Semi variant of real-mode IO: uses the
 * cache-inhibited load (lbzcix) so no HID4 fiddling is needed --
 * just MSR[DR] off around the access.
 */
355_GLOBAL(real_205_readb)
356	mfmsr	r7
/* ori+xori clears MSR_DR regardless of its prior state */
357	ori	r0,r7,MSR_DR
358	xori	r0,r0,MSR_DR
359	sync
360	mtmsrd	r0
361	sync
362	isync
e55174e9 363 	LBZCIX(R3,R0,R3)
39c870d5
OJ
364	isync
365	mtmsrd	r7
366	sync
367	isync
368	blr
369
/* real_205_writeb(val, addr): r3 = byte value, r4 = physical address.
 * Cache-inhibited store (stbcix) counterpart of real_205_readb. */
370_GLOBAL(real_205_writeb)
371	mfmsr	r7
372	ori	r0,r7,MSR_DR
373	xori	r0,r0,MSR_DR
374	sync
375	mtmsrd	r0
376	sync
377	isync
e55174e9 378 	STBCIX(R3,R0,R4)
39c870d5
OJ
379	isync
380	mtmsrd	r7
381	sync
382	isync
383	blr
384
385#endif /* CONFIG_PPC_PASEMI */
386
387
e48f7eb2 388#if defined(CONFIG_CPU_FREQ_PMAC64) || defined(CONFIG_CPU_FREQ_MAPLE)
4350147a
BH
389/*
390 * SCOM access functions for 970 (FX only for now)
391 *
392 * unsigned long scom970_read(unsigned int address);
393 * void scom970_write(unsigned int address, unsigned long value);
394 *
395 * The address passed in is the 24 bits register address. This code
396 * is 970 specific and will not check the status bits, so you should
397 * know what you are doing.
398 */
/* In: r3 = 24-bit SCOM address.  Out: r3 = data.  Clobbers r0, r4.
 * Runs with external interrupts masked (MSR_EE cleared via the
 * ori+xori trick) for the duration of the SCOMC/SCOMD sequence. */
399_GLOBAL(scom970_read)
400	/* interrupts off */
401	mfmsr	r4
402	ori	r0,r4,MSR_EE
403	xori	r0,r0,MSR_EE
404	mtmsrd	r0,1
405
406	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
407	 * (including parity). On current CPUs they must be 0'd,
408	 * and finally or in RW bit
409	 */
410	rlwinm	r3,r3,8,0,15
411	ori	r3,r3,0x8000
412
413	/* do the actual scom read */
414	sync
415	mtspr	SPRN_SCOMC,r3
416	isync
417	mfspr	r3,SPRN_SCOMD
418	isync
/* read SCOMC back to complete the transaction (status bits ignored) */
419	mfspr	r0,SPRN_SCOMC
420	isync
421
422	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
423	 * that's the best we can do). Not implemented yet as we don't use
424	 * the scom on any of the bogus CPUs yet, but may have to be done
425	 * ultimately
426	 */
427
428	/* restore interrupts */
429	mtmsrd	r4,1
430	blr
431
432
/* In: r3 = 24-bit SCOM address, r4 = 64-bit value.  Clobbers r0, r5.
 * Write variant: data goes to SCOMD first, then the command (no RW
 * bit set) is written to SCOMC. */
433_GLOBAL(scom970_write)
434	/* interrupts off */
435	mfmsr	r5
436	ori	r0,r5,MSR_EE
437	xori	r0,r0,MSR_EE
438	mtmsrd	r0,1
439
440	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
441	 * (including parity). On current CPUs they must be 0'd.
442	 */
443
444	rlwinm	r3,r3,8,0,15
445
446	sync
447	mtspr	SPRN_SCOMD,r4      /* write data */
448	isync
449	mtspr	SPRN_SCOMC,r3      /* write command */
450	isync
/* NOTE(review): bare "3" operand -- GAS accepts plain numbers as
 * register numbers here, so this reads SCOMC into r3 (discarded) */
451	mfspr	3,SPRN_SCOMC
452	isync
453
454	/* restore interrupts */
455	mtmsrd	r5,1
456	blr
e48f7eb2 457#endif /* CONFIG_CPU_FREQ_PMAC64 || CONFIG_CPU_FREQ_MAPLE */
4350147a 458
9994a338
PM
459/* kexec_wait(phys_cpu)
460 *
461 * wait for the flag to change, indicating this kernel is going away but
462 * the slave code for the next one is at addresses 0 to 100.
463 *
3d2cea73
MM
464 * This is used by all slaves, even those that did not find a matching
465 * paca in the secondary startup code.
9994a338
PM
466 *
467 * Physical (hardware) cpu id should be in r3.
468 */
/* Terminal routine: never returns.  Spins at low SMT priority on
 * kexec_flag; when it becomes nonzero, branches absolute to 0x60,
 * the new kernel's slave entry point. */
469_GLOBAL(kexec_wait)
/* bl/mflr pair computes kexec_flag's address PC-relatively (the code
 * may be running at a different address than it was linked at) */
470	bl	1f
4711:	mflr	r5
472	addi	r5,r5,kexec_flag-1b
473
47499:	HMT_LOW
475#ifdef CONFIG_KEXEC		/* use no memory without kexec */
476	lwz	r4,0(r5)
477	cmpwi	0,r4,0
/* bnea = branch-if-not-equal, absolute addressing: jump to 0x60 */
478	bnea	0x60
479#endif
480	b	99b
481
482/* this can be in text because we won't change it until we are
483 * running in real anyways
484 */
485kexec_flag:
486	.long	0
487
487
488
489#ifdef CONFIG_KEXEC

490
491/* kexec_smp_wait(void)
492 *
493 * call with interrupts off
494 * note: this is a terminal routine, it does not save lr
495 *
496 * get phys id from paca
9994a338 497 * switch to real mode
3d2cea73 498 * mark the paca as no longer used
9994a338
PM
499 * join other cpus in kexec_wait(phys_id)
500 */
501_GLOBAL(kexec_smp_wait)
/* r3 = our hardware cpu id, which kexec_wait expects */
502	lhz	r3,PACAHWCPUID(r13)
9994a338 503 	bl	real_mode
3d2cea73
MM
504
/* publish that this paca is done before the kernel is overwritten */
505	li	r4,KEXEC_STATE_REAL_MODE
506	stb	r4,PACAKEXECSTATE(r13)
507	SYNC
508
b1576fec 509 	b	kexec_wait
9994a338
PM
510
511/*
512 * switch to real mode (turn mmu off)
513 * we use the early kernel trick that the hardware ignores bits
514 * 0 and 1 (big endian) of the effective address in real mode
515 *
516 * don't overwrite r3 here, it is live for kexec_wait above.
517 */
/* Clobbers r9-r12.  Returns to the caller's LR via rfid with
 * MSR[IR|DR|RI] cleared, i.e. the blr-return happens in real mode. */
518real_mode:	/* assume normal blr return */
5191:	li	r9,MSR_RI
520	li	r10,MSR_DR|MSR_IR
521	mflr	r11		/* return address to SRR0 */
522	mfmsr	r12
523	andc	r9,r12,r9
524	andc	r10,r12,r10
525
/* drop RI first so a stray exception here is fatal rather than
 * silently resumed, then rfid to the saved LR with MMU off */
526	mtmsrd	r9,1
527	mtspr	SPRN_SRR1,r10
528	mtspr	SPRN_SRR0,r11
529	rfid
530
531
530
531
532/*
1767c8f3 533 * kexec_sequence(newstack, start, image, control, clear_all())
9994a338
PM
534 *
535 * does the grungy work with stack switching and real mode switches
536 * also does simple calls to other code
537 */
538
539_GLOBAL(kexec_sequence)
540 mflr r0
541 std r0,16(r1)
542
543 /* switch stacks to newstack -- &kexec_stack.stack */
4ae2dcb6 544 stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
9994a338
PM
545 mr r1,r3
546
547 li r0,0
548 std r0,16(r1)
549
550 /* save regs for local vars on new stack.
551 * yes, we won't go back, but ...
552 */
553 std r31,-8(r1)
554 std r30,-16(r1)
555 std r29,-24(r1)
556 std r28,-32(r1)
557 std r27,-40(r1)
558 std r26,-48(r1)
559 std r25,-56(r1)
560
4ae2dcb6 561 stdu r1,-STACK_FRAME_OVERHEAD-64(r1)
9994a338
PM
562
563 /* save args into preserved regs */
564 mr r31,r3 /* newstack (both) */
565 mr r30,r4 /* start (real) */
566 mr r29,r5 /* image (virt) */
567 mr r28,r6 /* control, unused */
568 mr r27,r7 /* clear_all() fn desc */
1767c8f3 569 mr r26,r8 /* spare */
9994a338
PM
570 lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
571
572 /* disable interrupts, we are overwriting kernel data next */
573 mfmsr r3
574 rlwinm r3,r3,0,17,15
575 mtmsrd r3,1
576
577 /* copy dest pages, flush whole dest image */
578 mr r3,r29
b1576fec 579 bl kexec_copy_flush /* (image) */
9994a338
PM
580
581 /* turn off mmu */
582 bl real_mode
583
ee46a90b
MM
584 /* copy 0x100 bytes starting at start to 0 */
585 li r3,0
586 mr r4,r30 /* start, aka phys mem offset */
587 li r5,0x100
588 li r6,0
b1576fec 589 bl copy_and_flush /* (dest, src, copy limit, start offset) */
ee46a90b
MM
5901: /* assume normal blr return */
591
592 /* release other cpus to the new kernel secondary start at 0x60 */
593 mflr r5
594 li r6,1
595 stw r6,kexec_flag-1b(5)
596
9994a338 597 /* clear out hardware hash page table and tlb */
cc7efbf9
AB
598#if !defined(_CALL_ELF) || _CALL_ELF != 2
599 ld r12,0(r27) /* deref function descriptor */
600#else
601 mr r12,r27
602#endif
603 mtctr r12
8d950cb8 604 bctrl /* ppc_md.hpte_clear_all(void); */
9994a338
PM
605
606/*
607 * kexec image calling is:
608 * the first 0x100 bytes of the entry point are copied to 0
609 *
610 * all slaves branch to slave = 0x60 (absolute)
611 * slave(phys_cpu_id);
612 *
613 * master goes to start = entry point
614 * start(phys_cpu_id, start, 0);
615 *
616 *
617 * a wrapper is needed to call existing kernels, here is an approximate
618 * description of one method:
619 *
620 * v2: (2.6.10)
621 * start will be near the boot_block (maybe 0x100 bytes before it?)
622 * it will have a 0x60, which will b to boot_block, where it will wait
623 * and 0 will store phys into struct boot-block and load r3 from there,
624 * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
625 *
626 * v1: (2.6.9)
627 * boot block will have all cpus scanning device tree to see if they
628 * are the boot cpu ?????
629 * other device tree differences (prop sizes, va vs pa, etc)...
630 */
9994a338
PM
631 mr r3,r25 # my phys cpu
632 mr r4,r30 # start, aka phys mem offset
633 mtlr 4
634 li r5,0
1767c8f3 635 blr /* image->start(physid, image->start, 0); */
9994a338 636#endif /* CONFIG_KEXEC */
9baeaef6
RR
637
638#ifdef CONFIG_MODULES
639#if defined(_CALL_ELF) && _CALL_ELF == 2
71ec7c55
RR
640
/* With MODVERSIONS, also emit a (weak, possibly zero) CRC entry for
 * TOC. so modpost's version-checking machinery is satisfied. */
641#ifdef CONFIG_MODVERSIONS
642.weak __crc_TOC.
643.section "___kcrctab+TOC.","a"
644.globl __kcrctab_TOC.
645__kcrctab_TOC.:
646	.llong	__crc_TOC.
647#endif
648
9baeaef6
RR
649/*
650 * Export a fake .TOC. since both modpost and depmod will complain otherwise.
651 * Both modpost and depmod strip the leading . so we do the same here.
652 */
653.section "__ksymtab_strings","a"
654__kstrtab_TOC.:
655	.asciz "TOC."
656
657.section "___ksymtab+TOC.","a"
658/* This symbol name is important: it's used by modpost to find exported syms */
659.globl __ksymtab_TOC.
660__ksymtab_TOC.:
/* value is 0: the linker-defined .TOC. is resolved per-module at load
 * time, so the exported address is a placeholder */
661	.llong 0 /* .value */
662	.llong __kstrtab_TOC.
663#endif /* ELFv2 */
664#endif /* MODULES */
[Scraper footer] This page took 0.662558 seconds and 5 git commands to generate.