Commit | Line | Data |
---|---|---|
867e359b CM |
1 | /* |
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | ||
15 | #include <linux/linkage.h> | |
0707ad30 | 16 | #include <linux/unistd.h> |
867e359b | 17 | #include <asm/irqflags.h> |
a78c942d | 18 | #include <asm/processor.h> |
0707ad30 | 19 | #include <arch/abi.h> |
a78c942d | 20 | #include <arch/spr_def.h> |
867e359b CM |
21 | |
22 | #ifdef __tilegx__ | |
23 | #define bnzt bnezt | |
24 | #endif | |
25 | ||
/*
 * Return the address of the call site: lr holds the caller's return
 * address, which we copy to r0 (the return-value register) in the same
 * bundle as the return itself.
 */
26 | STD_ENTRY(current_text_addr) | 
27 | { move r0, lr; jrp lr } | 
28 | STD_ENDPROC(current_text_addr) | 
29 | ||
867e359b CM |
/*
 * Tail-call into _dump_stack with:
 *   r1 = this function's entry PC (lnk captures the address of the next
 *        bundle; addli then subtracts the lnk point's offset from the
 *        dump_stack symbol, yielding the entry address)
 *   r2 = caller's return address (lr)
 *   r3 = current stack pointer
 *   r4 = r52 (presumably the frame pointer in this ABI -- confirm
 *        against arch/abi.h)
 * The jrp after the tail-call jump is unreachable at runtime; it exists
 * only so the backtracer sees a well-formed function epilogue.
 */
30 | STD_ENTRY(dump_stack)
31 | { move r2, lr; lnk r1 } | 
32 | { move r4, r52; addli r1, r1, dump_stack - . } | 
33 | { move r3, sp; j _dump_stack } | 
34 | jrp lr /* keep backtracer happy */ | 
35 | STD_ENDPROC(dump_stack)
36 | ||
/*
 * Same trampoline pattern as dump_stack above: capture this function's
 * entry PC into r1 (lnk + addli of the symbol-relative offset), pass
 * the caller's lr in r2, sp in r3, and r52 in r4, then tail-call the C
 * worker _KBacktraceIterator_init_current.  The trailing jrp is never
 * executed; it only keeps the backtracer happy.
 */
37 | STD_ENTRY(KBacktraceIterator_init_current)
38 | { move r2, lr; lnk r1 } | 
39 | { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . } | 
40 | { move r3, sp; j _KBacktraceIterator_init_current } | 
41 | jrp lr /* keep backtracer happy */ | 
42 | STD_ENDPROC(KBacktraceIterator_init_current)
867e359b CM |
43 | |
44 | /* Loop forever on a nap during SMP boot. */ | |
/*
 * Park a secondary CPU during SMP boot: nap, and if the core ever wakes
 * (nap is not architecturally guaranteed to be permanent), loop straight
 * back into another nap.  The intervening nop keeps the icache prefetcher
 * from being provoked by a jump immediately after the nap.  The final jrp
 * is unreachable and exists only as a clue for the backtracer.
 */
45 | STD_ENTRY(smp_nap)
46 | nap | 
8c92ba6c | 47 | nop /* avoid provoking the icache prefetch with a jump */
867e359b CM |
48 | j smp_nap /* we are not architecturally guaranteed not to exit nap */
49 | jrp lr /* clue in the backtracer */
50 | STD_ENDPROC(smp_nap)
51 | ||
52 | /* | |
53 | * Enable interrupts racelessly and then nap until interrupted. | |
0b989cac CM |
54 | * Architecturally, we are guaranteed that enabling interrupts via |
55 | * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC. | |
867e359b CM |
56 | * This function's _cpu_idle_nap address is special; see intvec.S. |
57 | * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and | |
58 | * as a result return to the function that called _cpu_idle(). | |
59 | */ | |
/*
 * Raceless idle (see the comment block above): with the interrupt
 * critical section (ICS) set, unmasking interrupts via the
 * IRQ_ENABLE_LOAD/APPLY pair cannot deliver one yet; clearing ICS then
 * guarantees any pending interrupt fires exactly at the next PC, which
 * is the nap.  The _cpu_idle_nap label is special-cased in intvec.S:
 * when an interrupt lands there, the saved PC is bumped past the nap so
 * the interrupt return resumes in _cpu_idle's caller.
 */
60 | STD_ENTRY(_cpu_idle) | 
0b989cac | 61 | movei r1, 1
51007004 | 62 | IRQ_ENABLE_LOAD(r2, r3)
/* Enter the critical section before actually unmasking interrupts. */
0b989cac | 63 | mtspr INTERRUPT_CRITICAL_SECTION, r1
51007004 | 64 | IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
/* Leave ICS: an interrupt can now occur, but only at the next PC (nap). */
0b989cac | 65 | mtspr INTERRUPT_CRITICAL_SECTION, zero
867e359b CM |
66 | .global _cpu_idle_nap
67 | _cpu_idle_nap:
68 | nap
8c92ba6c | 69 | nop /* avoid provoking the icache prefetch with a jump */
867e359b CM |
70 | jrp lr
71 | STD_ENDPROC(_cpu_idle)