/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/linkage.h>
#include <linux/unistd.h>
#include <asm/irqflags.h>
#include <asm/processor.h>
#include <arch/abi.h>
#include <arch/spr_def.h>

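/*
 * The tilegx ISA spells the "branch if nonzero, predict taken"
 * opcode bnezt rather than the tilepro spelling bnzt, so map the
 * mnemonic here.
 */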
#ifdef __tilegx__
#define bnzt bnezt
#endif

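/*
 * Return the caller's return address (lr), i.e. the text address
 * from which current_text_addr() was invoked.
 */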
STD_ENTRY(current_text_addr)
	{ move r0, lr; jrp lr }
	STD_ENDPROC(current_text_addr)

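/*
 * Compute this routine's own pc (lnk plus a pc-relative adjustment),
 * capture the caller's lr, sp, and r52, and tail-call the C helper
 * _KBacktraceIterator_init_current(kbt, pc, lr, sp, r52); r0 already
 * holds the kbt pointer.
 */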
STD_ENTRY(KBacktraceIterator_init_current)
	{ move r2, lr; lnk r1 }
	{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
	{ move r3, sp; j _KBacktraceIterator_init_current }
	jrp lr   /* keep backtracer happy */
	STD_ENDPROC(KBacktraceIterator_init_current)

/* Loop forever on a nap during SMP boot. */
STD_ENTRY(smp_nap)
	nap
	nop       /* avoid provoking the icache prefetch with a jump */
	j smp_nap /* we are not architecturally guaranteed not to exit nap */
	jrp lr    /* clue in the backtracer */
	STD_ENDPROC(smp_nap)

/*
 * Enable interrupts racelessly and then nap until interrupted.
 * Architecturally, we are guaranteed that enabling interrupts via
 * mtspr to INTERRUPT_CRITICAL_SECTION only interrupts at the next PC.
 * This function's _cpu_idle_nap address is special; see intvec.S.
 * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
 * as a result return to the function that called _cpu_idle().
 */
STD_ENTRY(_cpu_idle)
	movei r1, 1
	IRQ_ENABLE_LOAD(r2, r3)
	mtspr INTERRUPT_CRITICAL_SECTION, r1
	IRQ_ENABLE_APPLY(r2, r3)       /* unmask, but still with ICS set */
	mtspr INTERRUPT_CRITICAL_SECTION, zero
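	/*
	 * Clearing ICS only takes effect at the next PC, so any pending
	 * interrupt is taken no earlier than the nap below.
	 */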
	.global _cpu_idle_nap
_cpu_idle_nap:
	nap
	nop       /* avoid provoking the icache prefetch with a jump */
	jrp lr
	STD_ENDPROC(_cpu_idle)