arch/mips/include/asm/irqflags.h
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2003 by Ralf Baechle
 * Copyright (C) 1996 by Paul M. Antoine
 * Copyright (C) 1999 Silicon Graphics
 * Copyright (C) 2000 MIPS Technologies, Inc.
 */
#ifndef _ASM_IRQFLAGS_H
#define _ASM_IRQFLAGS_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <linux/stringify.h>
#include <asm/hazards.h>

#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)

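/*
 * On MIPS32r2/MIPS64r2 the 'di' instruction clears Status.IE in a single
 * step, so interrupts can be disabled without an interruptible
 * read-modify-write of the CP0 Status register.
 */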
static inline void arch_local_irq_disable(void)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noat						\n"
	"	di							\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

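/*
 * 'di %[flags]' copies the pre-clear value of CP0 Status into %[flags]
 * before clearing IE, so after the 'andi' the returned value carries the
 * old IE state in bit 0.
 */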
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
	"	di	%[flags]					\n"
	"	andi	%[flags], 1					\n"
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: /* no inputs */
	: "memory");

	return flags;
}

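/*
 * Only the IE bit recorded by arch_local_irq_save() is restored here; all
 * other Status bits are left untouched by both variants below.
 */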
static inline void arch_local_irq_restore(unsigned long flags)
{
	unsigned long __tmp1;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, %[flags], 0, 1				\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (__tmp1)
	: "0" (flags)
	: "memory");
}

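/*
 * Identical restore sequence to arch_local_irq_restore() above; kept as a
 * separate entry point to match the out-of-line variants declared below.
 */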
static inline void __arch_local_irq_restore(unsigned long flags)
{
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	noreorder					\n"
	"	.set	noat						\n"
#if defined(CONFIG_IRQ_CPU)
	/*
	 * Slow, but doesn't suffer from a relatively unlikely race
	 * condition we've had since day 1.
	 */
	"	beqz	%[flags], 1f					\n"
	"	di							\n"
	"	ei							\n"
	"1:								\n"
#else
	/*
	 * Fast, dangerous.  Life is fun, life is good.
	 */
	"	mfc0	$1, $12						\n"
	"	ins	$1, %[flags], 0, 1				\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_disable_hazard) "			\n"
	"	.set	pop						\n"
	: [flags] "=r" (flags)
	: "0" (flags)
	: "memory");
}
#else
/* Functions that require preempt_{dis,en}able() are in mips-atomic.c */
void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
#endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */

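/*
 * Typical usage, whichever variant is in effect (a sketch; generic code
 * reaches these functions through local_irq_save()/local_irq_restore()
 * from <linux/irqflags.h>):
 *
 *	unsigned long flags;
 *
 *	flags = arch_local_irq_save();	- disable IRQs, remember old IE
 *	...critical section...
 *	arch_local_irq_restore(flags);	- put IE back the way it was
 */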
extern void smtc_ipi_replay(void);

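/*
 * On SMTC a thread disables interrupts for itself via TCStatus.IXMT
 * (CP0 register 2, select 1) rather than Status.IE, hence the separate
 * sequence below.  The pre-R2 fallback uses the classic ori/xori pair:
 * ori sets bits 0..4, xori then clears bits 1..4 again, leaving only
 * IE (bit 0) newly set.
 */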
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC kernel needs to do a software replay of queued
	 * IPIs, at the cost of call overhead on each local_irq_enable()
	 */
	smtc_ipi_replay();
#endif
	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
	"	.set	noat						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	$1, $2, 1	# SMTC - clear TCStatus.IXMT	\n"
	"	ori	$1, 0x400					\n"
	"	xori	$1, 0x400					\n"
	"	mtc0	$1, $2, 1					\n"
#elif defined(CONFIG_CPU_MIPSR2)
	"	ei							\n"
#else
	"	mfc0	$1, $12						\n"
	"	ori	$1, 0x1f					\n"
	"	xori	$1, 0x1e					\n"
	"	mtc0	$1, $12						\n"
#endif
	"	" __stringify(__irq_enable_hazard) "			\n"
	"	.set	pop						\n"
	: /* no outputs */
	: /* no inputs */
	: "memory");
}

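/*
 * Read the flags word without changing it: the whole CP0 Status (or, on
 * SMTC, TCStatus) register is returned; arch_irqs_disabled_flags() below
 * knows which bit to test.
 */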
static inline unsigned long arch_local_save_flags(void)
{
	unsigned long flags;

	__asm__ __volatile__(
	"	.set	push						\n"
	"	.set	reorder						\n"
#ifdef CONFIG_MIPS_MT_SMTC
	"	mfc0	%[flags], $2, 1					\n"
#else
	"	mfc0	%[flags], $12					\n"
#endif
	"	.set	pop						\n"
	: [flags] "=r" (flags));

	return flags;
}

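/*
 * Note the inverted sense of the two tests: on SMTC a set IXMT bit means
 * "disabled", while elsewhere a clear IE bit does.
 */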
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * The SMTC model uses TCStatus.IXMT (bit 10, hence the 0x400
	 * mask) to disable interrupts for a thread/CPU.
	 */
	return flags & 0x400;
#else
	return !(flags & 1);
#endif
}

#endif /* #ifndef __ASSEMBLY__ */

/*
 * Do the CPU's IRQ-state tracing from assembly code.
 */
#ifdef CONFIG_TRACE_IRQFLAGS
/*
 * Reload the registers clobbered by the call to trace_hardirqs_on():
 * the argument registers and $2 (v0) are caller-saved in the MIPS ABIs.
 */
#ifdef CONFIG_64BIT
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$11, PT_R11(sp);					\
	LONG_L	$10, PT_R10(sp);					\
	LONG_L	$9, PT_R9(sp);						\
	LONG_L	$8, PT_R8(sp);						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#else
# define TRACE_IRQS_RELOAD_REGS						\
	LONG_L	$7, PT_R7(sp);						\
	LONG_L	$6, PT_R6(sp);						\
	LONG_L	$5, PT_R5(sp);						\
	LONG_L	$4, PT_R4(sp);						\
	LONG_L	$2, PT_R2(sp)
#endif
# define TRACE_IRQS_ON							\
	CLI;	/* make sure trace_hardirqs_on() is called at kernel level */ \
	jal	trace_hardirqs_on
# define TRACE_IRQS_ON_RELOAD						\
	TRACE_IRQS_ON;							\
	TRACE_IRQS_RELOAD_REGS
# define TRACE_IRQS_OFF							\
	jal	trace_hardirqs_off
#else
# define TRACE_IRQS_ON
# define TRACE_IRQS_ON_RELOAD
# define TRACE_IRQS_OFF
#endif
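/*
 * Usage from assembly is a matter of dropping these macros into the
 * low-level exception paths, e.g. (a sketch, not a verbatim call site):
 *
 *	TRACE_IRQS_OFF			# entering with IRQs off
 *	...
 *	TRACE_IRQS_ON_RELOAD		# IRQs on again; re-fetch the
 *					# registers the call clobbered
 */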

#endif /* _ASM_IRQFLAGS_H */