tracing: Avoid soft lockup in trace_pipe
[deliverable/linux.git] / arch / x86 / include / asm / irqflags.h
CommitLineData
6abcd98f
GOC
1#ifndef _X86_IRQFLAGS_H_
2#define _X86_IRQFLAGS_H_
3
4#include <asm/processor-flags.h>
5
6#ifndef __ASSEMBLY__
7/*
8 * Interrupt control:
9 */
10
/*
 * Read the CPU flags register (EFLAGS/RFLAGS) without any paravirt
 * indirection.  Callers typically test X86_EFLAGS_IF in the returned
 * word to learn whether interrupts are currently enabled.
 */
static inline unsigned long native_save_fl(void)
{
	unsigned long flags;

	/*
	 * "=rm" is safe here, because "pop" adjusts the stack before
	 * it evaluates its effective address -- this is part of the
	 * documented behavior of the "pop" instruction.
	 */
	__asm__ __volatile__("# __raw_save_flags\n\t"
			     "pushf ; pop %0"
			     : "=rm" (flags)
			     : /* no input */
			     : "memory");

	return flags;
}
28
/*
 * Write @flags back into the CPU flags register.  The "cc" clobber
 * tells the compiler the condition codes change; "memory" keeps it
 * from reordering memory accesses across the interrupt-state change.
 */
static inline void native_restore_fl(unsigned long flags)
{
	__asm__ __volatile__("push %0 ; popf"
			     : /* no output */
			     : "g" (flags)
			     : "memory", "cc");
}
36
/* Disable maskable interrupts on this CPU (cli). */
static inline void native_irq_disable(void)
{
	__asm__ __volatile__("cli" : : : "memory");
}
41
/* Enable maskable interrupts on this CPU (sti). */
static inline void native_irq_enable(void)
{
	__asm__ __volatile__("sti" : : : "memory");
}
46
/*
 * Enable interrupts and halt as one unit.  "sti" only takes effect
 * after the following instruction, so no interrupt can sneak in
 * between the enable and the hlt.
 */
static inline void native_safe_halt(void)
{
	__asm__ __volatile__("sti; hlt" : : : "memory");
}
51
/* Halt the CPU without touching the interrupt flag. */
static inline void native_halt(void)
{
	__asm__ __volatile__("hlt" : : : "memory");
}
56
57#endif
58
59#ifdef CONFIG_PARAVIRT
60#include <asm/paravirt.h>
61#else
62#ifndef __ASSEMBLY__
63
/*
 * !CONFIG_PARAVIRT: the generic arch_local_* irq API maps directly
 * onto the native instruction wrappers above.
 */
static inline unsigned long arch_local_save_flags(void)
{
	return native_save_fl();
}
68
/* Restore a flags word previously saved by arch_local_irq_save(). */
static inline void arch_local_irq_restore(unsigned long flags)
{
	native_restore_fl(flags);
}
73
/* Disable interrupts on the local CPU. */
static inline void arch_local_irq_disable(void)
{
	native_irq_disable();
}
78
/* Enable interrupts on the local CPU. */
static inline void arch_local_irq_enable(void)
{
	native_irq_enable();
}
83
/*
 * Used in the idle loop: atomically re-enable interrupts and halt.
 * sti takes one instruction cycle to complete, so the hlt that
 * follows it cannot be separated from the enable.
 */
static inline void arch_safe_halt(void)
{
	native_safe_halt();
}
92
/*
 * Used when interrupts are already enabled or to
 * shutdown the processor: halt without changing IF.
 */
static inline void halt(void)
{
	native_halt();
}
101
/*
 * For spinlocks, etc: capture the current interrupt state, then
 * disable interrupts.  The returned word is later handed back to
 * arch_local_irq_restore() to reinstate the saved state.
 */
static inline unsigned long arch_local_irq_save(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();
	arch_local_irq_disable();

	return flags;
}
#else

/*
 * !CONFIG_PARAVIRT, assembly side: the interrupt-control macros
 * collapse to the bare native instructions.  The (x) argument is
 * unused by these native expansions.
 */
#define ENABLE_INTERRUPTS(x)	sti
#define DISABLE_INTERRUPTS(x)	cli

#ifdef CONFIG_X86_64
#define SWAPGS	swapgs
/*
 * Currently paravirt can't handle swapgs nicely when we
 * don't have a stack we can rely on (such as a user space
 * stack).  So we either find a way around these or just fault
 * and emulate if a guest tries to call swapgs directly.
 *
 * Either way, this is a good way to document that we don't
 * have a reliable stack. x86_64 only.
 */
#define SWAPGS_UNSAFE_STACK	swapgs

/* Native kernels have no paravirt exception frame to adjust. */
#define PARAVIRT_ADJUST_EXCEPTION_FRAME	/*  */

#define INTERRUPT_RETURN	iretq
/* swapgs then sysret: return-to-userspace sequences, 64-bit only. */
#define USERGS_SYSRET64				\
	swapgs;					\
	sysretq;
#define USERGS_SYSRET32				\
	swapgs;					\
	sysretl
#define ENABLE_INTERRUPTS_SYSEXIT32		\
	swapgs;					\
	sti;					\
	sysexit

#else
/* 32-bit native equivalents of the macros above. */
#define INTERRUPT_RETURN		iret
#define ENABLE_INTERRUPTS_SYSEXIT	sti; sysexit
#define GET_CR0_INTO_EAX		movl %cr0, %eax
#endif


#endif /* __ASSEMBLY__ */
#endif /* CONFIG_PARAVIRT */
152
153#ifndef __ASSEMBLY__
df9ee292 154static inline int arch_irqs_disabled_flags(unsigned long flags)
6abcd98f
GOC
155{
156 return !(flags & X86_EFLAGS_IF);
157}
158
/* Non-zero when interrupts are currently disabled on this CPU. */
static inline int arch_irqs_disabled(void)
{
	return arch_irqs_disabled_flags(arch_local_save_flags());
}
165
#else

/*
 * __ASSEMBLY__ side: helpers for irq-flag tracing and lockdep
 * bookkeeping on the syscall exit paths.  The ARCH_* variants are
 * the raw implementations; they are mapped onto LOCKDEP_SYS_EXIT*
 * only under CONFIG_DEBUG_LOCK_ALLOC and expand to nothing
 * otherwise.
 */
#ifdef CONFIG_X86_64
#define ARCH_LOCKDEP_SYS_EXIT		call lockdep_sys_exit_thunk
#define ARCH_LOCKDEP_SYS_EXIT_IRQ	\
	TRACE_IRQS_ON; \
	sti; \
	SAVE_REST; \
	LOCKDEP_SYS_EXIT; \
	RESTORE_REST; \
	cli; \
	TRACE_IRQS_OFF;

#else
/*
 * 32-bit: call lockdep_sys_exit directly, saving and restoring the
 * caller-clobbered registers around the call.
 */
#define ARCH_LOCKDEP_SYS_EXIT			\
	pushl %eax;				\
	pushl %ecx;				\
	pushl %edx;				\
	call lockdep_sys_exit;			\
	popl %edx;				\
	popl %ecx;				\
	popl %eax;

/* The irq variant is empty on 32-bit. */
#define ARCH_LOCKDEP_SYS_EXIT_IRQ
#endif

/* Hardirq tracing hooks; empty unless CONFIG_TRACE_IRQFLAGS. */
#ifdef CONFIG_TRACE_IRQFLAGS
#  define TRACE_IRQS_ON		call trace_hardirqs_on_thunk;
#  define TRACE_IRQS_OFF	call trace_hardirqs_off_thunk;
#else
#  define TRACE_IRQS_ON
#  define TRACE_IRQS_OFF
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#  define LOCKDEP_SYS_EXIT	ARCH_LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ	ARCH_LOCKDEP_SYS_EXIT_IRQ
# else
#  define LOCKDEP_SYS_EXIT
#  define LOCKDEP_SYS_EXIT_IRQ
# endif

#endif /* __ASSEMBLY__ */
#endif
This page took 0.364233 seconds and 5 git commands to generate.