/* arch/x86/include/asm/ftrace.h — x86 function-tracer (ftrace/mcount) definitions */
1 #ifndef _ASM_X86_FTRACE_H
2 #define _ASM_X86_FTRACE_H
3
4 #ifdef __ASSEMBLY__
5
/*
 * MCOUNT_SAVE_FRAME - spill the caller-clobbered argument registers
 * before calling into C tracing code from mcount.
 *
 * Reserves 0x38 (56) bytes: seven 8-byte slots for %rax and the six
 * integer argument registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9), so
 * the traced function still sees its arguments intact afterwards.
 * Must be paired with MCOUNT_RESTORE_FRAME, which pops in the exact
 * reverse order.
 */
.macro MCOUNT_SAVE_FRAME
	/* taken from glibc */
	subq $0x38, %rsp
	movq %rax, (%rsp)
	movq %rcx, 8(%rsp)
	movq %rdx, 16(%rsp)
	movq %rsi, 24(%rsp)
	movq %rdi, 32(%rsp)
	movq %r8, 40(%rsp)
	movq %r9, 48(%rsp)
.endm
17
/*
 * MCOUNT_RESTORE_FRAME - undo MCOUNT_SAVE_FRAME.
 *
 * Reloads the seven saved registers from their stack slots (same
 * offsets as the save macro, in reverse order) and releases the
 * 0x38-byte frame.
 */
.macro MCOUNT_RESTORE_FRAME
	movq 48(%rsp), %r9
	movq 40(%rsp), %r8
	movq 32(%rsp), %rdi
	movq 24(%rsp), %rsi
	movq 16(%rsp), %rdx
	movq 8(%rsp), %rcx
	movq (%rsp), %rax
	addq $0x38, %rsp
.endm
28
29 #endif
30
31 #ifdef CONFIG_FUNCTION_TRACER
32 #define MCOUNT_ADDR ((long)(mcount))
33 #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
34
35 #ifndef __ASSEMBLY__
36 extern void mcount(void);
37
/*
 * ftrace_call_adjust - map a recorded mcount-call site address to the
 * address of the call instruction itself.
 *
 * A "call mcount" is encoded as "e8 <4-byte offset>".  The recorded
 * @addr points at the 4-byte offset, so the e8 opcode byte sits one
 * byte earlier.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	unsigned long call_insn = addr - 1;	/* back up to the e8 opcode */

	return call_insn;
}
47
48 #ifdef CONFIG_DYNAMIC_FTRACE
49
/*
 * Per-architecture payload embedded in each dynamic-ftrace record.
 * Intentionally empty on x86 — the generic record holds everything
 * this architecture needs.
 */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};
53
54 #endif /* CONFIG_DYNAMIC_FTRACE */
55 #endif /* __ASSEMBLY__ */
56 #endif /* CONFIG_FUNCTION_TRACER */
57
58 #endif /* _ASM_X86_FTRACE_H */