Merge branch 'linus' into perf/core, to fix conflicts
[deliverable/linux.git] / arch / x86 / include / asm / ftrace.h
CommitLineData
1965aae3
PA
/*
 * x86 ftrace support: mcount/__fentry__ hook addresses and the
 * assembler macros used by the mcount trampolines to save/restore
 * the function-call register state.
 */
#ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H

/* Assembler-only section: frame save/restore macros for the mcount stubs */
#ifdef __ASSEMBLY__

	/* skip is set if the stack was already partially adjusted */
	.macro MCOUNT_SAVE_FRAME skip=0
	/*
	 * We add enough stack to save all regs.
	 */
	subq $(SS+8-\skip), %rsp
	/*
	 * Save %rax and the argument registers into their pt_regs slots
	 * (offsets RAX..R9 come from asm-offsets).
	 */
	movq %rax, RAX(%rsp)
	movq %rcx, RCX(%rsp)
	movq %rdx, RDX(%rsp)
	movq %rsi, RSI(%rsp)
	movq %rdi, RDI(%rsp)
	movq %r8, R8(%rsp)
	movq %r9, R9(%rsp)
	/* Move RIP to its proper location */
	movq SS+8(%rsp), %rdx
	movq %rdx, RIP(%rsp)
	.endm

	/*
	 * Undo MCOUNT_SAVE_FRAME: reload the saved registers (reverse
	 * order of the saves) and pop the pt_regs-sized stack frame.
	 * \skip must match the value given to MCOUNT_SAVE_FRAME.
	 */
	.macro MCOUNT_RESTORE_FRAME skip=0
	movq R9(%rsp), %r9
	movq R8(%rsp), %r8
	movq RDI(%rsp), %rdi
	movq RSI(%rsp), %rsi
	movq RDX(%rsp), %rdx
	movq RCX(%rsp), %rcx
	movq RAX(%rsp), %rax
	addq $(SS+8-\skip), %rsp
	.endm

#endif

#ifdef CONFIG_FUNCTION_TRACER
/*
 * Address of the compiler-inserted entry hook: __fentry__ when the
 * compiler emits fentry calls (CC_USING_FENTRY), mcount otherwise.
 */
#ifdef CC_USING_FENTRY
# define MCOUNT_ADDR		((long)(__fentry__))
#else
# define MCOUNT_ADDR		((long)(mcount))
#endif
#define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

/* With dynamic ftrace, this arch can pass ftrace_ops to callbacks */
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif

#ifndef __ASSEMBLY__
/* Entry hooks inserted by the compiler; bodies are defined in assembly */
extern void mcount(void);
/* Nonzero while ftrace is modifying kernel text (defined in kernel/trace) */
extern atomic_t modifying_ftrace_code;
extern void __fentry__(void);

/*
 * Translate the address of an mcount call site into the address that
 * ftrace should record.  On x86, recordmcount already performs the
 * necessary offset calculation, so the address is returned unchanged.
 */
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr;
}

#ifdef CONFIG_DYNAMIC_FTRACE

/* Per-record arch-private data attached to each dyn_ftrace entry */
struct dyn_arch_ftrace {
	/* No extra data needed for x86 */
};

/* Handles the int3 breakpoint hit while ftrace patches call sites */
int ftrace_int3_handler(struct pt_regs *regs);

/* x86 uses the regular graph entry as the graph trampoline address */
#define FTRACE_GRAPH_TRAMP_ADDR FTRACE_GRAPH_ADDR

#endif /*  CONFIG_DYNAMIC_FTRACE */
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_FUNCTION_TRACER */

395a59d0 76

/* Visible to C code, but not when generating asm-offsets (COMPILE_OFFSETS) */
#if !defined(__ASSEMBLY__) && !defined(COMPILE_OFFSETS)

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_IA32_EMULATION)
#include <asm/compat.h>

/*
 * Because ia32 syscalls do not map to x86_64 syscall numbers
 * this screws up the trace output when tracing a ia32 task.
 * Instead of reporting bogus syscalls, just do not trace them.
 *
 * If the user really wants these, then they should use the
 * raw syscall tracepoints with filtering.
 */
#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS 1
/*
 * Return true if the syscall entered with @regs comes from a compat
 * (ia32) task, so the syscall tracer can skip it (see the comment on
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS above).
 *
 * Idiom fix: the original `if (is_compat_task()) return true; return
 * false;` is collapsed into a direct boolean return — identical
 * behavior, less code.  @regs is intentionally unused here; the check
 * is a property of the current task, not of the register state.
 */
static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
{
	return is_compat_task();
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_IA32_EMULATION */
#endif /* !__ASSEMBLY__ && !COMPILE_OFFSETS */

#endif /* _ASM_X86_FTRACE_H */
This page took 0.344648 seconds and 5 git commands to generate.