/* arch/arm/kernel/perf_callchain.c */
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
		return NULL;

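	/*
	 * Copy the frame tail from user space with page faults disabled:
	 * the unwind runs in atomic context and must not sleep, so a
	 * failed copy simply ends the walk.
	 */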
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

	return buftail.fp - 1;
}

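/*
 * Record a user-space callchain by starting from the frame pointer in the
 * sampled registers and following the saved frame tails up the stack.
 */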
void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

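	/* The PC at the time of the sample is always the first entry. */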
	perf_callchain_store(entry, regs->ARM_pc);

	if (!current->mm)
		return;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

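	/*
	 * Walk the frame tails while there is room in the entry and the
	 * tail pointer is non-NULL and word aligned; user_backtrace()
	 * returns NULL once the chain can no longer be followed.
	 */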
	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

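/*
 * Record a kernel callchain by unwinding from the sampled registers with
 * the generic ARM stackframe walker.
 */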
void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet */
		return;
	}

	arm_get_current_stackframe(regs, &fr);
	walk_stackframe(&fr, callchain_trace, entry);
}

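/*
 * Report the sampled instruction pointer, preferring the guest IP when the
 * sample was taken while a guest was running.
 */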
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

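/*
 * Classify the sample as user or kernel mode, for either the host or a
 * guest.
 */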
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}