ftrace: use nops instead of jmp

arch/x86/kernel/ftrace.c

/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks go to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>

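/*
 * A call to mcount is five bytes: the 0xe8 opcode plus a 32-bit
 * relative offset. The ip that ftrace is handed is the return address
 * of that call, so every patch site starts CALL_BACK bytes before it.
 */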
#define CALL_BACK 5

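/*
 * ftrace_nop is set at init to this CPU's preferred five-byte nop
 * (see ftrace_shutdown_arch_init() below); comparing the first long
 * of a call site against it is a cheap "already patched?" test.
 */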
/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;

struct ftrace_record {
	struct dyn_ftrace rec;
	int failed;
} __attribute__((packed));

struct ftrace_page {
	struct ftrace_page *next;
	int index;
	struct ftrace_record records[];
} __attribute__((packed));

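/*
 * Records live in whole pages: each page carries a small header and
 * is packed with as many ftrace_record entries as will fit.
 */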
#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct ftrace_record))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

#define MCOUNT_ADDR ((long)(&mcount))

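/*
 * Overlay for the five-byte call instruction: fill in e8 and offset,
 * then read the raw bytes back out through code[].
 */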
union ftrace_code_union {
	char code[5];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

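/*
 * Called the first time a traced function is hit: hand back a record
 * from the pre-allocated pages so the site can be patched later.
 */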
notrace struct dyn_ftrace *ftrace_alloc_shutdown_node(unsigned long ip)
{
	struct ftrace_record *rec;
	unsigned long save;

	ip -= CALL_BACK;
	save = *(long *)ip;

	/* If this was already converted, skip it */
	if (save == *ftrace_nop)
		return NULL;

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	rec = &ftrace_pages->records[ftrace_pages->index++];

	return &rec->rec;
}

static int notrace
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code; /* 4 bytes */
	unsigned new = *(unsigned *)new_code; /* 4 bytes */
	unsigned char newch = new_code[4];
	int faulted = 0;

	/*
	 * Note: due to modules and __init, code can disappear and
	 * change; we need to protect against faulting as well as code
	 * changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
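	/*
	 * cmpxchg stores the new first four bytes only if the site
	 * still holds the expected old bytes; only on success is the
	 * fifth byte written. A fault at the cmpxchg lands in the
	 * fixup section, which sets 'faulted'.
	 */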
	asm volatile (
		"1: lock\n"
		"   cmpxchg %3, (%2)\n"
		"   jnz 2f\n"
		"   movb %b4, 4(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"3: movl $1, %0\n"
		"   jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "r"(newch),
		  "0"(faulted), "a"(old)
		: "memory");
	sync_core();

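	/*
	 * cmpxchg left the previous contents in 'replaced'. If that
	 * matches neither the old bytes we expected nor the new ones
	 * we wrote, the call site changed underneath us.
	 */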
	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}
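
/*
 * rec->ip is the address just after the call to mcount, and a call's
 * 32-bit offset is relative to the following instruction, so
 * MCOUNT_ADDR - ip is exactly the displacement to encode.
 */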
static int notrace ftrace_calc_offset(long ip)
{
	return (int)(MCOUNT_ADDR - ip);
}
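
/*
 * Patch one call site out: build the call instruction we expect to
 * find as the old code and install ftrace_nop in its place.
 */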
notrace void ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	union ftrace_code_union save;
	struct ftrace_record *r =
		container_of(rec, struct ftrace_record, rec);

	ip = rec->ip;

	save.e8 = 0xe8;
	save.offset = ftrace_calc_offset(ip);

	/* move the IP back to the start of the call */
	ip -= CALL_BACK;

	r->failed = ftrace_modify_code(ip, save.code, (char *)ftrace_nop);
}
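
/*
 * Walk every recorded site: with 'saved' set, restore the call to
 * mcount (nop -> call); otherwise patch the call out (call -> nop).
 */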
static void notrace ftrace_replace_code(int saved)
{
	unsigned char *new = NULL, *old = NULL;
	struct ftrace_record *rec;
	struct ftrace_page *pg;
	unsigned long ip;
	int i;

	if (saved)
		old = (char *)ftrace_nop;
	else
		new = (char *)ftrace_nop;

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		for (i = 0; i < pg->index; i++) {
			union ftrace_code_union calc;
			rec = &pg->records[i];

			/* don't modify code that has already faulted */
			if (rec->failed)
				continue;

			ip = rec->rec.ip;

			calc.e8 = 0xe8;
			calc.offset = ftrace_calc_offset(ip);

			if (saved)
				new = calc.code;
			else
				old = calc.code;

			ip -= CALL_BACK;

			rec->failed = ftrace_modify_code(ip, old, new);
		}
	}
}

notrace void ftrace_startup_code(void)
{
	ftrace_replace_code(1);
}

notrace void ftrace_shutdown_code(void)
{
	ftrace_replace_code(0);
}
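
/*
 * ftrace_alloc_shutdown_node() never allocates; it only advances to
 * the next pre-allocated page. This keeps a spare page queued for it.
 */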
notrace void ftrace_shutdown_replenish(void)
{
	if (ftrace_pages->next)
		return;

	/* allocate another page */
	ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

notrace int __init ftrace_shutdown_arch_init(void)
{
	const unsigned char *const *noptable = find_nop_table();
	struct ftrace_page *pg;
	int cnt;
	int i;

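	/*
	 * find_nop_table() returns this CPU's optimal nop sequences,
	 * indexed by length: entry CALL_BACK is the ideal five-byte
	 * nop used to overwrite the call to mcount.
	 */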
209 ftrace_nop = (unsigned long *)noptable[CALL_BACK];
210
3d083395
SR
211 /* allocate a few pages */
212 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
213 if (!ftrace_pages_start)
214 return -1;
215
216 /*
217 * Allocate a few more pages.
218 *
219 * TODO: have some parser search vmlinux before
220 * final linking to find all calls to ftrace.
221 * Then we can:
222 * a) know how many pages to allocate.
223 * and/or
224 * b) set up the table then.
225 *
226 * The dynamic code is still necessary for
227 * modules.
228 */
229
230 pg = ftrace_pages = ftrace_pages_start;
231
232 cnt = NR_TO_INIT / ENTRIES_PER_PAGE;
233
234 for (i = 0; i < cnt; i++) {
235 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
236
237 /* If we fail, we'll try later anyway */
238 if (!pg->next)
239 break;
240
241 pg = pg->next;
242 }
243
244 return 0;
245}