ftrace: add traceon traceoff commands to enable/disable the buffers
[deliverable/linux.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f 19#include <linux/seq_file.h>
4a2b8dda 20#include <linux/suspend.h>
5072c59f 21#include <linux/debugfs.h>
3d083395 22#include <linux/hardirq.h>
2d8b820b 23#include <linux/kthread.h>
5072c59f 24#include <linux/uaccess.h>
f22f9a89 25#include <linux/kprobes.h>
2d8b820b 26#include <linux/ftrace.h>
b0fc494f 27#include <linux/sysctl.h>
5072c59f 28#include <linux/ctype.h>
3d083395 29#include <linux/list.h>
59df055f 30#include <linux/hash.h>
3d083395 31
395a59d0
AS
32#include <asm/ftrace.h>
33
3d083395 34#include "trace.h"
16444a8a 35
6912896e
SR
36#define FTRACE_WARN_ON(cond) \
37 do { \
38 if (WARN_ON(cond)) \
39 ftrace_kill(); \
40 } while (0)
41
42#define FTRACE_WARN_ON_ONCE(cond) \
43 do { \
44 if (WARN_ON_ONCE(cond)) \
45 ftrace_kill(); \
46 } while (0)
47
4eebcc81
SR
48/* ftrace_enabled is a method to turn ftrace on or off */
49int ftrace_enabled __read_mostly;
d61f82d0 50static int last_ftrace_enabled;
b0fc494f 51
0ef8cde5 52/* set when tracing only a pid */
978f3a45 53struct pid *ftrace_pid_trace;
21bbecda 54static struct pid * const ftrace_swapper_pid = &init_struct_pid;
df4fc315 55
60a7ecf4
SR
56/* Quick disabling of function tracer. */
57int function_trace_stop;
58
4eebcc81
SR
59/*
60 * ftrace_disabled is set when an anomaly is discovered.
61 * ftrace_disabled is much stronger than ftrace_enabled.
62 */
63static int ftrace_disabled __read_mostly;
64
52baf119 65static DEFINE_MUTEX(ftrace_lock);
b0fc494f 66
16444a8a
ACM
67static struct ftrace_ops ftrace_list_end __read_mostly =
68{
69 .func = ftrace_stub,
70};
71
72static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
73ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
60a7ecf4 74ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
df4fc315 75ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
16444a8a 76
f2252935 77static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
16444a8a
ACM
78{
79 struct ftrace_ops *op = ftrace_list;
80
81 /* in case someone actually ports this to alpha! */
82 read_barrier_depends();
83
84 while (op != &ftrace_list_end) {
85 /* silly alpha */
86 read_barrier_depends();
87 op->func(ip, parent_ip);
88 op = op->next;
89 };
90}
91
df4fc315
SR
92static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
93{
0ef8cde5 94 if (!test_tsk_trace_trace(current))
df4fc315
SR
95 return;
96
97 ftrace_pid_function(ip, parent_ip);
98}
99
100static void set_ftrace_pid_function(ftrace_func_t func)
101{
102 /* do not set ftrace_pid_function to itself! */
103 if (func != ftrace_pid_func)
104 ftrace_pid_function = func;
105}
106
16444a8a 107/**
3d083395 108 * clear_ftrace_function - reset the ftrace function
16444a8a 109 *
3d083395
SR
110 * This NULLs the ftrace function and in essence stops
111 * tracing. There may be lag
16444a8a 112 */
3d083395 113void clear_ftrace_function(void)
16444a8a 114{
3d083395 115 ftrace_trace_function = ftrace_stub;
60a7ecf4 116 __ftrace_trace_function = ftrace_stub;
df4fc315 117 ftrace_pid_function = ftrace_stub;
3d083395
SR
118}
119
60a7ecf4
SR
120#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
121/*
122 * For those archs that do not test ftrace_trace_stop in their
123 * mcount call site, we need to do it from C.
124 */
125static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
126{
127 if (function_trace_stop)
128 return;
129
130 __ftrace_trace_function(ip, parent_ip);
131}
132#endif
133
e309b41d 134static int __register_ftrace_function(struct ftrace_ops *ops)
3d083395 135{
16444a8a
ACM
136 ops->next = ftrace_list;
137 /*
138 * We are entering ops into the ftrace_list but another
139 * CPU might be walking that list. We need to make sure
140 * the ops->next pointer is valid before another CPU sees
141 * the ops pointer included into the ftrace_list.
142 */
143 smp_wmb();
144 ftrace_list = ops;
3d083395 145
b0fc494f 146 if (ftrace_enabled) {
df4fc315
SR
147 ftrace_func_t func;
148
149 if (ops->next == &ftrace_list_end)
150 func = ops->func;
151 else
152 func = ftrace_list_func;
153
978f3a45 154 if (ftrace_pid_trace) {
df4fc315
SR
155 set_ftrace_pid_function(func);
156 func = ftrace_pid_func;
157 }
158
b0fc494f
SR
159 /*
160 * For one func, simply call it directly.
161 * For more than one func, call the chain.
162 */
60a7ecf4 163#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
df4fc315 164 ftrace_trace_function = func;
60a7ecf4 165#else
df4fc315 166 __ftrace_trace_function = func;
60a7ecf4
SR
167 ftrace_trace_function = ftrace_test_stop_func;
168#endif
b0fc494f 169 }
3d083395 170
16444a8a
ACM
171 return 0;
172}
173
e309b41d 174static int __unregister_ftrace_function(struct ftrace_ops *ops)
16444a8a 175{
16444a8a 176 struct ftrace_ops **p;
16444a8a
ACM
177
178 /*
3d083395
SR
179 * If we are removing the last function, then simply point
180 * to the ftrace_stub.
16444a8a
ACM
181 */
182 if (ftrace_list == ops && ops->next == &ftrace_list_end) {
183 ftrace_trace_function = ftrace_stub;
184 ftrace_list = &ftrace_list_end;
e6ea44e9 185 return 0;
16444a8a
ACM
186 }
187
188 for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
189 if (*p == ops)
190 break;
191
e6ea44e9
SR
192 if (*p != ops)
193 return -1;
16444a8a
ACM
194
195 *p = (*p)->next;
196
b0fc494f
SR
197 if (ftrace_enabled) {
198 /* If we only have one func left, then call that directly */
df4fc315
SR
199 if (ftrace_list->next == &ftrace_list_end) {
200 ftrace_func_t func = ftrace_list->func;
201
978f3a45 202 if (ftrace_pid_trace) {
df4fc315
SR
203 set_ftrace_pid_function(func);
204 func = ftrace_pid_func;
205 }
206#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
207 ftrace_trace_function = func;
208#else
209 __ftrace_trace_function = func;
210#endif
211 }
b0fc494f 212 }
16444a8a 213
e6ea44e9 214 return 0;
3d083395
SR
215}
216
df4fc315
SR
217static void ftrace_update_pid_func(void)
218{
219 ftrace_func_t func;
220
52baf119 221 mutex_lock(&ftrace_lock);
df4fc315
SR
222
223 if (ftrace_trace_function == ftrace_stub)
224 goto out;
225
226 func = ftrace_trace_function;
227
978f3a45 228 if (ftrace_pid_trace) {
df4fc315
SR
229 set_ftrace_pid_function(func);
230 func = ftrace_pid_func;
231 } else {
66eafebc
LW
232 if (func == ftrace_pid_func)
233 func = ftrace_pid_function;
df4fc315
SR
234 }
235
236#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
237 ftrace_trace_function = func;
238#else
239 __ftrace_trace_function = func;
240#endif
241
242 out:
52baf119 243 mutex_unlock(&ftrace_lock);
df4fc315
SR
244}
245
3d083395 246#ifdef CONFIG_DYNAMIC_FTRACE
99ecdc43 247#ifndef CONFIG_FTRACE_MCOUNT_RECORD
cb7be3b2 248# error Dynamic ftrace depends on MCOUNT_RECORD
99ecdc43
SR
249#endif
250
d61f82d0
SR
251enum {
252 FTRACE_ENABLE_CALLS = (1 << 0),
253 FTRACE_DISABLE_CALLS = (1 << 1),
254 FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
255 FTRACE_ENABLE_MCOUNT = (1 << 3),
256 FTRACE_DISABLE_MCOUNT = (1 << 4),
5a45cfe1
SR
257 FTRACE_START_FUNC_RET = (1 << 5),
258 FTRACE_STOP_FUNC_RET = (1 << 6),
d61f82d0
SR
259};
260
5072c59f
SR
261static int ftrace_filtered;
262
08f5ac90 263static LIST_HEAD(ftrace_new_addrs);
3d083395 264
41c52c0d 265static DEFINE_MUTEX(ftrace_regex_lock);
3d083395 266
3c1720f0
SR
267struct ftrace_page {
268 struct ftrace_page *next;
431aa3fb 269 int index;
3c1720f0 270 struct dyn_ftrace records[];
aa5e5cea 271};
3c1720f0
SR
272
273#define ENTRIES_PER_PAGE \
274 ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
275
276/* estimate from running different kernels */
277#define NR_TO_INIT 10000
278
279static struct ftrace_page *ftrace_pages_start;
280static struct ftrace_page *ftrace_pages;
281
37ad5084
SR
282static struct dyn_ftrace *ftrace_free_records;
283
265c831c
SR
284/*
285 * This is a double for. Do not use 'break' to break out of the loop,
286 * you must use a goto.
287 */
288#define do_for_each_ftrace_rec(pg, rec) \
289 for (pg = ftrace_pages_start; pg; pg = pg->next) { \
290 int _____i; \
291 for (_____i = 0; _____i < pg->index; _____i++) { \
292 rec = &pg->records[_____i];
293
294#define while_for_each_ftrace_rec() \
295 } \
296 }
ecea656d
AS
297
298#ifdef CONFIG_KPROBES
f17845e5
IM
299
300static int frozen_record_count;
301
ecea656d
AS
302static inline void freeze_record(struct dyn_ftrace *rec)
303{
304 if (!(rec->flags & FTRACE_FL_FROZEN)) {
305 rec->flags |= FTRACE_FL_FROZEN;
306 frozen_record_count++;
307 }
308}
309
310static inline void unfreeze_record(struct dyn_ftrace *rec)
311{
312 if (rec->flags & FTRACE_FL_FROZEN) {
313 rec->flags &= ~FTRACE_FL_FROZEN;
314 frozen_record_count--;
315 }
316}
317
318static inline int record_frozen(struct dyn_ftrace *rec)
319{
320 return rec->flags & FTRACE_FL_FROZEN;
321}
322#else
323# define freeze_record(rec) ({ 0; })
324# define unfreeze_record(rec) ({ 0; })
325# define record_frozen(rec) ({ 0; })
326#endif /* CONFIG_KPROBES */
327
e309b41d 328static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084 329{
37ad5084
SR
330 rec->ip = (unsigned long)ftrace_free_records;
331 ftrace_free_records = rec;
332 rec->flags |= FTRACE_FL_FREE;
333}
334
fed1939c
SR
335void ftrace_release(void *start, unsigned long size)
336{
337 struct dyn_ftrace *rec;
338 struct ftrace_page *pg;
339 unsigned long s = (unsigned long)start;
340 unsigned long e = s + size;
fed1939c 341
00fd61ae 342 if (ftrace_disabled || !start)
fed1939c
SR
343 return;
344
52baf119 345 mutex_lock(&ftrace_lock);
265c831c
SR
346 do_for_each_ftrace_rec(pg, rec) {
347 if ((rec->ip >= s) && (rec->ip < e))
348 ftrace_free_rec(rec);
349 } while_for_each_ftrace_rec();
52baf119 350 mutex_unlock(&ftrace_lock);
fed1939c
SR
351}
352
e309b41d 353static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
3c1720f0 354{
37ad5084
SR
355 struct dyn_ftrace *rec;
356
357 /* First check for freed records */
358 if (ftrace_free_records) {
359 rec = ftrace_free_records;
360
37ad5084 361 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
6912896e 362 FTRACE_WARN_ON_ONCE(1);
37ad5084
SR
363 ftrace_free_records = NULL;
364 return NULL;
365 }
366
367 ftrace_free_records = (void *)rec->ip;
368 memset(rec, 0, sizeof(*rec));
369 return rec;
370 }
371
3c1720f0 372 if (ftrace_pages->index == ENTRIES_PER_PAGE) {
08f5ac90
SR
373 if (!ftrace_pages->next) {
374 /* allocate another page */
375 ftrace_pages->next =
376 (void *)get_zeroed_page(GFP_KERNEL);
377 if (!ftrace_pages->next)
378 return NULL;
379 }
3c1720f0
SR
380 ftrace_pages = ftrace_pages->next;
381 }
382
383 return &ftrace_pages->records[ftrace_pages->index++];
384}
385
08f5ac90 386static struct dyn_ftrace *
d61f82d0 387ftrace_record_ip(unsigned long ip)
3d083395 388{
08f5ac90 389 struct dyn_ftrace *rec;
3d083395 390
f3c7ac40 391 if (ftrace_disabled)
08f5ac90 392 return NULL;
3d083395 393
08f5ac90
SR
394 rec = ftrace_alloc_dyn_node(ip);
395 if (!rec)
396 return NULL;
3d083395 397
08f5ac90 398 rec->ip = ip;
3d083395 399
08f5ac90 400 list_add(&rec->list, &ftrace_new_addrs);
3d083395 401
08f5ac90 402 return rec;
3d083395
SR
403}
404
b17e8a37
SR
405static void print_ip_ins(const char *fmt, unsigned char *p)
406{
407 int i;
408
409 printk(KERN_CONT "%s", fmt);
410
411 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
412 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
413}
414
31e88909 415static void ftrace_bug(int failed, unsigned long ip)
b17e8a37
SR
416{
417 switch (failed) {
418 case -EFAULT:
419 FTRACE_WARN_ON_ONCE(1);
420 pr_info("ftrace faulted on modifying ");
421 print_ip_sym(ip);
422 break;
423 case -EINVAL:
424 FTRACE_WARN_ON_ONCE(1);
425 pr_info("ftrace failed to modify ");
426 print_ip_sym(ip);
b17e8a37 427 print_ip_ins(" actual: ", (unsigned char *)ip);
b17e8a37
SR
428 printk(KERN_CONT "\n");
429 break;
430 case -EPERM:
431 FTRACE_WARN_ON_ONCE(1);
432 pr_info("ftrace faulted on writing ");
433 print_ip_sym(ip);
434 break;
435 default:
436 FTRACE_WARN_ON_ONCE(1);
437 pr_info("ftrace faulted on unknown error ");
438 print_ip_sym(ip);
439 }
440}
441
3c1720f0 442
0eb96701 443static int
31e88909 444__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
5072c59f 445{
41c52c0d 446 unsigned long ip, fl;
e7d3737e
FW
447 unsigned long ftrace_addr;
448
f0001207 449 ftrace_addr = (unsigned long)FTRACE_ADDR;
5072c59f
SR
450
451 ip = rec->ip;
452
982c350b
SR
453 /*
454 * If this record is not to be traced and
455 * it is not enabled then do nothing.
456 *
457 * If this record is not to be traced and
57794a9d 458 * it is enabled then disable it.
982c350b
SR
459 *
460 */
461 if (rec->flags & FTRACE_FL_NOTRACE) {
462 if (rec->flags & FTRACE_FL_ENABLED)
463 rec->flags &= ~FTRACE_FL_ENABLED;
464 else
465 return 0;
466
467 } else if (ftrace_filtered && enable) {
5072c59f 468 /*
982c350b 469 * Filtering is on:
5072c59f 470 */
a4500b84 471
982c350b 472 fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);
5072c59f 473
982c350b
SR
474 /* Record is filtered and enabled, do nothing */
475 if (fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED))
0eb96701 476 return 0;
5072c59f 477
57794a9d 478 /* Record is not filtered or enabled, do nothing */
982c350b
SR
479 if (!fl)
480 return 0;
481
482 /* Record is not filtered but enabled, disable it */
483 if (fl == FTRACE_FL_ENABLED)
5072c59f 484 rec->flags &= ~FTRACE_FL_ENABLED;
982c350b
SR
485 else
486 /* Otherwise record is filtered but not enabled, enable it */
5072c59f 487 rec->flags |= FTRACE_FL_ENABLED;
5072c59f 488 } else {
982c350b 489 /* Disable or not filtered */
5072c59f 490
41c52c0d 491 if (enable) {
982c350b 492 /* if record is enabled, do nothing */
5072c59f 493 if (rec->flags & FTRACE_FL_ENABLED)
0eb96701 494 return 0;
982c350b 495
5072c59f 496 rec->flags |= FTRACE_FL_ENABLED;
982c350b 497
5072c59f 498 } else {
982c350b 499
57794a9d 500 /* if record is not enabled, do nothing */
5072c59f 501 if (!(rec->flags & FTRACE_FL_ENABLED))
0eb96701 502 return 0;
982c350b 503
5072c59f
SR
504 rec->flags &= ~FTRACE_FL_ENABLED;
505 }
506 }
507
982c350b 508 if (rec->flags & FTRACE_FL_ENABLED)
e7d3737e 509 return ftrace_make_call(rec, ftrace_addr);
31e88909 510 else
e7d3737e 511 return ftrace_make_nop(NULL, rec, ftrace_addr);
5072c59f
SR
512}
513
e309b41d 514static void ftrace_replace_code(int enable)
3c1720f0 515{
265c831c 516 int failed;
3c1720f0
SR
517 struct dyn_ftrace *rec;
518 struct ftrace_page *pg;
3c1720f0 519
265c831c
SR
520 do_for_each_ftrace_rec(pg, rec) {
521 /*
522 * Skip over free records and records that have
523 * failed.
524 */
525 if (rec->flags & FTRACE_FL_FREE ||
526 rec->flags & FTRACE_FL_FAILED)
527 continue;
528
529 /* ignore updates to this record's mcount site */
530 if (get_kprobe((void *)rec->ip)) {
531 freeze_record(rec);
532 continue;
533 } else {
534 unfreeze_record(rec);
535 }
f22f9a89 536
265c831c
SR
537 failed = __ftrace_replace_code(rec, enable);
538 if (failed && (rec->flags & FTRACE_FL_CONVERTED)) {
539 rec->flags |= FTRACE_FL_FAILED;
540 if ((system_state == SYSTEM_BOOTING) ||
541 !core_kernel_text(rec->ip)) {
542 ftrace_free_rec(rec);
543 } else
544 ftrace_bug(failed, rec->ip);
3c1720f0 545 }
265c831c 546 } while_for_each_ftrace_rec();
3c1720f0
SR
547}
548
492a7ea5 549static int
31e88909 550ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0
SR
551{
552 unsigned long ip;
593eb8a2 553 int ret;
3c1720f0
SR
554
555 ip = rec->ip;
556
25aac9dc 557 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 558 if (ret) {
31e88909 559 ftrace_bug(ret, ip);
3c1720f0 560 rec->flags |= FTRACE_FL_FAILED;
492a7ea5 561 return 0;
37ad5084 562 }
492a7ea5 563 return 1;
3c1720f0
SR
564}
565
e309b41d 566static int __ftrace_modify_code(void *data)
3d083395 567{
d61f82d0
SR
568 int *command = data;
569
a3583244 570 if (*command & FTRACE_ENABLE_CALLS)
d61f82d0 571 ftrace_replace_code(1);
a3583244 572 else if (*command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
573 ftrace_replace_code(0);
574
575 if (*command & FTRACE_UPDATE_TRACE_FUNC)
576 ftrace_update_ftrace_func(ftrace_trace_function);
577
5a45cfe1
SR
578 if (*command & FTRACE_START_FUNC_RET)
579 ftrace_enable_ftrace_graph_caller();
580 else if (*command & FTRACE_STOP_FUNC_RET)
581 ftrace_disable_ftrace_graph_caller();
582
d61f82d0 583 return 0;
3d083395
SR
584}
585
e309b41d 586static void ftrace_run_update_code(int command)
3d083395 587{
784e2d76 588 stop_machine(__ftrace_modify_code, &command, NULL);
3d083395
SR
589}
590
d61f82d0 591static ftrace_func_t saved_ftrace_func;
60a7ecf4 592static int ftrace_start_up;
df4fc315
SR
593
594static void ftrace_startup_enable(int command)
595{
596 if (saved_ftrace_func != ftrace_trace_function) {
597 saved_ftrace_func = ftrace_trace_function;
598 command |= FTRACE_UPDATE_TRACE_FUNC;
599 }
600
601 if (!command || !ftrace_enabled)
602 return;
603
604 ftrace_run_update_code(command);
605}
d61f82d0 606
5a45cfe1 607static void ftrace_startup(int command)
3d083395 608{
4eebcc81
SR
609 if (unlikely(ftrace_disabled))
610 return;
611
60a7ecf4 612 ftrace_start_up++;
982c350b 613 command |= FTRACE_ENABLE_CALLS;
d61f82d0 614
df4fc315 615 ftrace_startup_enable(command);
3d083395
SR
616}
617
5a45cfe1 618static void ftrace_shutdown(int command)
3d083395 619{
4eebcc81
SR
620 if (unlikely(ftrace_disabled))
621 return;
622
60a7ecf4
SR
623 ftrace_start_up--;
624 if (!ftrace_start_up)
d61f82d0 625 command |= FTRACE_DISABLE_CALLS;
3d083395 626
d61f82d0
SR
627 if (saved_ftrace_func != ftrace_trace_function) {
628 saved_ftrace_func = ftrace_trace_function;
629 command |= FTRACE_UPDATE_TRACE_FUNC;
630 }
3d083395 631
d61f82d0 632 if (!command || !ftrace_enabled)
e6ea44e9 633 return;
d61f82d0
SR
634
635 ftrace_run_update_code(command);
3d083395
SR
636}
637
e309b41d 638static void ftrace_startup_sysctl(void)
b0fc494f 639{
d61f82d0
SR
640 int command = FTRACE_ENABLE_MCOUNT;
641
4eebcc81
SR
642 if (unlikely(ftrace_disabled))
643 return;
644
d61f82d0
SR
645 /* Force update next time */
646 saved_ftrace_func = NULL;
60a7ecf4
SR
647 /* ftrace_start_up is true if we want ftrace running */
648 if (ftrace_start_up)
d61f82d0
SR
649 command |= FTRACE_ENABLE_CALLS;
650
651 ftrace_run_update_code(command);
b0fc494f
SR
652}
653
e309b41d 654static void ftrace_shutdown_sysctl(void)
b0fc494f 655{
d61f82d0
SR
656 int command = FTRACE_DISABLE_MCOUNT;
657
4eebcc81
SR
658 if (unlikely(ftrace_disabled))
659 return;
660
60a7ecf4
SR
661 /* ftrace_start_up is true if ftrace is running */
662 if (ftrace_start_up)
d61f82d0
SR
663 command |= FTRACE_DISABLE_CALLS;
664
665 ftrace_run_update_code(command);
b0fc494f
SR
666}
667
3d083395
SR
668static cycle_t ftrace_update_time;
669static unsigned long ftrace_update_cnt;
670unsigned long ftrace_update_tot_cnt;
671
31e88909 672static int ftrace_update_code(struct module *mod)
3d083395 673{
08f5ac90 674 struct dyn_ftrace *p, *t;
f22f9a89 675 cycle_t start, stop;
3d083395 676
750ed1a4 677 start = ftrace_now(raw_smp_processor_id());
3d083395
SR
678 ftrace_update_cnt = 0;
679
08f5ac90 680 list_for_each_entry_safe(p, t, &ftrace_new_addrs, list) {
3d083395 681
08f5ac90
SR
682 /* If something went wrong, bail without enabling anything */
683 if (unlikely(ftrace_disabled))
684 return -1;
f22f9a89 685
08f5ac90 686 list_del_init(&p->list);
f22f9a89 687
08f5ac90 688 /* convert record (i.e, patch mcount-call with NOP) */
31e88909 689 if (ftrace_code_disable(mod, p)) {
08f5ac90
SR
690 p->flags |= FTRACE_FL_CONVERTED;
691 ftrace_update_cnt++;
692 } else
693 ftrace_free_rec(p);
3d083395
SR
694 }
695
750ed1a4 696 stop = ftrace_now(raw_smp_processor_id());
3d083395
SR
697 ftrace_update_time = stop - start;
698 ftrace_update_tot_cnt += ftrace_update_cnt;
699
16444a8a
ACM
700 return 0;
701}
702
68bf21aa 703static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
3c1720f0
SR
704{
705 struct ftrace_page *pg;
706 int cnt;
707 int i;
3c1720f0
SR
708
709 /* allocate a few pages */
710 ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
711 if (!ftrace_pages_start)
712 return -1;
713
714 /*
715 * Allocate a few more pages.
716 *
717 * TODO: have some parser search vmlinux before
718 * final linking to find all calls to ftrace.
719 * Then we can:
720 * a) know how many pages to allocate.
721 * and/or
722 * b) set up the table then.
723 *
724 * The dynamic code is still necessary for
725 * modules.
726 */
727
728 pg = ftrace_pages = ftrace_pages_start;
729
68bf21aa 730 cnt = num_to_init / ENTRIES_PER_PAGE;
08f5ac90 731 pr_info("ftrace: allocating %ld entries in %d pages\n",
5821e1b7 732 num_to_init, cnt + 1);
3c1720f0
SR
733
734 for (i = 0; i < cnt; i++) {
735 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
736
737 /* If we fail, we'll try later anyway */
738 if (!pg->next)
739 break;
740
741 pg = pg->next;
742 }
743
744 return 0;
745}
746
5072c59f
SR
747enum {
748 FTRACE_ITER_FILTER = (1 << 0),
749 FTRACE_ITER_CONT = (1 << 1),
41c52c0d 750 FTRACE_ITER_NOTRACE = (1 << 2),
eb9a7bf0 751 FTRACE_ITER_FAILURES = (1 << 3),
0c75a3ed 752 FTRACE_ITER_PRINTALL = (1 << 4),
5072c59f
SR
753};
754
755#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
756
757struct ftrace_iterator {
5072c59f 758 struct ftrace_page *pg;
431aa3fb 759 int idx;
5072c59f
SR
760 unsigned flags;
761 unsigned char buffer[FTRACE_BUFF_MAX+1];
762 unsigned buffer_idx;
763 unsigned filtered;
764};
765
e309b41d 766static void *
5072c59f
SR
767t_next(struct seq_file *m, void *v, loff_t *pos)
768{
769 struct ftrace_iterator *iter = m->private;
770 struct dyn_ftrace *rec = NULL;
771
772 (*pos)++;
773
0c75a3ed
SR
774 if (iter->flags & FTRACE_ITER_PRINTALL)
775 return NULL;
776
52baf119 777 mutex_lock(&ftrace_lock);
5072c59f
SR
778 retry:
779 if (iter->idx >= iter->pg->index) {
780 if (iter->pg->next) {
781 iter->pg = iter->pg->next;
782 iter->idx = 0;
783 goto retry;
50cdaf08
LW
784 } else {
785 iter->idx = -1;
5072c59f
SR
786 }
787 } else {
788 rec = &iter->pg->records[iter->idx++];
a9fdda33
SR
789 if ((rec->flags & FTRACE_FL_FREE) ||
790
791 (!(iter->flags & FTRACE_ITER_FAILURES) &&
eb9a7bf0
AS
792 (rec->flags & FTRACE_FL_FAILED)) ||
793
794 ((iter->flags & FTRACE_ITER_FAILURES) &&
a9fdda33 795 !(rec->flags & FTRACE_FL_FAILED)) ||
eb9a7bf0 796
0183fb1c
SR
797 ((iter->flags & FTRACE_ITER_FILTER) &&
798 !(rec->flags & FTRACE_FL_FILTER)) ||
799
41c52c0d
SR
800 ((iter->flags & FTRACE_ITER_NOTRACE) &&
801 !(rec->flags & FTRACE_FL_NOTRACE))) {
5072c59f
SR
802 rec = NULL;
803 goto retry;
804 }
805 }
52baf119 806 mutex_unlock(&ftrace_lock);
5072c59f 807
5072c59f
SR
808 return rec;
809}
810
811static void *t_start(struct seq_file *m, loff_t *pos)
812{
813 struct ftrace_iterator *iter = m->private;
814 void *p = NULL;
5072c59f 815
0c75a3ed
SR
816 /*
817 * For set_ftrace_filter reading, if we have the filter
818 * off, we can short cut and just print out that all
819 * functions are enabled.
820 */
821 if (iter->flags & FTRACE_ITER_FILTER && !ftrace_filtered) {
822 if (*pos > 0)
823 return NULL;
824 iter->flags |= FTRACE_ITER_PRINTALL;
825 (*pos)++;
826 return iter;
827 }
828
50cdaf08
LW
829 if (*pos > 0) {
830 if (iter->idx < 0)
831 return p;
832 (*pos)--;
833 iter->idx--;
834 }
5821e1b7 835
50cdaf08 836 p = t_next(m, p, pos);
5072c59f
SR
837
838 return p;
839}
840
841static void t_stop(struct seq_file *m, void *p)
842{
843}
844
845static int t_show(struct seq_file *m, void *v)
846{
0c75a3ed 847 struct ftrace_iterator *iter = m->private;
5072c59f
SR
848 struct dyn_ftrace *rec = v;
849 char str[KSYM_SYMBOL_LEN];
850
0c75a3ed
SR
851 if (iter->flags & FTRACE_ITER_PRINTALL) {
852 seq_printf(m, "#### all functions enabled ####\n");
853 return 0;
854 }
855
5072c59f
SR
856 if (!rec)
857 return 0;
858
859 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
860
50cdaf08 861 seq_printf(m, "%s\n", str);
5072c59f
SR
862
863 return 0;
864}
865
866static struct seq_operations show_ftrace_seq_ops = {
867 .start = t_start,
868 .next = t_next,
869 .stop = t_stop,
870 .show = t_show,
871};
872
e309b41d 873static int
5072c59f
SR
874ftrace_avail_open(struct inode *inode, struct file *file)
875{
876 struct ftrace_iterator *iter;
877 int ret;
878
4eebcc81
SR
879 if (unlikely(ftrace_disabled))
880 return -ENODEV;
881
5072c59f
SR
882 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
883 if (!iter)
884 return -ENOMEM;
885
886 iter->pg = ftrace_pages_start;
5072c59f
SR
887
888 ret = seq_open(file, &show_ftrace_seq_ops);
889 if (!ret) {
890 struct seq_file *m = file->private_data;
4bf39a94 891
5072c59f 892 m->private = iter;
4bf39a94 893 } else {
5072c59f 894 kfree(iter);
4bf39a94 895 }
5072c59f
SR
896
897 return ret;
898}
899
900int ftrace_avail_release(struct inode *inode, struct file *file)
901{
902 struct seq_file *m = (struct seq_file *)file->private_data;
903 struct ftrace_iterator *iter = m->private;
904
905 seq_release(inode, file);
906 kfree(iter);
4bf39a94 907
5072c59f
SR
908 return 0;
909}
910
eb9a7bf0
AS
911static int
912ftrace_failures_open(struct inode *inode, struct file *file)
913{
914 int ret;
915 struct seq_file *m;
916 struct ftrace_iterator *iter;
917
918 ret = ftrace_avail_open(inode, file);
919 if (!ret) {
920 m = (struct seq_file *)file->private_data;
921 iter = (struct ftrace_iterator *)m->private;
922 iter->flags = FTRACE_ITER_FAILURES;
923 }
924
925 return ret;
926}
927
928
41c52c0d 929static void ftrace_filter_reset(int enable)
5072c59f
SR
930{
931 struct ftrace_page *pg;
932 struct dyn_ftrace *rec;
41c52c0d 933 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f 934
52baf119 935 mutex_lock(&ftrace_lock);
41c52c0d
SR
936 if (enable)
937 ftrace_filtered = 0;
265c831c
SR
938 do_for_each_ftrace_rec(pg, rec) {
939 if (rec->flags & FTRACE_FL_FAILED)
940 continue;
941 rec->flags &= ~type;
942 } while_for_each_ftrace_rec();
52baf119 943 mutex_unlock(&ftrace_lock);
5072c59f
SR
944}
945
e309b41d 946static int
41c52c0d 947ftrace_regex_open(struct inode *inode, struct file *file, int enable)
5072c59f
SR
948{
949 struct ftrace_iterator *iter;
950 int ret = 0;
951
4eebcc81
SR
952 if (unlikely(ftrace_disabled))
953 return -ENODEV;
954
5072c59f
SR
955 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
956 if (!iter)
957 return -ENOMEM;
958
41c52c0d 959 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
960 if ((file->f_mode & FMODE_WRITE) &&
961 !(file->f_flags & O_APPEND))
41c52c0d 962 ftrace_filter_reset(enable);
5072c59f
SR
963
964 if (file->f_mode & FMODE_READ) {
965 iter->pg = ftrace_pages_start;
41c52c0d
SR
966 iter->flags = enable ? FTRACE_ITER_FILTER :
967 FTRACE_ITER_NOTRACE;
5072c59f
SR
968
969 ret = seq_open(file, &show_ftrace_seq_ops);
970 if (!ret) {
971 struct seq_file *m = file->private_data;
972 m->private = iter;
973 } else
974 kfree(iter);
975 } else
976 file->private_data = iter;
41c52c0d 977 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
978
979 return ret;
980}
981
41c52c0d
SR
982static int
983ftrace_filter_open(struct inode *inode, struct file *file)
984{
985 return ftrace_regex_open(inode, file, 1);
986}
987
988static int
989ftrace_notrace_open(struct inode *inode, struct file *file)
990{
991 return ftrace_regex_open(inode, file, 0);
992}
993
e309b41d 994static ssize_t
41c52c0d 995ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
996 size_t cnt, loff_t *ppos)
997{
998 if (file->f_mode & FMODE_READ)
999 return seq_read(file, ubuf, cnt, ppos);
1000 else
1001 return -EPERM;
1002}
1003
e309b41d 1004static loff_t
41c52c0d 1005ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
5072c59f
SR
1006{
1007 loff_t ret;
1008
1009 if (file->f_mode & FMODE_READ)
1010 ret = seq_lseek(file, offset, origin);
1011 else
1012 file->f_pos = ret = 1;
1013
1014 return ret;
1015}
1016
1017enum {
1018 MATCH_FULL,
1019 MATCH_FRONT_ONLY,
1020 MATCH_MIDDLE_ONLY,
1021 MATCH_END_ONLY,
1022};
1023
9f4801e3
SR
1024/*
1025 * (static function - no need for kernel doc)
1026 *
1027 * Pass in a buffer containing a glob and this function will
1028 * set search to point to the search part of the buffer and
1029 * return the type of search it is (see enum above).
1030 * This does modify buff.
1031 *
1032 * Returns enum type.
1033 * search returns the pointer to use for comparison.
1034 * not returns 1 if buff started with a '!'
1035 * 0 otherwise.
1036 */
1037static int
64e7c440 1038ftrace_setup_glob(char *buff, int len, char **search, int *not)
5072c59f 1039{
5072c59f 1040 int type = MATCH_FULL;
9f4801e3 1041 int i;
ea3a6d6d
SR
1042
1043 if (buff[0] == '!') {
9f4801e3 1044 *not = 1;
ea3a6d6d
SR
1045 buff++;
1046 len--;
9f4801e3
SR
1047 } else
1048 *not = 0;
1049
1050 *search = buff;
5072c59f
SR
1051
1052 for (i = 0; i < len; i++) {
1053 if (buff[i] == '*') {
1054 if (!i) {
9f4801e3 1055 *search = buff + 1;
5072c59f 1056 type = MATCH_END_ONLY;
5072c59f 1057 } else {
9f4801e3 1058 if (type == MATCH_END_ONLY)
5072c59f 1059 type = MATCH_MIDDLE_ONLY;
9f4801e3 1060 else
5072c59f 1061 type = MATCH_FRONT_ONLY;
5072c59f
SR
1062 buff[i] = 0;
1063 break;
1064 }
1065 }
1066 }
1067
9f4801e3
SR
1068 return type;
1069}
1070
64e7c440 1071static int ftrace_match(char *str, char *regex, int len, int type)
9f4801e3 1072{
9f4801e3
SR
1073 int matched = 0;
1074 char *ptr;
1075
9f4801e3
SR
1076 switch (type) {
1077 case MATCH_FULL:
1078 if (strcmp(str, regex) == 0)
1079 matched = 1;
1080 break;
1081 case MATCH_FRONT_ONLY:
1082 if (strncmp(str, regex, len) == 0)
1083 matched = 1;
1084 break;
1085 case MATCH_MIDDLE_ONLY:
1086 if (strstr(str, regex))
1087 matched = 1;
1088 break;
1089 case MATCH_END_ONLY:
1090 ptr = strstr(str, regex);
1091 if (ptr && (ptr[len] == 0))
1092 matched = 1;
1093 break;
1094 }
1095
1096 return matched;
1097}
1098
64e7c440
SR
1099static int
1100ftrace_match_record(struct dyn_ftrace *rec, char *regex, int len, int type)
1101{
1102 char str[KSYM_SYMBOL_LEN];
1103
1104 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1105 return ftrace_match(str, regex, len, type);
1106}
1107
9f4801e3
SR
1108static void ftrace_match_records(char *buff, int len, int enable)
1109{
1110 char *search;
1111 struct ftrace_page *pg;
1112 struct dyn_ftrace *rec;
1113 int type;
1114 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1115 unsigned search_len;
1116 int not;
1117
1118 type = ftrace_setup_glob(buff, len, &search, &not);
1119
1120 search_len = strlen(search);
1121
52baf119 1122 mutex_lock(&ftrace_lock);
265c831c 1123 do_for_each_ftrace_rec(pg, rec) {
265c831c
SR
1124
1125 if (rec->flags & FTRACE_FL_FAILED)
1126 continue;
9f4801e3
SR
1127
1128 if (ftrace_match_record(rec, search, search_len, type)) {
265c831c
SR
1129 if (not)
1130 rec->flags &= ~flag;
1131 else
1132 rec->flags |= flag;
1133 }
e68746a2
SR
1134 /*
1135 * Only enable filtering if we have a function that
1136 * is filtered on.
1137 */
1138 if (enable && (rec->flags & FTRACE_FL_FILTER))
1139 ftrace_filtered = 1;
265c831c 1140 } while_for_each_ftrace_rec();
52baf119 1141 mutex_unlock(&ftrace_lock);
5072c59f
SR
1142}
1143
64e7c440
SR
1144static int
1145ftrace_match_module_record(struct dyn_ftrace *rec, char *mod,
1146 char *regex, int len, int type)
1147{
1148 char str[KSYM_SYMBOL_LEN];
1149 char *modname;
1150
1151 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
1152
1153 if (!modname || strcmp(modname, mod))
1154 return 0;
1155
1156 /* blank search means to match all funcs in the mod */
1157 if (len)
1158 return ftrace_match(str, regex, len, type);
1159 else
1160 return 1;
1161}
1162
1163static void ftrace_match_module_records(char *buff, char *mod, int enable)
1164{
1165 char *search = buff;
1166 struct ftrace_page *pg;
1167 struct dyn_ftrace *rec;
1168 int type = MATCH_FULL;
1169 unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1170 unsigned search_len = 0;
1171 int not = 0;
1172
1173 /* blank or '*' mean the same */
1174 if (strcmp(buff, "*") == 0)
1175 buff[0] = 0;
1176
1177 /* handle the case of 'dont filter this module' */
1178 if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
1179 buff[0] = 0;
1180 not = 1;
1181 }
1182
1183 if (strlen(buff)) {
1184 type = ftrace_setup_glob(buff, strlen(buff), &search, &not);
1185 search_len = strlen(search);
1186 }
1187
52baf119 1188 mutex_lock(&ftrace_lock);
64e7c440
SR
1189 do_for_each_ftrace_rec(pg, rec) {
1190
1191 if (rec->flags & FTRACE_FL_FAILED)
1192 continue;
1193
1194 if (ftrace_match_module_record(rec, mod,
1195 search, search_len, type)) {
1196 if (not)
1197 rec->flags &= ~flag;
1198 else
1199 rec->flags |= flag;
1200 }
e68746a2
SR
1201 if (enable && (rec->flags & FTRACE_FL_FILTER))
1202 ftrace_filtered = 1;
64e7c440
SR
1203
1204 } while_for_each_ftrace_rec();
52baf119 1205 mutex_unlock(&ftrace_lock);
64e7c440
SR
1206}
1207
f6180773
SR
1208/*
1209 * We register the module command as a template to show others how
1210 * to register the a command as well.
1211 */
1212
1213static int
1214ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
1215{
1216 char *mod;
1217
1218 /*
1219 * cmd == 'mod' because we only registered this func
1220 * for the 'mod' ftrace_func_command.
1221 * But if you register one func with multiple commands,
1222 * you can tell which command was used by the cmd
1223 * parameter.
1224 */
1225
1226 /* we must have a module name */
1227 if (!param)
1228 return -EINVAL;
1229
1230 mod = strsep(&param, ":");
1231 if (!strlen(mod))
1232 return -EINVAL;
1233
1234 ftrace_match_module_records(func, mod, enable);
1235 return 0;
1236}
1237
1238static struct ftrace_func_command ftrace_mod_cmd = {
1239 .name = "mod",
1240 .func = ftrace_mod_callback,
1241};
1242
1243static int __init ftrace_mod_cmd_init(void)
1244{
1245 return register_ftrace_command(&ftrace_mod_cmd);
1246}
1247device_initcall(ftrace_mod_cmd_init);
1248
59df055f
SR
1249#define FTRACE_HASH_BITS 7
1250#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
1251static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
1252
1253struct ftrace_func_hook {
1254 struct hlist_node node;
1255 struct ftrace_hook_ops *ops;
1256 unsigned long flags;
1257 unsigned long ip;
1258 void *data;
1259 struct rcu_head rcu;
1260};
1261
1262static void
1263function_trace_hook_call(unsigned long ip, unsigned long parent_ip)
1264{
1265 struct ftrace_func_hook *entry;
1266 struct hlist_head *hhd;
1267 struct hlist_node *n;
1268 unsigned long key;
1269 int resched;
1270
1271 key = hash_long(ip, FTRACE_HASH_BITS);
1272
1273 hhd = &ftrace_func_hash[key];
1274
1275 if (hlist_empty(hhd))
1276 return;
1277
1278 /*
1279 * Disable preemption for these calls to prevent a RCU grace
1280 * period. This syncs the hash iteration and freeing of items
1281 * on the hash. rcu_read_lock is too dangerous here.
1282 */
1283 resched = ftrace_preempt_disable();
1284 hlist_for_each_entry_rcu(entry, n, hhd, node) {
1285 if (entry->ip == ip)
1286 entry->ops->func(ip, parent_ip, &entry->data);
1287 }
1288 ftrace_preempt_enable(resched);
1289}
1290
1291static struct ftrace_ops trace_hook_ops __read_mostly =
1292{
1293 .func = function_trace_hook_call,
1294};
1295
1296static int ftrace_hook_registered;
1297
1298static void __enable_ftrace_function_hook(void)
1299{
1300 int i;
1301
1302 if (ftrace_hook_registered)
1303 return;
1304
1305 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1306 struct hlist_head *hhd = &ftrace_func_hash[i];
1307 if (hhd->first)
1308 break;
1309 }
1310 /* Nothing registered? */
1311 if (i == FTRACE_FUNC_HASHSIZE)
1312 return;
1313
1314 __register_ftrace_function(&trace_hook_ops);
1315 ftrace_startup(0);
1316 ftrace_hook_registered = 1;
1317}
1318
1319static void __disable_ftrace_function_hook(void)
1320{
1321 int i;
1322
1323 if (!ftrace_hook_registered)
1324 return;
1325
1326 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1327 struct hlist_head *hhd = &ftrace_func_hash[i];
1328 if (hhd->first)
1329 return;
1330 }
1331
1332 /* no more funcs left */
1333 __unregister_ftrace_function(&trace_hook_ops);
1334 ftrace_shutdown(0);
1335 ftrace_hook_registered = 0;
1336}
1337
1338
1339static void ftrace_free_entry_rcu(struct rcu_head *rhp)
1340{
1341 struct ftrace_func_hook *entry =
1342 container_of(rhp, struct ftrace_func_hook, rcu);
1343
1344 if (entry->ops->free)
1345 entry->ops->free(&entry->data);
1346 kfree(entry);
1347}
1348
1349
1350int
1351register_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1352 void *data)
1353{
1354 struct ftrace_func_hook *entry;
1355 struct ftrace_page *pg;
1356 struct dyn_ftrace *rec;
1357 unsigned long key;
1358 int type, len, not;
1359 int count = 0;
1360 char *search;
1361
1362 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1363 len = strlen(search);
1364
1365 /* we do not support '!' for function hooks */
1366 if (WARN_ON(not))
1367 return -EINVAL;
1368
1369 mutex_lock(&ftrace_lock);
1370 do_for_each_ftrace_rec(pg, rec) {
1371
1372 if (rec->flags & FTRACE_FL_FAILED)
1373 continue;
1374
1375 if (!ftrace_match_record(rec, search, len, type))
1376 continue;
1377
1378 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
1379 if (!entry) {
1380 /* If we did not hook to any, then return error */
1381 if (!count)
1382 count = -ENOMEM;
1383 goto out_unlock;
1384 }
1385
1386 count++;
1387
1388 entry->data = data;
1389
1390 /*
1391 * The caller might want to do something special
1392 * for each function we find. We call the callback
1393 * to give the caller an opportunity to do so.
1394 */
1395 if (ops->callback) {
1396 if (ops->callback(rec->ip, &entry->data) < 0) {
1397 /* caller does not like this func */
1398 kfree(entry);
1399 continue;
1400 }
1401 }
1402
1403 entry->ops = ops;
1404 entry->ip = rec->ip;
1405
1406 key = hash_long(entry->ip, FTRACE_HASH_BITS);
1407 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
1408
1409 } while_for_each_ftrace_rec();
1410 __enable_ftrace_function_hook();
1411
1412 out_unlock:
1413 mutex_unlock(&ftrace_lock);
1414
1415 return count;
1416}
1417
1418enum {
1419 HOOK_TEST_FUNC = 1,
1420 HOOK_TEST_DATA = 2
1421};
1422
1423static void
1424__unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1425 void *data, int flags)
1426{
1427 struct ftrace_func_hook *entry;
1428 struct hlist_node *n, *tmp;
1429 char str[KSYM_SYMBOL_LEN];
1430 int type = MATCH_FULL;
1431 int i, len = 0;
1432 char *search;
1433
1434 if (glob && (strcmp(glob, "*") || !strlen(glob)))
1435 glob = NULL;
1436 else {
1437 int not;
1438
1439 type = ftrace_setup_glob(glob, strlen(glob), &search, &not);
1440 len = strlen(search);
1441
1442 /* we do not support '!' for function hooks */
1443 if (WARN_ON(not))
1444 return;
1445 }
1446
1447 mutex_lock(&ftrace_lock);
1448 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
1449 struct hlist_head *hhd = &ftrace_func_hash[i];
1450
1451 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
1452
1453 /* break up if statements for readability */
1454 if ((flags & HOOK_TEST_FUNC) && entry->ops != ops)
1455 continue;
1456
1457 if ((flags & HOOK_TEST_DATA) && entry->data != data)
1458 continue;
1459
1460 /* do this last, since it is the most expensive */
1461 if (glob) {
1462 kallsyms_lookup(entry->ip, NULL, NULL,
1463 NULL, str);
1464 if (!ftrace_match(str, glob, len, type))
1465 continue;
1466 }
1467
1468 hlist_del(&entry->node);
1469 call_rcu(&entry->rcu, ftrace_free_entry_rcu);
1470 }
1471 }
1472 __disable_ftrace_function_hook();
1473 mutex_unlock(&ftrace_lock);
1474}
1475
1476void
1477unregister_ftrace_function_hook(char *glob, struct ftrace_hook_ops *ops,
1478 void *data)
1479{
1480 __unregister_ftrace_function_hook(glob, ops, data,
1481 HOOK_TEST_FUNC | HOOK_TEST_DATA);
1482}
1483
1484void
1485unregister_ftrace_function_hook_func(char *glob, struct ftrace_hook_ops *ops)
1486{
1487 __unregister_ftrace_function_hook(glob, ops, NULL, HOOK_TEST_FUNC);
1488}
1489
1490void unregister_ftrace_function_hook_all(char *glob)
1491{
1492 __unregister_ftrace_function_hook(glob, NULL, NULL, 0);
1493}
1494
f6180773
SR
1495static LIST_HEAD(ftrace_commands);
1496static DEFINE_MUTEX(ftrace_cmd_mutex);
1497
1498int register_ftrace_command(struct ftrace_func_command *cmd)
1499{
1500 struct ftrace_func_command *p;
1501 int ret = 0;
1502
1503 mutex_lock(&ftrace_cmd_mutex);
1504 list_for_each_entry(p, &ftrace_commands, list) {
1505 if (strcmp(cmd->name, p->name) == 0) {
1506 ret = -EBUSY;
1507 goto out_unlock;
1508 }
1509 }
1510 list_add(&cmd->list, &ftrace_commands);
1511 out_unlock:
1512 mutex_unlock(&ftrace_cmd_mutex);
1513
1514 return ret;
1515}
1516
1517int unregister_ftrace_command(struct ftrace_func_command *cmd)
1518{
1519 struct ftrace_func_command *p, *n;
1520 int ret = -ENODEV;
1521
1522 mutex_lock(&ftrace_cmd_mutex);
1523 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
1524 if (strcmp(cmd->name, p->name) == 0) {
1525 ret = 0;
1526 list_del_init(&p->list);
1527 goto out_unlock;
1528 }
1529 }
1530 out_unlock:
1531 mutex_unlock(&ftrace_cmd_mutex);
1532
1533 return ret;
1534}
1535
64e7c440
SR
1536static int ftrace_process_regex(char *buff, int len, int enable)
1537{
f6180773
SR
1538 struct ftrace_func_command *p;
1539 char *func, *command, *next = buff;
1540 int ret = -EINVAL;
64e7c440
SR
1541
1542 func = strsep(&next, ":");
1543
1544 if (!next) {
1545 ftrace_match_records(func, len, enable);
1546 return 0;
1547 }
1548
f6180773 1549 /* command found */
64e7c440
SR
1550
1551 command = strsep(&next, ":");
1552
f6180773
SR
1553 mutex_lock(&ftrace_cmd_mutex);
1554 list_for_each_entry(p, &ftrace_commands, list) {
1555 if (strcmp(p->name, command) == 0) {
1556 ret = p->func(func, command, next, enable);
1557 goto out_unlock;
1558 }
64e7c440 1559 }
f6180773
SR
1560 out_unlock:
1561 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 1562
f6180773 1563 return ret;
64e7c440
SR
1564}
1565
e309b41d 1566static ssize_t
41c52c0d
SR
1567ftrace_regex_write(struct file *file, const char __user *ubuf,
1568 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1569{
1570 struct ftrace_iterator *iter;
1571 char ch;
1572 size_t read = 0;
1573 ssize_t ret;
1574
1575 if (!cnt || cnt < 0)
1576 return 0;
1577
41c52c0d 1578 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1579
1580 if (file->f_mode & FMODE_READ) {
1581 struct seq_file *m = file->private_data;
1582 iter = m->private;
1583 } else
1584 iter = file->private_data;
1585
1586 if (!*ppos) {
1587 iter->flags &= ~FTRACE_ITER_CONT;
1588 iter->buffer_idx = 0;
1589 }
1590
1591 ret = get_user(ch, ubuf++);
1592 if (ret)
1593 goto out;
1594 read++;
1595 cnt--;
1596
1597 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1598 /* skip white space */
1599 while (cnt && isspace(ch)) {
1600 ret = get_user(ch, ubuf++);
1601 if (ret)
1602 goto out;
1603 read++;
1604 cnt--;
1605 }
1606
5072c59f
SR
1607 if (isspace(ch)) {
1608 file->f_pos += read;
1609 ret = read;
1610 goto out;
1611 }
1612
1613 iter->buffer_idx = 0;
1614 }
1615
1616 while (cnt && !isspace(ch)) {
1617 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1618 iter->buffer[iter->buffer_idx++] = ch;
1619 else {
1620 ret = -EINVAL;
1621 goto out;
1622 }
1623 ret = get_user(ch, ubuf++);
1624 if (ret)
1625 goto out;
1626 read++;
1627 cnt--;
1628 }
1629
1630 if (isspace(ch)) {
1631 iter->filtered++;
1632 iter->buffer[iter->buffer_idx] = 0;
64e7c440
SR
1633 ret = ftrace_process_regex(iter->buffer,
1634 iter->buffer_idx, enable);
1635 if (ret)
1636 goto out;
5072c59f
SR
1637 iter->buffer_idx = 0;
1638 } else
1639 iter->flags |= FTRACE_ITER_CONT;
1640
1641
1642 file->f_pos += read;
1643
1644 ret = read;
1645 out:
41c52c0d 1646 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1647
1648 return ret;
1649}
1650
41c52c0d
SR
1651static ssize_t
1652ftrace_filter_write(struct file *file, const char __user *ubuf,
1653 size_t cnt, loff_t *ppos)
1654{
1655 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
1656}
1657
1658static ssize_t
1659ftrace_notrace_write(struct file *file, const char __user *ubuf,
1660 size_t cnt, loff_t *ppos)
1661{
1662 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
1663}
1664
1665static void
1666ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
1667{
1668 if (unlikely(ftrace_disabled))
1669 return;
1670
1671 mutex_lock(&ftrace_regex_lock);
1672 if (reset)
1673 ftrace_filter_reset(enable);
1674 if (buf)
7f24b31b 1675 ftrace_match_records(buf, len, enable);
41c52c0d
SR
1676 mutex_unlock(&ftrace_regex_lock);
1677}
1678
77a2b37d
SR
1679/**
1680 * ftrace_set_filter - set a function to filter on in ftrace
1681 * @buf - the string that holds the function filter text.
1682 * @len - the length of the string.
1683 * @reset - non zero to reset all filters before applying this filter.
1684 *
1685 * Filters denote which functions should be enabled when tracing is enabled.
1686 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
1687 */
e309b41d 1688void ftrace_set_filter(unsigned char *buf, int len, int reset)
77a2b37d 1689{
41c52c0d
SR
1690 ftrace_set_regex(buf, len, reset, 1);
1691}
4eebcc81 1692
41c52c0d
SR
1693/**
1694 * ftrace_set_notrace - set a function to not trace in ftrace
1695 * @buf - the string that holds the function notrace text.
1696 * @len - the length of the string.
1697 * @reset - non zero to reset all filters before applying this filter.
1698 *
1699 * Notrace Filters denote which functions should not be enabled when tracing
1700 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
1701 * for tracing.
1702 */
1703void ftrace_set_notrace(unsigned char *buf, int len, int reset)
1704{
1705 ftrace_set_regex(buf, len, reset, 0);
77a2b37d
SR
1706}
1707
e309b41d 1708static int
41c52c0d 1709ftrace_regex_release(struct inode *inode, struct file *file, int enable)
5072c59f
SR
1710{
1711 struct seq_file *m = (struct seq_file *)file->private_data;
1712 struct ftrace_iterator *iter;
1713
41c52c0d 1714 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1715 if (file->f_mode & FMODE_READ) {
1716 iter = m->private;
1717
1718 seq_release(inode, file);
1719 } else
1720 iter = file->private_data;
1721
1722 if (iter->buffer_idx) {
1723 iter->filtered++;
1724 iter->buffer[iter->buffer_idx] = 0;
7f24b31b 1725 ftrace_match_records(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1726 }
1727
e6ea44e9 1728 mutex_lock(&ftrace_lock);
ee02a2e5 1729 if (ftrace_start_up && ftrace_enabled)
5072c59f 1730 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
e6ea44e9 1731 mutex_unlock(&ftrace_lock);
5072c59f
SR
1732
1733 kfree(iter);
41c52c0d 1734 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1735 return 0;
1736}
1737
41c52c0d
SR
1738static int
1739ftrace_filter_release(struct inode *inode, struct file *file)
1740{
1741 return ftrace_regex_release(inode, file, 1);
1742}
1743
1744static int
1745ftrace_notrace_release(struct inode *inode, struct file *file)
1746{
1747 return ftrace_regex_release(inode, file, 0);
1748}
1749
5072c59f
SR
1750static struct file_operations ftrace_avail_fops = {
1751 .open = ftrace_avail_open,
1752 .read = seq_read,
1753 .llseek = seq_lseek,
1754 .release = ftrace_avail_release,
1755};
1756
eb9a7bf0
AS
1757static struct file_operations ftrace_failures_fops = {
1758 .open = ftrace_failures_open,
1759 .read = seq_read,
1760 .llseek = seq_lseek,
1761 .release = ftrace_avail_release,
1762};
1763
5072c59f
SR
1764static struct file_operations ftrace_filter_fops = {
1765 .open = ftrace_filter_open,
41c52c0d 1766 .read = ftrace_regex_read,
5072c59f 1767 .write = ftrace_filter_write,
41c52c0d 1768 .llseek = ftrace_regex_lseek,
5072c59f
SR
1769 .release = ftrace_filter_release,
1770};
1771
41c52c0d
SR
1772static struct file_operations ftrace_notrace_fops = {
1773 .open = ftrace_notrace_open,
1774 .read = ftrace_regex_read,
1775 .write = ftrace_notrace_write,
1776 .llseek = ftrace_regex_lseek,
1777 .release = ftrace_notrace_release,
1778};
1779
ea4e2bc4
SR
1780#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1781
1782static DEFINE_MUTEX(graph_lock);
1783
1784int ftrace_graph_count;
1785unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
1786
1787static void *
1788g_next(struct seq_file *m, void *v, loff_t *pos)
1789{
1790 unsigned long *array = m->private;
1791 int index = *pos;
1792
1793 (*pos)++;
1794
1795 if (index >= ftrace_graph_count)
1796 return NULL;
1797
1798 return &array[index];
1799}
1800
1801static void *g_start(struct seq_file *m, loff_t *pos)
1802{
1803 void *p = NULL;
1804
1805 mutex_lock(&graph_lock);
1806
1807 p = g_next(m, p, pos);
1808
1809 return p;
1810}
1811
1812static void g_stop(struct seq_file *m, void *p)
1813{
1814 mutex_unlock(&graph_lock);
1815}
1816
1817static int g_show(struct seq_file *m, void *v)
1818{
1819 unsigned long *ptr = v;
1820 char str[KSYM_SYMBOL_LEN];
1821
1822 if (!ptr)
1823 return 0;
1824
1825 kallsyms_lookup(*ptr, NULL, NULL, NULL, str);
1826
1827 seq_printf(m, "%s\n", str);
1828
1829 return 0;
1830}
1831
1832static struct seq_operations ftrace_graph_seq_ops = {
1833 .start = g_start,
1834 .next = g_next,
1835 .stop = g_stop,
1836 .show = g_show,
1837};
1838
1839static int
1840ftrace_graph_open(struct inode *inode, struct file *file)
1841{
1842 int ret = 0;
1843
1844 if (unlikely(ftrace_disabled))
1845 return -ENODEV;
1846
1847 mutex_lock(&graph_lock);
1848 if ((file->f_mode & FMODE_WRITE) &&
1849 !(file->f_flags & O_APPEND)) {
1850 ftrace_graph_count = 0;
1851 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
1852 }
1853
1854 if (file->f_mode & FMODE_READ) {
1855 ret = seq_open(file, &ftrace_graph_seq_ops);
1856 if (!ret) {
1857 struct seq_file *m = file->private_data;
1858 m->private = ftrace_graph_funcs;
1859 }
1860 } else
1861 file->private_data = ftrace_graph_funcs;
1862 mutex_unlock(&graph_lock);
1863
1864 return ret;
1865}
1866
1867static ssize_t
1868ftrace_graph_read(struct file *file, char __user *ubuf,
1869 size_t cnt, loff_t *ppos)
1870{
1871 if (file->f_mode & FMODE_READ)
1872 return seq_read(file, ubuf, cnt, ppos);
1873 else
1874 return -EPERM;
1875}
1876
1877static int
1878ftrace_set_func(unsigned long *array, int idx, char *buffer)
1879{
1880 char str[KSYM_SYMBOL_LEN];
1881 struct dyn_ftrace *rec;
1882 struct ftrace_page *pg;
1883 int found = 0;
265c831c 1884 int j;
ea4e2bc4
SR
1885
1886 if (ftrace_disabled)
1887 return -ENODEV;
1888
52baf119 1889 mutex_lock(&ftrace_lock);
265c831c
SR
1890 do_for_each_ftrace_rec(pg, rec) {
1891
1892 if (rec->flags & (FTRACE_FL_FAILED | FTRACE_FL_FREE))
1893 continue;
1894
1895 kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
1896 if (strcmp(str, buffer) == 0) {
1897 /* Return 1 if we add it to the array */
1898 found = 1;
1899 for (j = 0; j < idx; j++)
1900 if (array[j] == rec->ip) {
1901 found = 0;
1902 break;
1903 }
1904 if (found)
1905 array[idx] = rec->ip;
1906 goto out;
ea4e2bc4 1907 }
265c831c
SR
1908 } while_for_each_ftrace_rec();
1909 out:
52baf119 1910 mutex_unlock(&ftrace_lock);
ea4e2bc4
SR
1911
1912 return found ? 0 : -EINVAL;
1913}
1914
1915static ssize_t
1916ftrace_graph_write(struct file *file, const char __user *ubuf,
1917 size_t cnt, loff_t *ppos)
1918{
1919 unsigned char buffer[FTRACE_BUFF_MAX+1];
1920 unsigned long *array;
1921 size_t read = 0;
1922 ssize_t ret;
1923 int index = 0;
1924 char ch;
1925
1926 if (!cnt || cnt < 0)
1927 return 0;
1928
1929 mutex_lock(&graph_lock);
1930
1931 if (ftrace_graph_count >= FTRACE_GRAPH_MAX_FUNCS) {
1932 ret = -EBUSY;
1933 goto out;
1934 }
1935
1936 if (file->f_mode & FMODE_READ) {
1937 struct seq_file *m = file->private_data;
1938 array = m->private;
1939 } else
1940 array = file->private_data;
1941
1942 ret = get_user(ch, ubuf++);
1943 if (ret)
1944 goto out;
1945 read++;
1946 cnt--;
1947
1948 /* skip white space */
1949 while (cnt && isspace(ch)) {
1950 ret = get_user(ch, ubuf++);
1951 if (ret)
1952 goto out;
1953 read++;
1954 cnt--;
1955 }
1956
1957 if (isspace(ch)) {
1958 *ppos += read;
1959 ret = read;
1960 goto out;
1961 }
1962
1963 while (cnt && !isspace(ch)) {
1964 if (index < FTRACE_BUFF_MAX)
1965 buffer[index++] = ch;
1966 else {
1967 ret = -EINVAL;
1968 goto out;
1969 }
1970 ret = get_user(ch, ubuf++);
1971 if (ret)
1972 goto out;
1973 read++;
1974 cnt--;
1975 }
1976 buffer[index] = 0;
1977
1978 /* we allow only one at a time */
1979 ret = ftrace_set_func(array, ftrace_graph_count, buffer);
1980 if (ret)
1981 goto out;
1982
1983 ftrace_graph_count++;
1984
1985 file->f_pos += read;
1986
1987 ret = read;
1988 out:
1989 mutex_unlock(&graph_lock);
1990
1991 return ret;
1992}
1993
1994static const struct file_operations ftrace_graph_fops = {
1995 .open = ftrace_graph_open,
1996 .read = ftrace_graph_read,
1997 .write = ftrace_graph_write,
1998};
1999#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2000
df4fc315 2001static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
5072c59f 2002{
5072c59f
SR
2003 struct dentry *entry;
2004
5072c59f
SR
2005 entry = debugfs_create_file("available_filter_functions", 0444,
2006 d_tracer, NULL, &ftrace_avail_fops);
2007 if (!entry)
2008 pr_warning("Could not create debugfs "
2009 "'available_filter_functions' entry\n");
2010
eb9a7bf0
AS
2011 entry = debugfs_create_file("failures", 0444,
2012 d_tracer, NULL, &ftrace_failures_fops);
2013 if (!entry)
2014 pr_warning("Could not create debugfs 'failures' entry\n");
2015
5072c59f
SR
2016 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
2017 NULL, &ftrace_filter_fops);
2018 if (!entry)
2019 pr_warning("Could not create debugfs "
2020 "'set_ftrace_filter' entry\n");
41c52c0d
SR
2021
2022 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
2023 NULL, &ftrace_notrace_fops);
2024 if (!entry)
2025 pr_warning("Could not create debugfs "
2026 "'set_ftrace_notrace' entry\n");
ad90c0e3 2027
ea4e2bc4
SR
2028#ifdef CONFIG_FUNCTION_GRAPH_TRACER
2029 entry = debugfs_create_file("set_graph_function", 0444, d_tracer,
2030 NULL,
2031 &ftrace_graph_fops);
2032 if (!entry)
2033 pr_warning("Could not create debugfs "
2034 "'set_graph_function' entry\n");
2035#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2036
5072c59f
SR
2037 return 0;
2038}
2039
31e88909
SR
2040static int ftrace_convert_nops(struct module *mod,
2041 unsigned long *start,
68bf21aa
SR
2042 unsigned long *end)
2043{
2044 unsigned long *p;
2045 unsigned long addr;
2046 unsigned long flags;
2047
e6ea44e9 2048 mutex_lock(&ftrace_lock);
68bf21aa
SR
2049 p = start;
2050 while (p < end) {
2051 addr = ftrace_call_adjust(*p++);
20e5227e
SR
2052 /*
2053 * Some architecture linkers will pad between
2054 * the different mcount_loc sections of different
2055 * object files to satisfy alignments.
2056 * Skip any NULL pointers.
2057 */
2058 if (!addr)
2059 continue;
68bf21aa 2060 ftrace_record_ip(addr);
68bf21aa
SR
2061 }
2062
08f5ac90 2063 /* disable interrupts to prevent kstop machine */
68bf21aa 2064 local_irq_save(flags);
31e88909 2065 ftrace_update_code(mod);
68bf21aa 2066 local_irq_restore(flags);
e6ea44e9 2067 mutex_unlock(&ftrace_lock);
68bf21aa
SR
2068
2069 return 0;
2070}
2071
31e88909
SR
2072void ftrace_init_module(struct module *mod,
2073 unsigned long *start, unsigned long *end)
90d595fe 2074{
00fd61ae 2075 if (ftrace_disabled || start == end)
fed1939c 2076 return;
31e88909 2077 ftrace_convert_nops(mod, start, end);
90d595fe
SR
2078}
2079
68bf21aa
SR
2080extern unsigned long __start_mcount_loc[];
2081extern unsigned long __stop_mcount_loc[];
2082
2083void __init ftrace_init(void)
2084{
2085 unsigned long count, addr, flags;
2086 int ret;
2087
2088 /* Keep the ftrace pointer to the stub */
2089 addr = (unsigned long)ftrace_stub;
2090
2091 local_irq_save(flags);
2092 ftrace_dyn_arch_init(&addr);
2093 local_irq_restore(flags);
2094
2095 /* ftrace_dyn_arch_init places the return code in addr */
2096 if (addr)
2097 goto failed;
2098
2099 count = __stop_mcount_loc - __start_mcount_loc;
2100
2101 ret = ftrace_dyn_table_alloc(count);
2102 if (ret)
2103 goto failed;
2104
2105 last_ftrace_enabled = ftrace_enabled = 1;
2106
31e88909
SR
2107 ret = ftrace_convert_nops(NULL,
2108 __start_mcount_loc,
68bf21aa
SR
2109 __stop_mcount_loc);
2110
2111 return;
2112 failed:
2113 ftrace_disabled = 1;
2114}

#else

static int __init ftrace_nodyn_init(void)
{
        ftrace_enabled = 1;
        return 0;
}
device_initcall(ftrace_nodyn_init);

static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
static inline void ftrace_startup_enable(int command) { }
/* Keep as macros so we do not need to define the commands */
# define ftrace_startup(command)        do { } while (0)
# define ftrace_shutdown(command)       do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)

#endif /* CONFIG_DYNAMIC_FTRACE */
2133
df4fc315
SR
2134static ssize_t
2135ftrace_pid_read(struct file *file, char __user *ubuf,
2136 size_t cnt, loff_t *ppos)
2137{
2138 char buf[64];
2139 int r;
2140
e32d8956
SR
2141 if (ftrace_pid_trace == ftrace_swapper_pid)
2142 r = sprintf(buf, "swapper tasks\n");
2143 else if (ftrace_pid_trace)
978f3a45 2144 r = sprintf(buf, "%u\n", pid_nr(ftrace_pid_trace));
df4fc315
SR
2145 else
2146 r = sprintf(buf, "no pid\n");
2147
2148 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2149}
2150
e32d8956 2151static void clear_ftrace_swapper(void)
978f3a45
SR
2152{
2153 struct task_struct *p;
e32d8956 2154 int cpu;
978f3a45 2155
e32d8956
SR
2156 get_online_cpus();
2157 for_each_online_cpu(cpu) {
2158 p = idle_task(cpu);
978f3a45 2159 clear_tsk_trace_trace(p);
e32d8956
SR
2160 }
2161 put_online_cpus();
2162}
978f3a45 2163
e32d8956
SR
2164static void set_ftrace_swapper(void)
2165{
2166 struct task_struct *p;
2167 int cpu;
2168
2169 get_online_cpus();
2170 for_each_online_cpu(cpu) {
2171 p = idle_task(cpu);
2172 set_tsk_trace_trace(p);
2173 }
2174 put_online_cpus();
978f3a45
SR
2175}
2176
e32d8956
SR
2177static void clear_ftrace_pid(struct pid *pid)
2178{
2179 struct task_struct *p;
2180
229c4ef8 2181 rcu_read_lock();
e32d8956
SR
2182 do_each_pid_task(pid, PIDTYPE_PID, p) {
2183 clear_tsk_trace_trace(p);
2184 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
2185 rcu_read_unlock();
2186
e32d8956
SR
2187 put_pid(pid);
2188}
2189
2190static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
2191{
2192 struct task_struct *p;
2193
229c4ef8 2194 rcu_read_lock();
978f3a45
SR
2195 do_each_pid_task(pid, PIDTYPE_PID, p) {
2196 set_tsk_trace_trace(p);
2197 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 2198 rcu_read_unlock();
978f3a45
SR
2199}
2200
e32d8956
SR
2201static void clear_ftrace_pid_task(struct pid **pid)
2202{
2203 if (*pid == ftrace_swapper_pid)
2204 clear_ftrace_swapper();
2205 else
2206 clear_ftrace_pid(*pid);
2207
2208 *pid = NULL;
2209}
2210
2211static void set_ftrace_pid_task(struct pid *pid)
2212{
2213 if (pid == ftrace_swapper_pid)
2214 set_ftrace_swapper();
2215 else
2216 set_ftrace_pid(pid);
2217}
2218
df4fc315
SR
2219static ssize_t
2220ftrace_pid_write(struct file *filp, const char __user *ubuf,
2221 size_t cnt, loff_t *ppos)
2222{
978f3a45 2223 struct pid *pid;
df4fc315
SR
2224 char buf[64];
2225 long val;
2226 int ret;
2227
2228 if (cnt >= sizeof(buf))
2229 return -EINVAL;
2230
2231 if (copy_from_user(&buf, ubuf, cnt))
2232 return -EFAULT;
2233
2234 buf[cnt] = 0;
2235
2236 ret = strict_strtol(buf, 10, &val);
2237 if (ret < 0)
2238 return ret;
2239
e6ea44e9 2240 mutex_lock(&ftrace_lock);
978f3a45 2241 if (val < 0) {
df4fc315 2242 /* disable pid tracing */
978f3a45 2243 if (!ftrace_pid_trace)
df4fc315 2244 goto out;
978f3a45
SR
2245
2246 clear_ftrace_pid_task(&ftrace_pid_trace);
df4fc315
SR
2247
2248 } else {
e32d8956
SR
2249 /* swapper task is special */
2250 if (!val) {
2251 pid = ftrace_swapper_pid;
2252 if (pid == ftrace_pid_trace)
2253 goto out;
2254 } else {
2255 pid = find_get_pid(val);
df4fc315 2256
e32d8956
SR
2257 if (pid == ftrace_pid_trace) {
2258 put_pid(pid);
2259 goto out;
2260 }
0ef8cde5 2261 }
0ef8cde5 2262
978f3a45
SR
2263 if (ftrace_pid_trace)
2264 clear_ftrace_pid_task(&ftrace_pid_trace);
2265
2266 if (!pid)
2267 goto out;
2268
2269 ftrace_pid_trace = pid;
2270
2271 set_ftrace_pid_task(ftrace_pid_trace);
df4fc315
SR
2272 }
2273
2274 /* update the function call */
2275 ftrace_update_pid_func();
2276 ftrace_startup_enable(0);
2277
2278 out:
e6ea44e9 2279 mutex_unlock(&ftrace_lock);
df4fc315
SR
2280
2281 return cnt;
2282}
2283
2284static struct file_operations ftrace_pid_fops = {
2285 .read = ftrace_pid_read,
2286 .write = ftrace_pid_write,
2287};
2288
2289static __init int ftrace_init_debugfs(void)
2290{
2291 struct dentry *d_tracer;
2292 struct dentry *entry;
2293
2294 d_tracer = tracing_init_dentry();
2295 if (!d_tracer)
2296 return 0;
2297
2298 ftrace_init_dyn_debugfs(d_tracer);
2299
2300 entry = debugfs_create_file("set_ftrace_pid", 0644, d_tracer,
2301 NULL, &ftrace_pid_fops);
2302 if (!entry)
2303 pr_warning("Could not create debugfs "
2304 "'set_ftrace_pid' entry\n");
2305 return 0;
2306}
2307
2308fs_initcall(ftrace_init_debugfs);

/**
 * ftrace_kill - kill ftrace
 *
 * This function should be used by panic code. It stops ftrace
 * but in a not so nice way: ftrace is disabled for good and
 * cannot be re-enabled without a reboot.
 */
void ftrace_kill(void)
{
        ftrace_disabled = 1;
        ftrace_enabled = 0;
        clear_ftrace_function();
}
2323
16444a8a 2324/**
3d083395
SR
2325 * register_ftrace_function - register a function for profiling
2326 * @ops - ops structure that holds the function for profiling.
16444a8a 2327 *
3d083395
SR
2328 * Register a function to be called by all functions in the
2329 * kernel.
2330 *
2331 * Note: @ops->func and all the functions it calls must be labeled
2332 * with "notrace", otherwise it will go into a
2333 * recursive loop.
16444a8a 2334 */
3d083395 2335int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 2336{
b0fc494f
SR
2337 int ret;
2338
4eebcc81
SR
2339 if (unlikely(ftrace_disabled))
2340 return -1;
2341
e6ea44e9 2342 mutex_lock(&ftrace_lock);
e7d3737e 2343
b0fc494f 2344 ret = __register_ftrace_function(ops);
5a45cfe1 2345 ftrace_startup(0);
b0fc494f 2346
e6ea44e9 2347 mutex_unlock(&ftrace_lock);
b0fc494f 2348 return ret;
3d083395
SR
2349}
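
/*
 * A minimal usage sketch (illustrative only; my_trace_func, my_ops and
 * my_hit_count are hypothetical names, not defined in this file).  As
 * the comment above says, the callback and everything it calls must be
 * notrace, and the ftrace_ops must stay valid until
 * unregister_ftrace_function() returns:
 *
 *      static atomic_t my_hit_count;
 *
 *      static void notrace my_trace_func(unsigned long ip,
 *                                        unsigned long parent_ip)
 *      {
 *              atomic_inc(&my_hit_count);
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly =
 *      {
 *              .func = my_trace_func,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 *      ...
 *      unregister_ftrace_function(&my_ops);
 */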

/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown(0);
        mutex_unlock(&ftrace_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_lock);
        return ret;
}
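
/*
 * Assuming the usual wiring of this handler into the sysctl table in
 * kernel/sysctl.c, the on/off switch is reachable from user space,
 * e.g. (illustrative):
 *
 *      echo 0 > /proc/sys/kernel/ftrace_enabled
 */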

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

static atomic_t ftrace_graph_active;
static struct notifier_block ftrace_suspend_notifier;

int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
{
        return 0;
}

/* The callbacks that hook a function */
trace_func_graph_ret_t ftrace_graph_return =
                        (trace_func_graph_ret_t)ftrace_stub;
trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;

/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
{
        int i;
        int ret = 0;
        unsigned long flags;
        int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
        struct task_struct *g, *t;

        for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
                ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
                                        * sizeof(struct ftrace_ret_stack),
                                        GFP_KERNEL);
                if (!ret_stack_list[i]) {
                        start = 0;
                        end = i;
                        ret = -ENOMEM;
                        goto free;
                }
        }

        read_lock_irqsave(&tasklist_lock, flags);
        do_each_thread(g, t) {
                if (start == end) {
                        ret = -EAGAIN;
                        goto unlock;
                }

                if (t->ret_stack == NULL) {
                        t->curr_ret_stack = -1;
                        /* Make sure IRQs see the -1 first: */
                        barrier();
                        t->ret_stack = ret_stack_list[start++];
                        atomic_set(&t->tracing_graph_pause, 0);
                        atomic_set(&t->trace_overrun, 0);
                }
        } while_each_thread(g, t);

unlock:
        read_unlock_irqrestore(&tasklist_lock, flags);
free:
        for (i = start; i < end; i++)
                kfree(ret_stack_list[i]);
        return ret;
}

/* Allocate a return stack for each task */
static int start_graph_tracing(void)
{
        struct ftrace_ret_stack **ret_stack_list;
        int ret;

        ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
                                 sizeof(struct ftrace_ret_stack *),
                                 GFP_KERNEL);

        if (!ret_stack_list)
                return -ENOMEM;

        /*
         * alloc_retstack_tasklist() hands out at most
         * FTRACE_RETSTACK_ALLOC_SIZE stacks per call and returns
         * -EAGAIN while threads remain without one, so retry.
         */
        do {
                ret = alloc_retstack_tasklist(ret_stack_list);
        } while (ret == -EAGAIN);

        kfree(ret_stack_list);
        return ret;
}
2493
4a2b8dda
FW
2494/*
2495 * Hibernation protection.
2496 * The state of the current task is too much unstable during
2497 * suspend/restore to disk. We want to protect against that.
2498 */
2499static int
2500ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
2501 void *unused)
2502{
2503 switch (state) {
2504 case PM_HIBERNATION_PREPARE:
2505 pause_graph_tracing();
2506 break;
2507
2508 case PM_POST_HIBERNATION:
2509 unpause_graph_tracing();
2510 break;
2511 }
2512 return NOTIFY_DONE;
2513}
2514
287b6e68
FW
2515int register_ftrace_graph(trace_func_graph_ret_t retfunc,
2516 trace_func_graph_ent_t entryfunc)
15e6cb36 2517{
e7d3737e
FW
2518 int ret = 0;
2519
e6ea44e9 2520 mutex_lock(&ftrace_lock);
e7d3737e 2521
4a2b8dda
FW
2522 ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
2523 register_pm_notifier(&ftrace_suspend_notifier);
2524
287b6e68 2525 atomic_inc(&ftrace_graph_active);
fb52607a 2526 ret = start_graph_tracing();
f201ae23 2527 if (ret) {
287b6e68 2528 atomic_dec(&ftrace_graph_active);
f201ae23
FW
2529 goto out;
2530 }
e53a6319 2531
287b6e68
FW
2532 ftrace_graph_return = retfunc;
2533 ftrace_graph_entry = entryfunc;
e53a6319 2534
5a45cfe1 2535 ftrace_startup(FTRACE_START_FUNC_RET);
e7d3737e
FW
2536
2537out:
e6ea44e9 2538 mutex_unlock(&ftrace_lock);
e7d3737e 2539 return ret;
15e6cb36
FW
2540}
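
/*
 * A minimal callback-pair sketch (illustrative only; my_graph_entry and
 * my_graph_return are hypothetical).  The entry handler returns nonzero
 * to have this function's return traced, or 0 to skip it:
 *
 *      static int my_graph_entry(struct ftrace_graph_ent *trace)
 *      {
 *              return 1;
 *      }
 *
 *      static void my_graph_return(struct ftrace_graph_ret *trace)
 *      {
 *              ...
 *      }
 *
 *      register_ftrace_graph(my_graph_return, my_graph_entry);
 *      ...
 *      unregister_ftrace_graph();
 */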

void unregister_ftrace_graph(void)
{
        mutex_lock(&ftrace_lock);

        atomic_dec(&ftrace_graph_active);
        ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
        ftrace_graph_entry = ftrace_graph_entry_stub;
        ftrace_shutdown(FTRACE_STOP_FUNC_RET);
        unregister_pm_notifier(&ftrace_suspend_notifier);

        mutex_unlock(&ftrace_lock);
}

/* Allocate a return stack for newly created task */
void ftrace_graph_init_task(struct task_struct *t)
{
        if (atomic_read(&ftrace_graph_active)) {
                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                * sizeof(struct ftrace_ret_stack),
                                GFP_KERNEL);
                if (!t->ret_stack)
                        return;
                t->curr_ret_stack = -1;
                atomic_set(&t->tracing_graph_pause, 0);
                atomic_set(&t->trace_overrun, 0);
        } else
                t->ret_stack = NULL;
}
2570
fb52607a 2571void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 2572{
eae849ca
FW
2573 struct ftrace_ret_stack *ret_stack = t->ret_stack;
2574
f201ae23 2575 t->ret_stack = NULL;
eae849ca
FW
2576 /* NULL must become visible to IRQs before we free it: */
2577 barrier();
2578
2579 kfree(ret_stack);
f201ae23 2580}
14a866c5
SR
2581
2582void ftrace_graph_stop(void)
2583{
2584 ftrace_stop();
2585}
15e6cb36
FW
2586#endif
2587