ftrace: remove printks from irqsoff trace
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <linux/list.h>

#include "trace.h"

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);

static struct ftrace_ops ftrace_list_end __read_mostly =
{
        .func = ftrace_stub,
};

static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;

void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
        struct ftrace_ops *op = ftrace_list;

        /* in case someone actually ports this to alpha! */
        read_barrier_depends();

        while (op != &ftrace_list_end) {
                /* silly alpha */
                read_barrier_depends();
                op->func(ip, parent_ip);
                op = op->next;
        }
}
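
/*
 * The lockless walk above pairs with the smp_wmb() in
 * __register_ftrace_function(): an entry's ->next pointer is
 * published before the entry itself appears on ftrace_list, so a
 * reader following the list with read_barrier_depends() never sees
 * a half-initialized op.
 */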

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag between this call and the moment
 * the last CPU stops calling the old function.
 */
void clear_ftrace_function(void)
{
        ftrace_trace_function = ftrace_stub;
}

static int __register_ftrace_function(struct ftrace_ops *ops)
{
        /* Should never be called by interrupts */
        spin_lock(&ftrace_lock);

        ops->next = ftrace_list;
        /*
         * We are entering ops into the ftrace_list but another
         * CPU might be walking that list. We need to make sure
         * the ops->next pointer is valid before another CPU sees
         * the ops pointer included into the ftrace_list.
         */
        smp_wmb();
        ftrace_list = ops;

        if (ftrace_enabled) {
                /*
                 * For one func, simply call it directly.
                 * For more than one func, call the chain.
                 */
                if (ops->next == &ftrace_list_end)
                        ftrace_trace_function = ops->func;
                else
                        ftrace_trace_function = ftrace_list_func;
        }

        spin_unlock(&ftrace_lock);

        return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
        struct ftrace_ops **p;
        int ret = 0;

        spin_lock(&ftrace_lock);

        /*
         * If we are removing the last function, then simply point
         * to the ftrace_stub.
         */
        if (ftrace_list == ops && ops->next == &ftrace_list_end) {
                ftrace_trace_function = ftrace_stub;
                ftrace_list = &ftrace_list_end;
                goto out;
        }

        for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
                if (*p == ops)
                        break;

        if (*p != ops) {
                ret = -1;
                goto out;
        }

        *p = (*p)->next;

        if (ftrace_enabled) {
                /* If we only have one func left, then call that directly */
                if (ftrace_list == &ftrace_list_end ||
                    ftrace_list->next == &ftrace_list_end)
                        ftrace_trace_function = ftrace_list->func;
        }

 out:
        spin_unlock(&ftrace_lock);

        return ret;
}

#ifdef CONFIG_DYNAMIC_FTRACE

static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
static unsigned long ftraced_iteration_counter;

enum {
        FTRACE_ENABLE_CALLS = (1 << 0),
        FTRACE_DISABLE_CALLS = (1 << 1),
        FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
        FTRACE_ENABLE_MCOUNT = (1 << 3),
        FTRACE_DISABLE_MCOUNT = (1 << 4),
};
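
/*
 * These values are bitmask commands: callers OR together the ones
 * they need and hand the result to ftrace_run_update_code(), which
 * dispatches on them in __ftrace_modify_code() under stop_machine.
 */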

static int ftrace_filtered;

static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_filter_lock);

struct ftrace_page {
        struct ftrace_page *next;
        unsigned long index;
        struct dyn_ftrace records[];
};

#define ENTRIES_PER_PAGE \
        ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
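
/*
 * As a rough worked example: with 4K pages and a dyn_ftrace record
 * of (say) 16 bytes, this comes to about 250 records per page. The
 * exact figure depends on the architecture and config.
 */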

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;

static int ftraced_trigger;
static int ftraced_suspend;

static int ftrace_record_suspend;

static struct dyn_ftrace *ftrace_free_records;

static inline int
ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
        struct dyn_ftrace *p;
        struct hlist_node *t;
        int found = 0;

        hlist_for_each_entry(p, t, &ftrace_hash[key], node) {
                if (p->ip == ip) {
                        found = 1;
                        break;
                }
        }

        return found;
}

static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
        hlist_add_head(&node->node, &ftrace_hash[key]);
}

static void ftrace_free_rec(struct dyn_ftrace *rec)
{
        /* no locking, only called from kstop_machine */

        rec->ip = (unsigned long)ftrace_free_records;
        ftrace_free_records = rec;
        rec->flags |= FTRACE_FL_FREE;
}
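
/*
 * Freed records are chained through their ip field: ftrace_free_records
 * points at the head of the list and each free record's ip holds the
 * next free record. FTRACE_FL_FREE marks entries on that list so
 * ftrace_alloc_dyn_node() can sanity-check reuse.
 */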

static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
        struct dyn_ftrace *rec;

        /* First check for freed records */
        if (ftrace_free_records) {
                rec = ftrace_free_records;

                if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
                        WARN_ON_ONCE(1);
                        ftrace_free_records = NULL;
                        ftrace_disabled = 1;
                        ftrace_enabled = 0;
                        return NULL;
                }

                ftrace_free_records = (void *)rec->ip;
                memset(rec, 0, sizeof(*rec));
                return rec;
        }

        if (ftrace_pages->index == ENTRIES_PER_PAGE) {
                if (!ftrace_pages->next)
                        return NULL;
                ftrace_pages = ftrace_pages->next;
        }

        return &ftrace_pages->records[ftrace_pages->index++];
}

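/*
 * ftrace_record_ip() is the early mcount handler: it hashes each new
 * call site and queues it for the ftraced daemon to convert into a
 * nop. It has to guard against recursion, since almost anything it
 * calls may itself be compiled with mcount calls.
 */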
static void
ftrace_record_ip(unsigned long ip)
{
        struct dyn_ftrace *node;
        unsigned long flags;
        unsigned long key;
        int resched;
        int atomic;
        int cpu;

        if (!ftrace_enabled || ftrace_disabled)
                return;

        resched = need_resched();
        preempt_disable_notrace();

        /*
         * We simply need to protect against recursion.
         * Use the raw version of smp_processor_id and not
         * __get_cpu_var which can call debug hooks that can
         * cause a recursive crash here.
         */
        cpu = raw_smp_processor_id();
        per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
        if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
                goto out;

        if (unlikely(ftrace_record_suspend))
                goto out;

        key = hash_long(ip, FTRACE_HASHBITS);

        WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

        if (ftrace_ip_in_hash(ip, key))
                goto out;

        atomic = irqs_disabled();

        spin_lock_irqsave(&ftrace_shutdown_lock, flags);

        /* This ip may have hit the hash before the lock */
        if (ftrace_ip_in_hash(ip, key))
                goto out_unlock;

        /*
         * There's a slight race that the ftraced will update the
         * hash and reset here. If it is already converted, skip it.
         */
        if (ftrace_ip_converted(ip))
                goto out_unlock;

        node = ftrace_alloc_dyn_node(ip);
        if (!node)
                goto out_unlock;

        node->ip = ip;

        ftrace_add_hash(node, key);

        ftraced_trigger = 1;

 out_unlock:
        spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
        per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

        /* prevent recursion with scheduler */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}

#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))

static void
__ftrace_replace_code(struct dyn_ftrace *rec,
                      unsigned char *old, unsigned char *new, int enable)
{
        unsigned long ip;
        int failed;

        ip = rec->ip;

        if (ftrace_filtered && enable) {
                unsigned long fl;
                /*
                 * If filtering is on:
                 *
                 * If this record is set to be filtered and
                 * is enabled then do nothing.
                 *
                 * If this record is set to be filtered and
                 * it is not enabled, enable it.
                 *
                 * If this record is not set to be filtered
                 * and it is not enabled do nothing.
                 *
                 * If this record is not set to be filtered and
                 * it is enabled, disable it.
                 */
                fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

                if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
                    (fl == 0))
                        return;

                /*
                 * If it is enabled disable it,
                 * otherwise enable it!
                 */
                if (fl == FTRACE_FL_ENABLED) {
                        /* swap new and old */
                        new = old;
                        old = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags &= ~FTRACE_FL_ENABLED;
                } else {
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                        rec->flags |= FTRACE_FL_ENABLED;
                }
        } else {

                if (enable)
                        new = ftrace_call_replace(ip, FTRACE_ADDR);
                else
                        old = ftrace_call_replace(ip, FTRACE_ADDR);

                if (enable) {
                        if (rec->flags & FTRACE_FL_ENABLED)
                                return;
                        rec->flags |= FTRACE_FL_ENABLED;
                } else {
                        if (!(rec->flags & FTRACE_FL_ENABLED))
                                return;
                        rec->flags &= ~FTRACE_FL_ENABLED;
                }
        }

        failed = ftrace_modify_code(ip, old, new);
        if (failed) {
                unsigned long key;
                /* It is possible that the function hasn't been converted yet */
                key = hash_long(ip, FTRACE_HASHBITS);
                if (!ftrace_ip_in_hash(ip, key)) {
                        rec->flags |= FTRACE_FL_FAILED;
                        ftrace_free_rec(rec);
                }
        }
}

static void ftrace_replace_code(int enable)
{
        unsigned char *new = NULL, *old = NULL;
        struct dyn_ftrace *rec;
        struct ftrace_page *pg;
        int i;

        if (enable)
                old = ftrace_nop_replace();
        else
                new = ftrace_nop_replace();

        for (pg = ftrace_pages_start; pg; pg = pg->next) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];

                        /* don't modify code that has already faulted */
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;

                        __ftrace_replace_code(rec, old, new, enable);
                }
        }
}

static void ftrace_shutdown_replenish(void)
{
        if (ftrace_pages->next)
                return;

        /* allocate another page */
        ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}

static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
        unsigned long ip;
        unsigned char *nop, *call;
        int failed;

        ip = rec->ip;

        nop = ftrace_nop_replace();
        call = ftrace_call_replace(ip, MCOUNT_ADDR);

        failed = ftrace_modify_code(ip, call, nop);
        if (failed) {
                rec->flags |= FTRACE_FL_FAILED;
                ftrace_free_rec(rec);
        }
}

static int __ftrace_modify_code(void *data)
{
        unsigned long addr;
        int *command = data;

        if (*command & FTRACE_ENABLE_CALLS)
                ftrace_replace_code(1);
        else if (*command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (*command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (*command & FTRACE_ENABLE_MCOUNT) {
                addr = (unsigned long)ftrace_record_ip;
                ftrace_mcount_set(&addr);
        } else if (*command & FTRACE_DISABLE_MCOUNT) {
                addr = (unsigned long)ftrace_stub;
                ftrace_mcount_set(&addr);
        }

        return 0;
}

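/*
 * stop_machine_run() executes __ftrace_modify_code() with all other
 * CPUs spinning with interrupts disabled, so no CPU can be running
 * through an mcount call site while its instruction bytes are being
 * rewritten.
 */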
static void ftrace_run_update_code(int command)
{
        stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

static ftrace_func_t saved_ftrace_func;

static void ftrace_startup(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend++;
        if (ftraced_suspend == 1)
                command |= FTRACE_ENABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown(void)
{
        int command = 0;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        ftraced_suspend--;
        if (!ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        if (saved_ftrace_func != ftrace_trace_function) {
                saved_ftrace_func = ftrace_trace_function;
                command |= FTRACE_UPDATE_TRACE_FUNC;
        }

        if (!command || !ftrace_enabled)
                goto out;

        ftrace_run_update_code(command);
 out:
        mutex_unlock(&ftraced_lock);
}

static void ftrace_startup_sysctl(void)
{
        int command = FTRACE_ENABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* Force update next time */
        saved_ftrace_func = NULL;
        /* ftraced_suspend is true if we want ftrace running */
        if (ftraced_suspend)
                command |= FTRACE_ENABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static void ftrace_shutdown_sysctl(void)
{
        int command = FTRACE_DISABLE_MCOUNT;

        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftraced_lock);
        /* ftraced_suspend is true if ftrace is running */
        if (ftraced_suspend)
                command |= FTRACE_DISABLE_CALLS;

        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);
}

static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

static int __ftrace_update_code(void *ignore)
{
        struct dyn_ftrace *p;
        struct hlist_head head;
        struct hlist_node *t;
        int save_ftrace_enabled;
        cycle_t start, stop;
        int i;

        /* Don't be recording funcs now */
        save_ftrace_enabled = ftrace_enabled;
        ftrace_enabled = 0;

        start = ftrace_now(raw_smp_processor_id());
        ftrace_update_cnt = 0;

        /* No locks needed, the machine is stopped! */
        for (i = 0; i < FTRACE_HASHSIZE; i++) {
                if (hlist_empty(&ftrace_hash[i]))
                        continue;

                head = ftrace_hash[i];
                INIT_HLIST_HEAD(&ftrace_hash[i]);

                /* all CPUS are stopped, we are safe to modify code */
                hlist_for_each_entry(p, t, &head, node) {
                        ftrace_code_disable(p);
                        ftrace_update_cnt++;
                }
        }

        stop = ftrace_now(raw_smp_processor_id());
        ftrace_update_time = stop - start;
        ftrace_update_tot_cnt += ftrace_update_cnt;

        ftrace_enabled = save_ftrace_enabled;

        return 0;
}

static void ftrace_update_code(void)
{
        if (unlikely(ftrace_disabled))
                return;

        stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}

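/*
 * ftraced wakes up once a second; if new call sites were hashed it
 * converts them to nops via ftrace_update_code(), bumps the
 * iteration counter and wakes anyone blocked in ftrace_force_update().
 */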
static int ftraced(void *ignore)
{
        unsigned long usecs;

        while (!kthread_should_stop()) {

                set_current_state(TASK_INTERRUPTIBLE);

                /* check once a second */
                schedule_timeout(HZ);

                if (unlikely(ftrace_disabled))
                        continue;

                mutex_lock(&ftrace_sysctl_lock);
                mutex_lock(&ftraced_lock);
                if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
                        ftrace_record_suspend++;
                        ftrace_update_code();
                        usecs = nsecs_to_usecs(ftrace_update_time);
                        if (ftrace_update_tot_cnt > 100000) {
                                ftrace_update_tot_cnt = 0;
                                pr_info("hm, dftrace overflow: %lu change%s"
                                        " (%lu total) in %lu usec%s\n",
                                        ftrace_update_cnt,
                                        ftrace_update_cnt != 1 ? "s" : "",
                                        ftrace_update_tot_cnt,
                                        usecs, usecs != 1 ? "s" : "");
                                ftrace_disabled = 1;
                                WARN_ON_ONCE(1);
                        }
                        ftraced_trigger = 0;
                        ftrace_record_suspend--;
                }
                ftraced_iteration_counter++;
                mutex_unlock(&ftraced_lock);
                mutex_unlock(&ftrace_sysctl_lock);

                wake_up_interruptible(&ftraced_waiters);

                ftrace_shutdown_replenish();
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

static int __init ftrace_dyn_table_alloc(void)
{
        struct ftrace_page *pg;
        int cnt;
        int i;

        /* allocate a few pages */
        ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
        if (!ftrace_pages_start)
                return -1;

        /*
         * Allocate a few more pages.
         *
         * TODO: have some parser search vmlinux before
         *   final linking to find all calls to ftrace.
         *   Then we can:
         *    a) know how many pages to allocate.
         *     and/or
         *    b) set up the table then.
         *
         *  The dynamic code is still necessary for
         *  modules.
         */

        pg = ftrace_pages = ftrace_pages_start;

        cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

        for (i = 0; i < cnt; i++) {
                pg->next = (void *)get_zeroed_page(GFP_KERNEL);

                /* If we fail, we'll try later anyway */
                if (!pg->next)
                        break;

                pg = pg->next;
        }

        return 0;
}

enum {
        FTRACE_ITER_FILTER = (1 << 0),
        FTRACE_ITER_CONT = (1 << 1),
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

struct ftrace_iterator {
        loff_t pos;
        struct ftrace_page *pg;
        unsigned idx;
        unsigned flags;
        unsigned char buffer[FTRACE_BUFF_MAX+1];
        unsigned buffer_idx;
        unsigned filtered;
};
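
/*
 * One ftrace_iterator is allocated per open of the debugfs files.
 * pg/idx track the seq_file position within the record pages, and
 * buffer accumulates a partially written filter token across
 * successive write() calls (FTRACE_ITER_CONT).
 */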

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        struct dyn_ftrace *rec = NULL;

        (*pos)++;

 retry:
        if (iter->idx >= iter->pg->index) {
                if (iter->pg->next) {
                        iter->pg = iter->pg->next;
                        iter->idx = 0;
                        goto retry;
                }
        } else {
                rec = &iter->pg->records[iter->idx++];
                if ((rec->flags & FTRACE_FL_FAILED) ||
                    ((iter->flags & FTRACE_ITER_FILTER) &&
                     !(rec->flags & FTRACE_FL_FILTER))) {
                        rec = NULL;
                        goto retry;
                }
        }

        iter->pos = *pos;

        return rec;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct ftrace_iterator *iter = m->private;
        void *p = NULL;
        loff_t l = -1;

        if (*pos != iter->pos) {
                for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
                        ;
        } else {
                l = *pos;
                p = t_next(m, p, &l);
        }

        return p;
}

static void t_stop(struct seq_file *m, void *p)
{
}

static int t_show(struct seq_file *m, void *v)
{
        struct dyn_ftrace *rec = v;
        char str[KSYM_SYMBOL_LEN];

        if (!rec)
                return 0;

        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

        seq_printf(m, "%s\n", str);

        return 0;
}

static struct seq_operations show_ftrace_seq_ops = {
        .start = t_start,
        .next = t_next,
        .stop = t_stop,
        .show = t_show,
};

static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        iter->pg = ftrace_pages_start;
        iter->pos = -1;

        ret = seq_open(file, &show_ftrace_seq_ops);
        if (!ret) {
                struct seq_file *m = file->private_data;

                m->private = iter;
        } else {
                kfree(iter);
        }

        return ret;
}

int ftrace_avail_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter = m->private;

        seq_release(inode, file);
        kfree(iter);

        return 0;
}

static void ftrace_filter_reset(void)
{
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        unsigned i;

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 0;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        rec->flags &= ~FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
        struct ftrace_iterator *iter;
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter)
                return -ENOMEM;

        mutex_lock(&ftrace_filter_lock);
        if ((file->f_mode & FMODE_WRITE) &&
            !(file->f_flags & O_APPEND))
                ftrace_filter_reset();

        if (file->f_mode & FMODE_READ) {
                iter->pg = ftrace_pages_start;
                iter->pos = -1;
                iter->flags = FTRACE_ITER_FILTER;

                ret = seq_open(file, &show_ftrace_seq_ops);
                if (!ret) {
                        struct seq_file *m = file->private_data;
                        m->private = iter;
                } else
                        kfree(iter);
        } else
                file->private_data = iter;
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        if (file->f_mode & FMODE_READ)
                return seq_read(file, ubuf, cnt, ppos);
        else
                return -EPERM;
}

static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
        loff_t ret;

        if (file->f_mode & FMODE_READ)
                ret = seq_lseek(file, offset, origin);
        else
                file->f_pos = ret = 1;

        return ret;
}

enum {
        MATCH_FULL,
        MATCH_FRONT_ONLY,
        MATCH_MIDDLE_ONLY,
        MATCH_END_ONLY,
};
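
/*
 * ftrace_match() maps a filter string onto these match types:
 *
 *   "foo"     MATCH_FULL         exact match
 *   "foo*"    MATCH_FRONT_ONLY   match the prefix before the '*'
 *   "*foo"    MATCH_END_ONLY     match the suffix after the '*'
 *   "*foo*"   MATCH_MIDDLE_ONLY  match the substring between the '*'s
 */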

static void
ftrace_match(unsigned char *buff, int len)
{
        char str[KSYM_SYMBOL_LEN];
        char *search = NULL;
        struct ftrace_page *pg;
        struct dyn_ftrace *rec;
        int type = MATCH_FULL;
        unsigned i, match = 0, search_len = 0;

        for (i = 0; i < len; i++) {
                if (buff[i] == '*') {
                        if (!i) {
                                search = buff + i + 1;
                                type = MATCH_END_ONLY;
                                search_len = len - (i + 1);
                        } else {
                                if (type == MATCH_END_ONLY) {
                                        type = MATCH_MIDDLE_ONLY;
                                } else {
                                        match = i;
                                        type = MATCH_FRONT_ONLY;
                                }
                                buff[i] = 0;
                                break;
                        }
                }
        }

        /* keep kstop machine from running */
        preempt_disable();
        ftrace_filtered = 1;
        pg = ftrace_pages_start;
        while (pg) {
                for (i = 0; i < pg->index; i++) {
                        int matched = 0;
                        char *ptr;

                        rec = &pg->records[i];
                        if (rec->flags & FTRACE_FL_FAILED)
                                continue;
                        kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
                        switch (type) {
                        case MATCH_FULL:
                                if (strcmp(str, buff) == 0)
                                        matched = 1;
                                break;
                        case MATCH_FRONT_ONLY:
                                if (memcmp(str, buff, match) == 0)
                                        matched = 1;
                                break;
                        case MATCH_MIDDLE_ONLY:
                                if (strstr(str, search))
                                        matched = 1;
                                break;
                        case MATCH_END_ONLY:
                                ptr = strstr(str, search);
                                if (ptr && (ptr[search_len] == 0))
                                        matched = 1;
                                break;
                        }
                        if (matched)
                                rec->flags |= FTRACE_FL_FILTER;
                }
                pg = pg->next;
        }
        preempt_enable();
}

static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
{
        struct ftrace_iterator *iter;
        char ch;
        size_t read = 0;
        ssize_t ret;

        if (!cnt || cnt < 0)
                return 0;

        mutex_lock(&ftrace_filter_lock);

        if (file->f_mode & FMODE_READ) {
                struct seq_file *m = file->private_data;
                iter = m->private;
        } else
                iter = file->private_data;

        if (!*ppos) {
                iter->flags &= ~FTRACE_ITER_CONT;
                iter->buffer_idx = 0;
        }

        ret = get_user(ch, ubuf++);
        if (ret)
                goto out;
        read++;
        cnt--;

        if (!(iter->flags & ~FTRACE_ITER_CONT)) {
                /* skip white space */
                while (cnt && isspace(ch)) {
                        ret = get_user(ch, ubuf++);
                        if (ret)
                                goto out;
                        read++;
                        cnt--;
                }

                if (isspace(ch)) {
                        file->f_pos += read;
                        ret = read;
                        goto out;
                }

                iter->buffer_idx = 0;
        }

        while (cnt && !isspace(ch)) {
                if (iter->buffer_idx < FTRACE_BUFF_MAX)
                        iter->buffer[iter->buffer_idx++] = ch;
                else {
                        ret = -EINVAL;
                        goto out;
                }
                ret = get_user(ch, ubuf++);
                if (ret)
                        goto out;
                read++;
                cnt--;
        }

        if (isspace(ch)) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
                iter->buffer_idx = 0;
        } else
                iter->flags |= FTRACE_ITER_CONT;

        file->f_pos += read;

        ret = read;
 out:
        mutex_unlock(&ftrace_filter_lock);

        return ret;
}

/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
        if (unlikely(ftrace_disabled))
                return;

        mutex_lock(&ftrace_filter_lock);
        if (reset)
                ftrace_filter_reset();
        if (buf)
                ftrace_match(buf, len);
        mutex_unlock(&ftrace_filter_lock);
}
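
/*
 * For example, a hypothetical caller that only wants scheduler
 * functions traced could do something like:
 *
 *      ftrace_set_filter("sched*", strlen("sched*"), 1);
 *
 * (modulo the unsigned char cast; see the comment above
 * ftrace_match() for the accepted wildcard forms).
 */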

static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct ftrace_iterator *iter;

        mutex_lock(&ftrace_filter_lock);
        if (file->f_mode & FMODE_READ) {
                iter = m->private;

                seq_release(inode, file);
        } else
                iter = file->private_data;

        if (iter->buffer_idx) {
                iter->filtered++;
                iter->buffer[iter->buffer_idx] = 0;
                ftrace_match(iter->buffer, iter->buffer_idx);
        }

        mutex_lock(&ftrace_sysctl_lock);
        mutex_lock(&ftraced_lock);
        if (iter->filtered && ftraced_suspend && ftrace_enabled)
                ftrace_run_update_code(FTRACE_ENABLE_CALLS);
        mutex_unlock(&ftraced_lock);
        mutex_unlock(&ftrace_sysctl_lock);

        kfree(iter);
        mutex_unlock(&ftrace_filter_lock);
        return 0;
}

static struct file_operations ftrace_avail_fops = {
        .open = ftrace_avail_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ftrace_avail_release,
};

static struct file_operations ftrace_filter_fops = {
        .open = ftrace_filter_open,
        .read = ftrace_filter_read,
        .write = ftrace_filter_write,
        .llseek = ftrace_filter_lseek,
        .release = ftrace_filter_release,
};

/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
        unsigned long last_counter;
        DECLARE_WAITQUEUE(wait, current);
        int ret = 0;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftraced_lock);
        last_counter = ftraced_iteration_counter;

        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&ftraced_waiters, &wait);

        if (unlikely(!ftraced_task)) {
                ret = -ENODEV;
                goto out;
        }

        do {
                mutex_unlock(&ftraced_lock);
                wake_up_process(ftraced_task);
                schedule();
                mutex_lock(&ftraced_lock);
                if (signal_pending(current)) {
                        ret = -EINTR;
                        break;
                }
                set_current_state(TASK_INTERRUPTIBLE);
        } while (last_counter == ftraced_iteration_counter);

 out:
        mutex_unlock(&ftraced_lock);
        remove_wait_queue(&ftraced_waiters, &wait);
        set_current_state(TASK_RUNNING);

        return ret;
}

static void ftrace_force_shutdown(void)
{
        struct task_struct *task;
        int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

        mutex_lock(&ftraced_lock);
        task = ftraced_task;
        ftraced_task = NULL;
        ftraced_suspend = -1;
        ftrace_run_update_code(command);
        mutex_unlock(&ftraced_lock);

        if (task)
                kthread_stop(task);
}

static __init int ftrace_init_debugfs(void)
{
        struct dentry *d_tracer;
        struct dentry *entry;

        d_tracer = tracing_init_dentry();

        entry = debugfs_create_file("available_filter_functions", 0444,
                                    d_tracer, NULL, &ftrace_avail_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'available_filter_functions' entry\n");

        entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
                                    NULL, &ftrace_filter_fops);
        if (!entry)
                pr_warning("Could not create debugfs "
                           "'set_ftrace_filter' entry\n");
        return 0;
}

fs_initcall(ftrace_init_debugfs);

static int __init ftrace_dynamic_init(void)
{
        struct task_struct *p;
        unsigned long addr;
        int ret;

        addr = (unsigned long)ftrace_record_ip;

        stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

        /* ftrace_dyn_arch_init places the return code in addr */
        if (addr) {
                ret = (int)addr;
                goto failed;
        }

        ret = ftrace_dyn_table_alloc();
        if (ret)
                goto failed;

        p = kthread_run(ftraced, NULL, "ftraced");
        if (IS_ERR(p)) {
                ret = -1;
                goto failed;
        }

        last_ftrace_enabled = ftrace_enabled = 1;
        ftraced_task = p;

        return 0;

 failed:
        ftrace_disabled = 1;
        return ret;
}

core_initcall(ftrace_dynamic_init);
#else
# define ftrace_startup()               do { } while (0)
# define ftrace_shutdown()              do { } while (0)
# define ftrace_startup_sysctl()        do { } while (0)
# define ftrace_shutdown_sysctl()       do { } while (0)
# define ftrace_force_shutdown()        do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */

/**
 * ftrace_kill - totally shut down ftrace
 *
 * This is a safety measure. If something is detected that seems
 * wrong, calling this function keeps ftrace from doing any more
 * code modifications or updates.
 */
void ftrace_kill(void)
{
        mutex_lock(&ftrace_sysctl_lock);
        ftrace_disabled = 1;
        ftrace_enabled = 0;

        clear_ftrace_function();
        mutex_unlock(&ftrace_sysctl_lock);

        /* Try to totally disable ftrace */
        ftrace_force_shutdown();
}

/**
 * register_ftrace_function - register a function for profiling
 * @ops - ops structure that holds the function for profiling.
 *
 * Register a function to be called by all functions in the
 * kernel.
 *
 * Note: @ops->func and all the functions it calls must be labeled
 *       with "notrace", otherwise it will go into a
 *       recursive loop.
 */
int register_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -1;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __register_ftrace_function(ops);
        ftrace_startup();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

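/*
 * Illustrative usage sketch (not part of this file; my_ops and
 * my_callback are made-up names): a tracer supplies a notrace
 * callback in an ftrace_ops and registers it.
 *
 *      static void notrace my_callback(unsigned long ip,
 *                                      unsigned long parent_ip)
 *      {
 *              ... record ip/parent_ip without sleeping ...
 *      }
 *
 *      static struct ftrace_ops my_ops __read_mostly = {
 *              .func = my_callback,
 *      };
 *
 *      register_ftrace_function(&my_ops);
 */
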
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
        int ret;

        mutex_lock(&ftrace_sysctl_lock);
        ret = __unregister_ftrace_function(ops);
        ftrace_shutdown();
        mutex_unlock(&ftrace_sysctl_lock);

        return ret;
}

int
ftrace_enable_sysctl(struct ctl_table *table, int write,
                     struct file *file, void __user *buffer, size_t *lenp,
                     loff_t *ppos)
{
        int ret;

        if (unlikely(ftrace_disabled))
                return -ENODEV;

        mutex_lock(&ftrace_sysctl_lock);

        ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

        if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
                goto out;

        last_ftrace_enabled = ftrace_enabled;

        if (ftrace_enabled) {

                ftrace_startup_sysctl();

                /* we are starting ftrace again */
                if (ftrace_list != &ftrace_list_end) {
                        if (ftrace_list->next == &ftrace_list_end)
                                ftrace_trace_function = ftrace_list->func;
                        else
                                ftrace_trace_function = ftrace_list_func;
                }

        } else {
                /* stopping ftrace calls (just send to ftrace_stub) */
                ftrace_trace_function = ftrace_stub;

                ftrace_shutdown_sysctl();
        }

 out:
        mutex_unlock(&ftrace_sysctl_lock);
        return ret;
}