ftrace: safe traversal of ftrace_hash hlist
[deliverable/linux.git] / kernel / trace / ftrace.c
CommitLineData
16444a8a
ACM
1/*
2 * Infrastructure for profiling code inserted by 'gcc -pg'.
3 *
4 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6 *
7 * Originally ported from the -rt patch by:
8 * Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9 *
10 * Based on code in the latency_tracer, that is:
11 *
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 William Lee Irwin III
14 */
15
3d083395
SR
16#include <linux/stop_machine.h>
17#include <linux/clocksource.h>
18#include <linux/kallsyms.h>
5072c59f
SR
19#include <linux/seq_file.h>
20#include <linux/debugfs.h>
3d083395 21#include <linux/hardirq.h>
2d8b820b 22#include <linux/kthread.h>
5072c59f 23#include <linux/uaccess.h>
2d8b820b 24#include <linux/ftrace.h>
b0fc494f 25#include <linux/sysctl.h>
5072c59f 26#include <linux/ctype.h>
2d8b820b 27#include <linux/hash.h>
3d083395
SR
28#include <linux/list.h>
29
30#include "trace.h"
16444a8a 31
4eebcc81
SR
32/* ftrace_enabled is a method to turn ftrace on or off */
33int ftrace_enabled __read_mostly;
d61f82d0 34static int last_ftrace_enabled;
b0fc494f 35
4eebcc81
SR
36/*
37 * ftrace_disabled is set when an anomaly is discovered.
38 * ftrace_disabled is much stronger than ftrace_enabled.
39 */
40static int ftrace_disabled __read_mostly;
41
3d083395 42static DEFINE_SPINLOCK(ftrace_lock);
b0fc494f
SR
43static DEFINE_MUTEX(ftrace_sysctl_lock);
44
16444a8a
ACM
/* List terminator; its stub func is always safe to call. */
static struct ftrace_ops ftrace_list_end __read_mostly =
{
	.func = ftrace_stub,
};

/* Head of the registered ftrace_ops chain; walked locklessly by tracers. */
static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
/* Function invoked from mcount; ftrace_stub when tracing is inactive. */
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
52
/*
 * Called when more than one ftrace_ops is registered: walks the whole
 * ops list and invokes each handler in turn for this ip/parent_ip pair.
 * The list is traversed without locks; registration publishes entries
 * with a write barrier, matched by the dependent-read barriers below.
 */
void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
	struct ftrace_ops *op = ftrace_list;

	/* in case someone actually ports this to alpha! */
	read_barrier_depends();

	while (op != &ftrace_list_end) {
		/* silly alpha */
		read_barrier_depends();
		op->func(ip, parent_ip);
		op = op->next;
	};
}
67
/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before all CPUs observe the stub.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}
78
/*
 * Add @ops to the head of the ops list and, if tracing is enabled,
 * switch ftrace_trace_function to either the single handler or the
 * list-walking dispatcher. Returns 0.
 */
static int __register_ftrace_function(struct ftrace_ops *ops)
{
	/* Should never be called by interrupts */
	spin_lock(&ftrace_lock);

	ops->next = ftrace_list;
	/*
	 * We are entering ops into the ftrace_list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the ftrace_list.
	 */
	smp_wmb();
	ftrace_list = ops;

	if (ftrace_enabled) {
		/*
		 * For one func, simply call it directly.
		 * For more than one func, call the chain.
		 */
		if (ops->next == &ftrace_list_end)
			ftrace_trace_function = ops->func;
		else
			ftrace_trace_function = ftrace_list_func;
	}

	spin_unlock(&ftrace_lock);

	return 0;
}
109
/*
 * Remove @ops from the ops list. Returns 0 on success, -1 if @ops was
 * not found. If only one handler remains afterwards (and tracing is
 * enabled), call it directly instead of going through the list walker.
 */
static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	struct ftrace_ops **p;
	int ret = 0;

	spin_lock(&ftrace_lock);

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (ftrace_list == ops && ops->next == &ftrace_list_end) {
		ftrace_trace_function = ftrace_stub;
		ftrace_list = &ftrace_list_end;
		goto out;
	}

	/* Find the link that points at ops so it can be spliced out. */
	for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops) {
		ret = -1;
		goto out;
	}

	*p = (*p)->next;

	if (ftrace_enabled) {
		/* If we only have one func left, then call that directly */
		if (ftrace_list == &ftrace_list_end ||
		    ftrace_list->next == &ftrace_list_end)
			ftrace_trace_function = ftrace_list->func;
	}

 out:
	spin_unlock(&ftrace_lock);

	return ret;
}
150
#ifdef CONFIG_DYNAMIC_FTRACE

/* Daemon task that periodically converts newly recorded call sites. */
static struct task_struct *ftraced_task;
static DECLARE_WAIT_QUEUE_HEAD(ftraced_waiters);
/* Incremented each daemon pass; waiters use it to detect a full update. */
static unsigned long ftraced_iteration_counter;

/* Command bits passed to __ftrace_modify_code() under stop_machine. */
enum {
	FTRACE_ENABLE_CALLS = (1 << 0),
	FTRACE_DISABLE_CALLS = (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC = (1 << 2),
	FTRACE_ENABLE_MCOUNT = (1 << 3),
	FTRACE_DISABLE_MCOUNT = (1 << 4),
};

/* Non-zero when a filter is active (FTRACE_FL_FILTER bits matter). */
static int ftrace_filtered;

/* Hash of recorded-but-not-yet-converted call sites, keyed by ip. */
static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];

/* Per-CPU recursion guard for ftrace_record_ip(). */
static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);

static DEFINE_SPINLOCK(ftrace_shutdown_lock);
static DEFINE_MUTEX(ftraced_lock);
static DEFINE_MUTEX(ftrace_regex_lock);

/* A page worth of dyn_ftrace records, chained into a singly linked list. */
struct ftrace_page {
	struct ftrace_page *next;
	unsigned long index;	/* records used so far in this page */
	struct dyn_ftrace records[];	/* flexible array filling the page */
};

#define ENTRIES_PER_PAGE \
	((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))

/* estimate from running different kernels */
#define NR_TO_INIT 10000

static struct ftrace_page *ftrace_pages_start;
static struct ftrace_page *ftrace_pages;	/* page currently being filled */

static int ftraced_trigger;	/* set when new records await conversion */
static int ftraced_suspend;	/* >0 while a tracer wants calls enabled */

static int ftrace_record_suspend;

/* Free list of recycled dyn_ftrace records (->ip doubles as the link). */
static struct dyn_ftrace *ftrace_free_records;
196
e309b41d 197static inline int
9ff9cdb2 198ftrace_ip_in_hash(unsigned long ip, unsigned long key)
3d083395
SR
199{
200 struct dyn_ftrace *p;
201 struct hlist_node *t;
202 int found = 0;
203
ffdaa358 204 hlist_for_each_entry_rcu(p, t, &ftrace_hash[key], node) {
3d083395
SR
205 if (p->ip == ip) {
206 found = 1;
207 break;
208 }
209 }
210
211 return found;
212}
213
/* Publish @node into hash bucket @key; RCU add pairs with lockless lookup. */
static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
	hlist_add_head_rcu(&node->node, &ftrace_hash[key]);
}
219
e309b41d 220static void ftrace_free_rec(struct dyn_ftrace *rec)
37ad5084
SR
221{
222 /* no locking, only called from kstop_machine */
223
224 rec->ip = (unsigned long)ftrace_free_records;
225 ftrace_free_records = rec;
226 rec->flags |= FTRACE_FL_FREE;
227}
228
/*
 * Allocate a dyn_ftrace record for call site @ip: recycle from the free
 * list when possible, otherwise take the next slot in the current page.
 * Returns NULL when no record is available. A corrupted free list
 * (missing FTRACE_FL_FREE) permanently disables ftrace.
 */
static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
	struct dyn_ftrace *rec;

	/* First check for freed records */
	if (ftrace_free_records) {
		rec = ftrace_free_records;

		if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
			/* Free list corruption: fail hard rather than
			 * patch code based on garbage records. */
			WARN_ON_ONCE(1);
			ftrace_free_records = NULL;
			ftrace_disabled = 1;
			ftrace_enabled = 0;
			return NULL;
		}

		/* ->ip of a free record holds the next free-list entry. */
		ftrace_free_records = (void *)rec->ip;
		memset(rec, 0, sizeof(*rec));
		return rec;
	}

	if (ftrace_pages->index == ENTRIES_PER_PAGE) {
		if (!ftrace_pages->next)
			return NULL;
		ftrace_pages = ftrace_pages->next;
	}

	return &ftrace_pages->records[ftrace_pages->index++];
}
258
/*
 * Record a newly hit mcount call site @ip into ftrace_hash so the
 * daemon can later convert it to a nop/call. Runs in tracing context:
 * guards against recursion per-CPU and carefully avoids the scheduler.
 */
static void
ftrace_record_ip(unsigned long ip)
{
	struct dyn_ftrace *node;
	unsigned long flags;
	unsigned long key;
	int resched;
	int atomic;
	int cpu;

	if (!ftrace_enabled || ftrace_disabled)
		return;

	resched = need_resched();
	preempt_disable_notrace();

	/*
	 * We simply need to protect against recursion.
	 * Use the the raw version of smp_processor_id and not
	 * __get_cpu_var which can call debug hooks that can
	 * cause a recursive crash here.
	 */
	cpu = raw_smp_processor_id();
	per_cpu(ftrace_shutdown_disable_cpu, cpu)++;
	if (per_cpu(ftrace_shutdown_disable_cpu, cpu) != 1)
		goto out;

	if (unlikely(ftrace_record_suspend))
		goto out;

	key = hash_long(ip, FTRACE_HASHBITS);

	WARN_ON_ONCE(key >= FTRACE_HASHSIZE);

	/* Fast path: already recorded, nothing to do. */
	if (ftrace_ip_in_hash(ip, key))
		goto out;

	/* NOTE(review): 'atomic' is computed but unused here — looks
	 * like leftover state for a removed path; confirm upstream. */
	atomic = irqs_disabled();

	spin_lock_irqsave(&ftrace_shutdown_lock, flags);

	/* This ip may have hit the hash before the lock */
	if (ftrace_ip_in_hash(ip, key))
		goto out_unlock;

	/*
	 * There's a slight race that the ftraced will update the
	 * hash and reset here. If it is already converted, skip it.
	 */
	if (ftrace_ip_converted(ip))
		goto out_unlock;

	node = ftrace_alloc_dyn_node(ip);
	if (!node)
		goto out_unlock;

	node->ip = ip;

	ftrace_add_hash(node, key);

	/* Tell the daemon there is work to pick up. */
	ftraced_trigger = 1;

 out_unlock:
	spin_unlock_irqrestore(&ftrace_shutdown_lock, flags);
 out:
	per_cpu(ftrace_shutdown_disable_cpu, cpu)--;

	/* prevent recursion with scheduler */
	if (resched)
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}
332
/* Targets for patched call sites: the tracer trampoline and raw mcount. */
#define FTRACE_ADDR ((long)(ftrace_caller))
#define MCOUNT_ADDR ((long)(mcount))
/*
 * Patch one record's call site to enable (@enable) or disable tracing,
 * honouring the FILTER/NOTRACE flags. @old/@new carry the nop side of
 * the patch (prepared by the caller); the call side is built here.
 */
static void
__ftrace_replace_code(struct dyn_ftrace *rec,
		      unsigned char *old, unsigned char *new, int enable)
{
	unsigned long ip, fl;
	int failed;

	ip = rec->ip;

	if (ftrace_filtered && enable) {
		/*
		 * If filtering is on:
		 *
		 * If this record is set to be filtered and
		 * is enabled then do nothing.
		 *
		 * If this record is set to be filtered and
		 * it is not enabled, enable it.
		 *
		 * If this record is not set to be filtered
		 * and it is not enabled do nothing.
		 *
		 * If this record is set not to trace then
		 * do nothing.
		 *
		 * If this record is not set to be filtered and
		 * it is enabled, disable it.
		 */
		fl = rec->flags & (FTRACE_FL_FILTER | FTRACE_FL_ENABLED);

		if ((fl == (FTRACE_FL_FILTER | FTRACE_FL_ENABLED)) ||
		    (fl == 0) || (rec->flags & FTRACE_FL_NOTRACE))
			return;

		/*
		 * If it is enabled disable it,
		 * otherwise enable it!
		 */
		if (fl == FTRACE_FL_ENABLED) {
			/* swap new and old */
			new = old;
			old = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags &= ~FTRACE_FL_ENABLED;
		} else {
			new = ftrace_call_replace(ip, FTRACE_ADDR);
			rec->flags |= FTRACE_FL_ENABLED;
		}
	} else {

		if (enable) {
			/*
			 * If this record is set not to trace and is
			 * not enabled, do nothing.
			 */
			fl = rec->flags & (FTRACE_FL_NOTRACE | FTRACE_FL_ENABLED);
			if (fl == FTRACE_FL_NOTRACE)
				return;

			new = ftrace_call_replace(ip, FTRACE_ADDR);
		} else
			old = ftrace_call_replace(ip, FTRACE_ADDR);

		/* Skip the patch when the record is already in the
		 * requested state; otherwise flip the ENABLED bit. */
		if (enable) {
			if (rec->flags & FTRACE_FL_ENABLED)
				return;
			rec->flags |= FTRACE_FL_ENABLED;
		} else {
			if (!(rec->flags & FTRACE_FL_ENABLED))
				return;
			rec->flags &= ~FTRACE_FL_ENABLED;
		}
	}

	failed = ftrace_modify_code(ip, old, new);
	if (failed) {
		unsigned long key;
		/* It is possible that the function hasn't been converted yet */
		key = hash_long(ip, FTRACE_HASHBITS);
		if (!ftrace_ip_in_hash(ip, key)) {
			/* Genuine failure: mark it and recycle the record. */
			rec->flags |= FTRACE_FL_FAILED;
			ftrace_free_rec(rec);
		}

	}
}
421
e309b41d 422static void ftrace_replace_code(int enable)
3c1720f0
SR
423{
424 unsigned char *new = NULL, *old = NULL;
425 struct dyn_ftrace *rec;
426 struct ftrace_page *pg;
3c1720f0
SR
427 int i;
428
5072c59f 429 if (enable)
3c1720f0
SR
430 old = ftrace_nop_replace();
431 else
432 new = ftrace_nop_replace();
433
434 for (pg = ftrace_pages_start; pg; pg = pg->next) {
435 for (i = 0; i < pg->index; i++) {
436 rec = &pg->records[i];
437
438 /* don't modify code that has already faulted */
439 if (rec->flags & FTRACE_FL_FAILED)
440 continue;
441
5072c59f 442 __ftrace_replace_code(rec, old, new, enable);
3c1720f0
SR
443 }
444 }
445}
446
e309b41d 447static void ftrace_shutdown_replenish(void)
3c1720f0
SR
448{
449 if (ftrace_pages->next)
450 return;
451
452 /* allocate another page */
453 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
454}
3d083395 455
/*
 * Convert a freshly recorded call site from its original mcount call
 * into a nop. On patch failure the record is marked FAILED and freed.
 */
static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
	unsigned long ip;
	unsigned char *nop, *call;
	int failed;

	ip = rec->ip;

	nop = ftrace_nop_replace();
	call = ftrace_call_replace(ip, MCOUNT_ADDR);

	failed = ftrace_modify_code(ip, call, nop);
	if (failed) {
		rec->flags |= FTRACE_FL_FAILED;
		ftrace_free_rec(rec);
	}
}
474
/*
 * Executed under stop_machine: apply the command bitmask (@data points
 * at an int of FTRACE_* bits) while every other CPU is quiesced, which
 * makes the text modifications safe.
 */
static int __ftrace_modify_code(void *data)
{
	unsigned long addr;
	int *command = data;

	/* ENABLE wins over DISABLE if both bits are set. */
	if (*command & FTRACE_ENABLE_CALLS)
		ftrace_replace_code(1);
	else if (*command & FTRACE_DISABLE_CALLS)
		ftrace_replace_code(0);

	if (*command & FTRACE_UPDATE_TRACE_FUNC)
		ftrace_update_ftrace_func(ftrace_trace_function);

	/* Point raw mcount at the recorder or at the stub. */
	if (*command & FTRACE_ENABLE_MCOUNT) {
		addr = (unsigned long)ftrace_record_ip;
		ftrace_mcount_set(&addr);
	} else if (*command & FTRACE_DISABLE_MCOUNT) {
		addr = (unsigned long)ftrace_stub;
		ftrace_mcount_set(&addr);
	}

	return 0;
}
498
/* Run __ftrace_modify_code(@command) with all CPUs stopped. */
static void ftrace_run_update_code(int command)
{
	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}

/* Trace function last installed, to detect when an update is needed. */
static ftrace_func_t saved_ftrace_func;
/*
 * Called when a tracer starts: bump the suspend refcount and, on the
 * first user or a changed trace function, patch the kernel accordingly.
 */
static void ftrace_startup(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend++;
	if (ftraced_suspend == 1)
		command |= FTRACE_ENABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
530
/*
 * Called when a tracer stops: drop the suspend refcount and, once the
 * last user is gone, patch all call sites back to nops.
 */
static void ftrace_shutdown(void)
{
	int command = 0;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	ftraced_suspend--;
	if (!ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	if (saved_ftrace_func != ftrace_trace_function) {
		saved_ftrace_func = ftrace_trace_function;
		command |= FTRACE_UPDATE_TRACE_FUNC;
	}

	if (!command || !ftrace_enabled)
		goto out;

	ftrace_run_update_code(command);
 out:
	mutex_unlock(&ftraced_lock);
}
555
/* sysctl turned ftrace on: rearm mcount and re-enable calls if in use. */
static void ftrace_startup_sysctl(void)
{
	int command = FTRACE_ENABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* Force update next time */
	saved_ftrace_func = NULL;
	/* ftraced_suspend is true if we want ftrace running */
	if (ftraced_suspend)
		command |= FTRACE_ENABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
573
/* sysctl turned ftrace off: stub out mcount and disable active calls. */
static void ftrace_shutdown_sysctl(void)
{
	int command = FTRACE_DISABLE_MCOUNT;

	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftraced_lock);
	/* ftraced_suspend is true if ftrace is running */
	if (ftraced_suspend)
		command |= FTRACE_DISABLE_CALLS;

	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);
}
589
/* Bookkeeping for how long and how many conversions the daemon did. */
static cycle_t ftrace_update_time;
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;

/*
 * Executed under stop_machine: drain every ftrace_hash bucket and
 * convert each recorded call site to a nop. With all CPUs stopped the
 * hash and the text can be modified without further locking.
 */
static int __ftrace_update_code(void *ignore)
{
	struct dyn_ftrace *p;
	struct hlist_head head;
	struct hlist_node *t;
	int save_ftrace_enabled;
	cycle_t start, stop;
	int i;

	/* Don't be recording funcs now */
	save_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;

	start = ftrace_now(raw_smp_processor_id());
	ftrace_update_cnt = 0;

	/* No locks needed, the machine is stopped! */
	for (i = 0; i < FTRACE_HASHSIZE; i++) {
		if (hlist_empty(&ftrace_hash[i]))
			continue;

		/* Detach the bucket before walking it so new hits
		 * after restart land in a fresh, empty bucket. */
		head = ftrace_hash[i];
		INIT_HLIST_HEAD(&ftrace_hash[i]);

		/* all CPUS are stopped, we are safe to modify code */
		hlist_for_each_entry(p, t, &head, node) {
			ftrace_code_disable(p);
			ftrace_update_cnt++;
		}

	}

	stop = ftrace_now(raw_smp_processor_id());
	ftrace_update_time = stop - start;
	ftrace_update_tot_cnt += ftrace_update_cnt;

	ftrace_enabled = save_ftrace_enabled;

	return 0;
}
634
/* Convert all pending records with every CPU stopped. */
static void ftrace_update_code(void)
{
	if (unlikely(ftrace_disabled))
		return;

	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
642
/*
 * The ftrace daemon: once a second, if new call sites were recorded
 * (and no tracer holds the suspend count), convert them to nops, keep
 * overflow statistics, and replenish the spare record page.
 */
static int ftraced(void *ignore)
{
	unsigned long usecs;

	while (!kthread_should_stop()) {

		set_current_state(TASK_INTERRUPTIBLE);

		/* check once a second */
		schedule_timeout(HZ);

		if (unlikely(ftrace_disabled))
			continue;

		mutex_lock(&ftrace_sysctl_lock);
		mutex_lock(&ftraced_lock);
		if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
			ftrace_record_suspend++;
			ftrace_update_code();
			usecs = nsecs_to_usecs(ftrace_update_time);
			if (ftrace_update_tot_cnt > 100000) {
				/* Far more call sites than any sane
				 * kernel has: assume corruption and
				 * shut ftrace down for good. */
				ftrace_update_tot_cnt = 0;
				pr_info("hm, dftrace overflow: %lu change%s"
					" (%lu total) in %lu usec%s\n",
					ftrace_update_cnt,
					ftrace_update_cnt != 1 ? "s" : "",
					ftrace_update_tot_cnt,
					usecs, usecs != 1 ? "s" : "");
				ftrace_disabled = 1;
				WARN_ON_ONCE(1);
			}
			ftraced_trigger = 0;
			ftrace_record_suspend--;
		}
		ftraced_iteration_counter++;
		mutex_unlock(&ftraced_lock);
		mutex_unlock(&ftrace_sysctl_lock);

		/* Let ftrace_force_update() waiters see the new counter. */
		wake_up_interruptible(&ftraced_waiters);

		ftrace_shutdown_replenish();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}
688
/*
 * Boot-time allocation of the dyn_ftrace record pages. Returns 0 on
 * success, -1 if even the first page cannot be allocated; later pages
 * are best-effort since the daemon replenishes the list at runtime.
 */
static int __init ftrace_dyn_table_alloc(void)
{
	struct ftrace_page *pg;
	int cnt;
	int i;

	/* allocate a few pages */
	ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
	if (!ftrace_pages_start)
		return -1;

	/*
	 * Allocate a few more pages.
	 *
	 * TODO: have some parser search vmlinux before
	 *   final linking to find all calls to ftrace.
	 *   Then we can:
	 *    a) know how many pages to allocate.
	 *     and/or
	 *    b) set up the table then.
	 *
	 *  The dynamic code is still necessary for
	 *  modules.
	 */

	pg = ftrace_pages = ftrace_pages_start;

	cnt = NR_TO_INIT / ENTRIES_PER_PAGE;

	for (i = 0; i < cnt; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);

		/* If we fail, we'll try later anyway */
		if (!pg->next)
			break;

		pg = pg->next;
	}

	return 0;
}
730
/* Flags controlling how the seq_file iterator walks the record pages. */
enum {
	FTRACE_ITER_FILTER = (1 << 0),	/* show only FILTER-flagged records */
	FTRACE_ITER_CONT = (1 << 1),	/* a partial token is buffered */
	FTRACE_ITER_NOTRACE = (1 << 2),	/* show only NOTRACE-flagged records */
};

#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */

/* Per-open state for the avail/filter/notrace debugfs files. */
struct ftrace_iterator {
	loff_t pos;			/* last seq position produced */
	struct ftrace_page *pg;		/* current record page */
	unsigned idx;			/* next index within pg */
	unsigned flags;			/* FTRACE_ITER_* */
	unsigned char buffer[FTRACE_BUFF_MAX+1];	/* partial write token */
	unsigned buffer_idx;
	unsigned filtered;		/* tokens applied during this open */
};
748
/*
 * seq_file .next: advance to the next record that survives the
 * iterator's FAILED/FILTER/NOTRACE screening, crossing page boundaries
 * as needed. Returns NULL at end of the record list.
 */
static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	struct dyn_ftrace *rec = NULL;

	(*pos)++;

 retry:
	if (iter->idx >= iter->pg->index) {
		/* Current page exhausted; move to the next one. */
		if (iter->pg->next) {
			iter->pg = iter->pg->next;
			iter->idx = 0;
			goto retry;
		}
	} else {
		rec = &iter->pg->records[iter->idx++];
		if ((rec->flags & FTRACE_FL_FAILED) ||
		    ((iter->flags & FTRACE_ITER_FILTER) &&
		     !(rec->flags & FTRACE_FL_FILTER)) ||
		    ((iter->flags & FTRACE_ITER_NOTRACE) &&
		     !(rec->flags & FTRACE_FL_NOTRACE))) {
			rec = NULL;
			goto retry;
		}
	}

	iter->pos = *pos;

	return rec;
}
780
/*
 * seq_file .start: if the requested position differs from where the
 * iterator stopped, replay t_next from the beginning to reach it;
 * otherwise just produce the next element.
 */
static void *t_start(struct seq_file *m, loff_t *pos)
{
	struct ftrace_iterator *iter = m->private;
	void *p = NULL;
	loff_t l = -1;

	if (*pos != iter->pos) {
		for (p = t_next(m, p, &l); p && l < *pos; p = t_next(m, p, &l))
			;
	} else {
		l = *pos;
		p = t_next(m, p, &l);
	}

	return p;
}
797
/* seq_file .stop: nothing to release; state lives in m->private. */
static void t_stop(struct seq_file *m, void *p)
{
}
801
/* seq_file .show: print the symbol name of one record's call site. */
static int t_show(struct seq_file *m, void *v)
{
	struct dyn_ftrace *rec = v;
	char str[KSYM_SYMBOL_LEN];

	if (!rec)
		return 0;

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);

	seq_printf(m, "%s\n", str);

	return 0;
}

/* Shared iterator ops for the avail/filter/notrace debugfs files. */
static struct seq_operations show_ftrace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};
823
/*
 * Open "available_filter_functions": set up an unfiltered iterator
 * over all recorded functions and attach it to the seq_file.
 */
static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
	struct ftrace_iterator *iter;
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	iter->pg = ftrace_pages_start;
	/* -1 so the first t_next() lands on position 0. */
	iter->pos = -1;

	ret = seq_open(file, &show_ftrace_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = iter;
	} else {
		kfree(iter);
	}

	return ret;
}
851
852int ftrace_avail_release(struct inode *inode, struct file *file)
853{
854 struct seq_file *m = (struct seq_file *)file->private_data;
855 struct ftrace_iterator *iter = m->private;
856
857 seq_release(inode, file);
858 kfree(iter);
4bf39a94 859
5072c59f
SR
860 return 0;
861}
862
41c52c0d 863static void ftrace_filter_reset(int enable)
5072c59f
SR
864{
865 struct ftrace_page *pg;
866 struct dyn_ftrace *rec;
41c52c0d 867 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
5072c59f
SR
868 unsigned i;
869
870 /* keep kstop machine from running */
871 preempt_disable();
41c52c0d
SR
872 if (enable)
873 ftrace_filtered = 0;
5072c59f
SR
874 pg = ftrace_pages_start;
875 while (pg) {
876 for (i = 0; i < pg->index; i++) {
877 rec = &pg->records[i];
878 if (rec->flags & FTRACE_FL_FAILED)
879 continue;
41c52c0d 880 rec->flags &= ~type;
5072c59f
SR
881 }
882 pg = pg->next;
883 }
884 preempt_enable();
885}
886
/*
 * Common open path for set_ftrace_filter (@enable=1) and
 * set_ftrace_notrace (@enable=0). A plain (non-append) write open
 * resets the corresponding flag on all records first; a read open
 * additionally wires up the seq_file iterator.
 */
static int
ftrace_regex_open(struct inode *inode, struct file *file, int enable)
{
	struct ftrace_iterator *iter;
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	iter = kzalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return -ENOMEM;

	mutex_lock(&ftrace_regex_lock);
	if ((file->f_mode & FMODE_WRITE) &&
	    !(file->f_flags & O_APPEND))
		ftrace_filter_reset(enable);

	if (file->f_mode & FMODE_READ) {
		iter->pg = ftrace_pages_start;
		iter->pos = -1;
		iter->flags = enable ? FTRACE_ITER_FILTER :
			FTRACE_ITER_NOTRACE;

		ret = seq_open(file, &show_ftrace_seq_ops);
		if (!ret) {
			struct seq_file *m = file->private_data;
			m->private = iter;
		} else
			kfree(iter);
	} else
		/* write-only: the iterator just buffers the input token */
		file->private_data = iter;
	mutex_unlock(&ftrace_regex_lock);

	return ret;
}
923
/* debugfs open for set_ftrace_filter (enable = 1). */
static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 1);
}

/* debugfs open for set_ftrace_notrace (enable = 0). */
static int
ftrace_notrace_open(struct inode *inode, struct file *file)
{
	return ftrace_regex_open(inode, file, 0);
}
935
e309b41d 936static ssize_t
41c52c0d 937ftrace_regex_read(struct file *file, char __user *ubuf,
5072c59f
SR
938 size_t cnt, loff_t *ppos)
939{
940 if (file->f_mode & FMODE_READ)
941 return seq_read(file, ubuf, cnt, ppos);
942 else
943 return -EPERM;
944}
945
/* Seek via seq_lseek when readable; write-only opens pin f_pos to 1. */
static loff_t
ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
{
	loff_t ret;

	if (file->f_mode & FMODE_READ)
		ret = seq_lseek(file, offset, origin);
	else
		file->f_pos = ret = 1;

	return ret;
}
958
/* Wildcard forms accepted by ftrace_match(): "x", "x*", "*x*", "*x". */
enum {
	MATCH_FULL,
	MATCH_FRONT_ONLY,
	MATCH_MIDDLE_ONLY,
	MATCH_END_ONLY,
};
965
/*
 * Set the FILTER (@enable) or NOTRACE (!@enable) flag on every record
 * whose symbol name matches @buff. A single '*' is supported: leading,
 * trailing, or both, mapping to the MATCH_* forms above. @buff may be
 * modified in place (the '*' is NUL-terminated out).
 */
static void
ftrace_match(unsigned char *buff, int len, int enable)
{
	char str[KSYM_SYMBOL_LEN];
	char *search = NULL;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int type = MATCH_FULL;
	unsigned long flag = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
	unsigned i, match = 0, search_len = 0;

	/* Classify the pattern by where its '*' (if any) sits. */
	for (i = 0; i < len; i++) {
		if (buff[i] == '*') {
			if (!i) {
				search = buff + i + 1;
				type = MATCH_END_ONLY;
				search_len = len - (i + 1);
			} else {
				if (type == MATCH_END_ONLY) {
					type = MATCH_MIDDLE_ONLY;
				} else {
					match = i;
					type = MATCH_FRONT_ONLY;
				}
				buff[i] = 0;
				break;
			}
		}
	}

	/* keep kstop machine from running */
	preempt_disable();
	if (enable)
		ftrace_filtered = 1;
	pg = ftrace_pages_start;
	while (pg) {
		for (i = 0; i < pg->index; i++) {
			int matched = 0;
			char *ptr;

			rec = &pg->records[i];
			if (rec->flags & FTRACE_FL_FAILED)
				continue;
			kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
			switch (type) {
			case MATCH_FULL:
				if (strcmp(str, buff) == 0)
					matched = 1;
				break;
			case MATCH_FRONT_ONLY:
				if (memcmp(str, buff, match) == 0)
					matched = 1;
				break;
			case MATCH_MIDDLE_ONLY:
				if (strstr(str, search))
					matched = 1;
				break;
			case MATCH_END_ONLY:
				ptr = strstr(str, search);
				if (ptr && (ptr[search_len] == 0))
					matched = 1;
				break;
			}
			if (matched)
				rec->flags |= flag;
		}
		pg = pg->next;
	}
	preempt_enable();
}
1036
e309b41d 1037static ssize_t
41c52c0d
SR
1038ftrace_regex_write(struct file *file, const char __user *ubuf,
1039 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
1040{
1041 struct ftrace_iterator *iter;
1042 char ch;
1043 size_t read = 0;
1044 ssize_t ret;
1045
1046 if (!cnt || cnt < 0)
1047 return 0;
1048
41c52c0d 1049 mutex_lock(&ftrace_regex_lock);
5072c59f
SR
1050
1051 if (file->f_mode & FMODE_READ) {
1052 struct seq_file *m = file->private_data;
1053 iter = m->private;
1054 } else
1055 iter = file->private_data;
1056
1057 if (!*ppos) {
1058 iter->flags &= ~FTRACE_ITER_CONT;
1059 iter->buffer_idx = 0;
1060 }
1061
1062 ret = get_user(ch, ubuf++);
1063 if (ret)
1064 goto out;
1065 read++;
1066 cnt--;
1067
1068 if (!(iter->flags & ~FTRACE_ITER_CONT)) {
1069 /* skip white space */
1070 while (cnt && isspace(ch)) {
1071 ret = get_user(ch, ubuf++);
1072 if (ret)
1073 goto out;
1074 read++;
1075 cnt--;
1076 }
1077
5072c59f
SR
1078 if (isspace(ch)) {
1079 file->f_pos += read;
1080 ret = read;
1081 goto out;
1082 }
1083
1084 iter->buffer_idx = 0;
1085 }
1086
1087 while (cnt && !isspace(ch)) {
1088 if (iter->buffer_idx < FTRACE_BUFF_MAX)
1089 iter->buffer[iter->buffer_idx++] = ch;
1090 else {
1091 ret = -EINVAL;
1092 goto out;
1093 }
1094 ret = get_user(ch, ubuf++);
1095 if (ret)
1096 goto out;
1097 read++;
1098 cnt--;
1099 }
1100
1101 if (isspace(ch)) {
1102 iter->filtered++;
1103 iter->buffer[iter->buffer_idx] = 0;
41c52c0d 1104 ftrace_match(iter->buffer, iter->buffer_idx, enable);
5072c59f
SR
1105 iter->buffer_idx = 0;
1106 } else
1107 iter->flags |= FTRACE_ITER_CONT;
1108
1109
1110 file->f_pos += read;
1111
1112 ret = read;
1113 out:
41c52c0d 1114 mutex_unlock(&ftrace_regex_lock);
5072c59f
SR
1115
1116 return ret;
1117}
1118
/* Write handler for set_ftrace_filter. */
static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
}

/* Write handler for set_ftrace_notrace. */
static ssize_t
ftrace_notrace_write(struct file *file, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
}

/* In-kernel entry: optionally reset, then apply @buf as a pattern. */
static void
ftrace_set_regex(unsigned char *buf, int len, int reset, int enable)
{
	if (unlikely(ftrace_disabled))
		return;

	mutex_lock(&ftrace_regex_lock);
	if (reset)
		ftrace_filter_reset(enable);
	if (buf)
		ftrace_match(buf, len, enable);
	mutex_unlock(&ftrace_regex_lock);
}
1146
/**
 * ftrace_set_filter - set a function to filter on in ftrace
 * @buf - the string that holds the function filter text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Filters denote which functions should be enabled when tracing is enabled.
 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
 */
void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 1);
}

/**
 * ftrace_set_notrace - set a function to not trace in ftrace
 * @buf - the string that holds the function notrace text.
 * @len - the length of the string.
 * @reset - non zero to reset all filters before applying this filter.
 *
 * Notrace Filters denote which functions should not be enabled when tracing
 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
 * for tracing.
 */
void ftrace_set_notrace(unsigned char *buf, int len, int reset)
{
	ftrace_set_regex(buf, len, reset, 0);
}
1175
/*
 * Common release path for the filter/notrace files: flush any buffered
 * partial token, and if patterns were applied while tracing is live,
 * re-run the enable pass so the new filters take effect immediately.
 */
static int
ftrace_regex_release(struct inode *inode, struct file *file, int enable)
{
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;

	mutex_lock(&ftrace_regex_lock);
	if (file->f_mode & FMODE_READ) {
		iter = m->private;

		seq_release(inode, file);
	} else
		iter = file->private_data;

	/* A token not yet terminated by whitespace still counts. */
	if (iter->buffer_idx) {
		iter->filtered++;
		iter->buffer[iter->buffer_idx] = 0;
		ftrace_match(iter->buffer, iter->buffer_idx, enable);
	}

	mutex_lock(&ftrace_sysctl_lock);
	mutex_lock(&ftraced_lock);
	if (iter->filtered && ftraced_suspend && ftrace_enabled)
		ftrace_run_update_code(FTRACE_ENABLE_CALLS);
	mutex_unlock(&ftraced_lock);
	mutex_unlock(&ftrace_sysctl_lock);

	kfree(iter);
	mutex_unlock(&ftrace_regex_lock);
	return 0;
}
1207
41c52c0d
SR
/* release for "set_ftrace_filter": flush pending pattern to the filter list */
static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 1);
}
1213
/* release for "set_ftrace_notrace": flush pending pattern to the notrace list */
static int
ftrace_notrace_release(struct inode *inode, struct file *file)
{
	return ftrace_regex_release(inode, file, 0);
}
1219
5072c59f
SR
/* debugfs "available_filter_functions": read-only listing via seq_file */
static struct file_operations ftrace_avail_fops = {
	.open = ftrace_avail_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = ftrace_avail_release,
};
1226
/* debugfs "set_ftrace_filter": reads list current filters, writes add them */
static struct file_operations ftrace_filter_fops = {
	.open = ftrace_filter_open,
	.read = ftrace_regex_read,
	.write = ftrace_filter_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_filter_release,
};
1234
41c52c0d
SR
/* debugfs "set_ftrace_notrace": same shape as the filter file, notrace list */
static struct file_operations ftrace_notrace_fops = {
	.open = ftrace_notrace_open,
	.read = ftrace_regex_read,
	.write = ftrace_notrace_write,
	.llseek = ftrace_regex_lseek,
	.release = ftrace_notrace_release,
};
1242
e1c08bdd
SR
/**
 * ftrace_force_update - force an update to all recording ftrace functions
 *
 * The ftrace dynamic update daemon only wakes up once a second.
 * There may be cases where an update needs to be done immediately
 * for tests or internal kernel tracing to begin. This function
 * wakes the daemon to do an update and will not return until the
 * update is complete.
 */
int ftrace_force_update(void)
{
	unsigned long last_counter;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftraced_lock);
	/* snapshot the count; a change means the daemon finished a pass */
	last_counter = ftraced_iteration_counter;

	/*
	 * Go to sleep-pending and queue ourselves *before* waking the
	 * daemon, so a wakeup that lands between wake_up_process() and
	 * schedule() below is not lost.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&ftraced_waiters, &wait);

	if (unlikely(!ftraced_task)) {
		/* daemon never started, or was shut down by ftrace_force_shutdown() */
		ret = -ENODEV;
		goto out;
	}

	do {
		/* drop the lock so the daemon can take it for its pass */
		mutex_unlock(&ftraced_lock);
		wake_up_process(ftraced_task);
		schedule();
		mutex_lock(&ftraced_lock);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
		set_current_state(TASK_INTERRUPTIBLE);
	} while (last_counter == ftraced_iteration_counter);

 out:
	mutex_unlock(&ftraced_lock);
	remove_wait_queue(&ftraced_waiters, &wait);
	set_current_state(TASK_RUNNING);

	return ret;
}
1291
4eebcc81
SR
/*
 * Last-resort shutdown (called from ftrace_kill()): disable all patched
 * call sites and stop the ftraced daemon.
 */
static void ftrace_force_shutdown(void)
{
	struct task_struct *task;
	int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;

	mutex_lock(&ftraced_lock);
	task = ftraced_task;
	/* clearing ftraced_task makes ftrace_force_update() fail with -ENODEV */
	ftraced_task = NULL;
	/* NOTE(review): -1 appears to mark the daemon permanently suspended — confirm */
	ftraced_suspend = -1;
	ftrace_run_update_code(command);
	mutex_unlock(&ftraced_lock);

	/* kthread_stop() can block, so do it after dropping the lock */
	if (task)
		kthread_stop(task);
}
1307
5072c59f
SR
1308static __init int ftrace_init_debugfs(void)
1309{
1310 struct dentry *d_tracer;
1311 struct dentry *entry;
1312
1313 d_tracer = tracing_init_dentry();
1314
1315 entry = debugfs_create_file("available_filter_functions", 0444,
1316 d_tracer, NULL, &ftrace_avail_fops);
1317 if (!entry)
1318 pr_warning("Could not create debugfs "
1319 "'available_filter_functions' entry\n");
1320
1321 entry = debugfs_create_file("set_ftrace_filter", 0644, d_tracer,
1322 NULL, &ftrace_filter_fops);
1323 if (!entry)
1324 pr_warning("Could not create debugfs "
1325 "'set_ftrace_filter' entry\n");
41c52c0d
SR
1326
1327 entry = debugfs_create_file("set_ftrace_notrace", 0644, d_tracer,
1328 NULL, &ftrace_notrace_fops);
1329 if (!entry)
1330 pr_warning("Could not create debugfs "
1331 "'set_ftrace_notrace' entry\n");
5072c59f
SR
1332 return 0;
1333}
1334
1335fs_initcall(ftrace_init_debugfs);
1336
/*
 * Boot-time setup for dynamic ftrace: run the arch-specific init under
 * stop_machine_run(), allocate the dyn-ftrace record tables, and start
 * the "ftraced" update daemon.  Any failure sets ftrace_disabled,
 * permanently turning ftrace off.
 */
static int __init ftrace_dynamic_init(void)
{
	struct task_struct *p;
	unsigned long addr;
	int ret;

	addr = (unsigned long)ftrace_record_ip;

	/* arch init must run while all other CPUs are stopped */
	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);

	/* ftrace_dyn_arch_init places the return code in addr */
	if (addr) {
		ret = (int)addr;
		goto failed;
	}

	ret = ftrace_dyn_table_alloc();
	if (ret)
		goto failed;

	/* daemon that performs the periodic code updates (see ftrace_force_update) */
	p = kthread_run(ftraced, NULL, "ftraced");
	if (IS_ERR(p)) {
		ret = -1;
		goto failed;
	}

	last_ftrace_enabled = ftrace_enabled = 1;
	ftraced_task = p;

	return 0;

 failed:
	ftrace_disabled = 1;
	return ret;
}

core_initcall(ftrace_dynamic_init);
#else
/*
 * !CONFIG_DYNAMIC_FTRACE: no code-patching machinery exists, so the
 * startup/shutdown entry points compile away to no-ops.
 */
# define ftrace_startup() do { } while (0)
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
1381
4eebcc81
SR
/**
 * ftrace_kill - totally shutdown ftrace
 *
 * This is a safety measure. If an anomaly is detected, calling this
 * function keeps ftrace from doing any more code modifications or
 * updates. Used when something has gone wrong.
 */
void ftrace_kill(void)
{
	mutex_lock(&ftrace_sysctl_lock);
	/* ftrace_disabled is sticky: nothing re-enables ftrace after this */
	ftrace_disabled = 1;
	ftrace_enabled = 0;

	clear_ftrace_function();
	mutex_unlock(&ftrace_sysctl_lock);

	/* Try to totally disable ftrace */
	ftrace_force_shutdown();
}
1402
16444a8a 1403/**
3d083395
SR
1404 * register_ftrace_function - register a function for profiling
1405 * @ops - ops structure that holds the function for profiling.
16444a8a 1406 *
3d083395
SR
1407 * Register a function to be called by all functions in the
1408 * kernel.
1409 *
1410 * Note: @ops->func and all the functions it calls must be labeled
1411 * with "notrace", otherwise it will go into a
1412 * recursive loop.
16444a8a 1413 */
3d083395 1414int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 1415{
b0fc494f
SR
1416 int ret;
1417
4eebcc81
SR
1418 if (unlikely(ftrace_disabled))
1419 return -1;
1420
b0fc494f 1421 mutex_lock(&ftrace_sysctl_lock);
b0fc494f 1422 ret = __register_ftrace_function(ops);
d61f82d0 1423 ftrace_startup();
b0fc494f
SR
1424 mutex_unlock(&ftrace_sysctl_lock);
1425
1426 return ret;
3d083395
SR
1427}
1428
/**
 * unregister_ftrace_function - unregister a function for profiling.
 * @ops - ops structure that holds the function to unregister
 *
 * Unregister a function that was added to be called by ftrace profiling.
 */
int unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	mutex_lock(&ftrace_sysctl_lock);
	ret = __unregister_ftrace_function(ops);
	/* counterpart of ftrace_startup() in register_ftrace_function() */
	ftrace_shutdown();
	mutex_unlock(&ftrace_sysctl_lock);

	return ret;
}
1446
/*
 * Handler for the "ftrace_enabled" sysctl.  Lets user space toggle
 * tracing at run time: on a 0 -> 1 transition the registered trace
 * function(s) are reinstalled; on 1 -> 0 all calls are routed to
 * ftrace_stub.
 */
int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     struct file *file, void __user *buffer, size_t *lenp,
		     loff_t *ppos)
{
	int ret;

	if (unlikely(ftrace_disabled))
		return -ENODEV;

	mutex_lock(&ftrace_sysctl_lock);

	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);

	/* nothing to do on reads, errors, or writes that did not change the value */
	if (ret || !write || (last_ftrace_enabled == ftrace_enabled))
		goto out;

	last_ftrace_enabled = ftrace_enabled;

	if (ftrace_enabled) {

		ftrace_startup_sysctl();

		/* we are starting ftrace again */
		if (ftrace_list != &ftrace_list_end) {
			/* one tracer: call it directly; more: go through the list walker */
			if (ftrace_list->next == &ftrace_list_end)
				ftrace_trace_function = ftrace_list->func;
			else
				ftrace_trace_function = ftrace_list_func;
		}

	} else {
		/* stopping ftrace calls (just send to ftrace_stub) */
		ftrace_trace_function = ftrace_stub;

		ftrace_shutdown_sysctl();
	}

 out:
	mutex_unlock(&ftrace_sysctl_lock);
	return ret;
}
This page took 0.097622 seconds and 5 git commands to generate.