ftrace: Add variable ftrace_expected for archs to show expected code
kernel/trace/ftrace.c
/*
 * Infrastructure for profiling code inserted by 'gcc -pg'.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally ported from the -rt patch by:
 *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code in the latency_tracer, that is:
 *
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */

#include <linux/stop_machine.h>
#include <linux/clocksource.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/suspend.h>
#include <linux/tracefs.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/rcupdate.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"
#include "trace_stat.h"

#define FTRACE_WARN_ON(cond)			\
	({					\
		int ___r = cond;		\
		if (WARN_ON(___r))		\
			ftrace_kill();		\
		___r;				\
	})

#define FTRACE_WARN_ON_ONCE(cond)		\
	({					\
		int ___r = cond;		\
		if (WARN_ON_ONCE(___r))		\
			ftrace_kill();		\
		___r;				\
	})

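/*
 * Like WARN_ON(), these statement expressions evaluate to the condition
 * itself, so a caller can warn, shut ftrace down and bail out in one
 * step. Example usage (illustrative, the same pattern appears later in
 * this file):
 *
 *	if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
 *		return;
 */
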
/* hash bits for specific function selection */
#define FTRACE_HASH_BITS 7
#define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
#define FTRACE_HASH_DEFAULT_BITS 10
#define FTRACE_HASH_MAX_BITS 12

#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_CONTROL)

#ifdef CONFIG_DYNAMIC_FTRACE
#define INIT_OPS_HASH(opsname)	\
	.func_hash		= &opsname.local_hash,			\
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#define ASSIGN_OPS_HASH(opsname, val) \
	.func_hash		= val, \
	.local_hash.regex_lock	= __MUTEX_INITIALIZER(opsname.local_hash.regex_lock),
#else
#define INIT_OPS_HASH(opsname)
#define ASSIGN_OPS_HASH(opsname, val)
#endif

static struct ftrace_ops ftrace_list_end __read_mostly = {
	.func		= ftrace_stub,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_STUB,
	INIT_OPS_HASH(ftrace_list_end)
};

/* ftrace_enabled is a method to turn ftrace on or off */
int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;

/* Current function tracing op */
struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;
/* What to set function_trace_op to */
static struct ftrace_ops *set_function_trace_op;

/* List for set_ftrace_pid's pids. */
LIST_HEAD(ftrace_pids);
struct ftrace_pid {
	struct list_head list;
	struct pid *pid;
};

static bool ftrace_pids_enabled(void)
{
	return !list_empty(&ftrace_pids);
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

/*
 * ftrace_disabled is set when an anomaly is discovered.
 * ftrace_disabled is much stronger than ftrace_enabled.
 */
static int ftrace_disabled __read_mostly;

static DEFINE_MUTEX(ftrace_lock);

static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
static struct ftrace_ops global_ops;
static struct ftrace_ops control_ops;

static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
				   struct ftrace_ops *op, struct pt_regs *regs);

#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
#else
/* See comment below, where ftrace_ops_list_func is defined */
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
#endif

/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
 * can use rcu_dereference_raw_notrace() is that elements removed from this list
 * are simply leaked, so there is no need to interact with a grace-period
 * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
 * concurrent insertions into the ftrace_global_list.
 *
 * Silly Alpha and silly pointer-speculation compiler optimizations!
 */
#define do_for_each_ftrace_op(op, list)			\
	op = rcu_dereference_raw_notrace(list);		\
	do

/*
 * Optimized for just a single item in the list (as that is the normal case).
 */
#define while_for_each_ftrace_op(op)				\
	while (likely(op = rcu_dereference_raw_notrace((op)->next)) &&	\
	       unlikely((op) != &ftrace_list_end))

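/*
 * Example usage of the iterator pair, as seen in ftrace_update_pid_func()
 * below (illustrative):
 *
 *	do_for_each_ftrace_op(op, ftrace_ops_list) {
 *		if (op->flags & FTRACE_OPS_FL_PID)
 *			op->func = ftrace_pid_func;
 *	} while_for_each_ftrace_op(op);
 */
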
static inline void ftrace_ops_init(struct ftrace_ops *ops)
{
#ifdef CONFIG_DYNAMIC_FTRACE
	if (!(ops->flags & FTRACE_OPS_FL_INITIALIZED)) {
		mutex_init(&ops->local_hash.regex_lock);
		ops->func_hash = &ops->local_hash;
		ops->flags |= FTRACE_OPS_FL_INITIALIZED;
	}
#endif
}

/**
 * ftrace_nr_registered_ops - return number of ops registered
 *
 * Returns the number of ftrace_ops registered and tracing functions
 */
int ftrace_nr_registered_ops(void)
{
	struct ftrace_ops *ops;
	int cnt = 0;

	mutex_lock(&ftrace_lock);

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next)
		cnt++;

	mutex_unlock(&ftrace_lock);

	return cnt;
}

static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (!test_tsk_trace_trace(current))
		return;

	op->saved_func(ip, parent_ip, op, regs);
}

/**
 * clear_ftrace_function - reset the ftrace function
 *
 * This NULLs the ftrace function and in essence stops
 * tracing. There may be a lag before it takes effect.
 */
void clear_ftrace_function(void)
{
	ftrace_trace_function = ftrace_stub;
}

static void control_ops_disable_all(struct ftrace_ops *ops)
{
	int cpu;

	for_each_possible_cpu(cpu)
		*per_cpu_ptr(ops->disabled, cpu) = 1;
}

static int control_ops_alloc(struct ftrace_ops *ops)
{
	int __percpu *disabled;

	disabled = alloc_percpu(int);
	if (!disabled)
		return -ENOMEM;

	ops->disabled = disabled;
	control_ops_disable_all(ops);
	return 0;
}

static void ftrace_sync(struct work_struct *work)
{
	/*
	 * This function is just a stub to implement a hard force
	 * of synchronize_sched(). This requires synchronizing
	 * tasks even in userspace and idle.
	 *
	 * Yes, function tracing is rude.
	 */
}

static void ftrace_sync_ipi(void *data)
{
	/* Probably not needed, but do it anyway */
	smp_rmb();
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static void update_function_graph_func(void);

/* Both enabled by default (can be cleared by function_graph tracer flags) */
static bool fgraph_sleep_time = true;
static bool fgraph_graph_time = true;

#else
static inline void update_function_graph_func(void) { }
#endif

static ftrace_func_t ftrace_ops_get_list_func(struct ftrace_ops *ops)
{
	/*
	 * If this is a dynamic ops or we force list func,
	 * then it needs to call the list anyway.
	 */
	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
		return ftrace_ops_list_func;

	return ftrace_ops_get_func(ops);
}

static void update_ftrace_function(void)
{
	ftrace_func_t func;

	/*
	 * Prepare the ftrace_ops that the arch callback will use.
	 * If there's only one ftrace_ops registered, the ftrace_ops_list
	 * will point to the ops we want.
	 */
	set_function_trace_op = ftrace_ops_list;

	/* If there's no ftrace_ops registered, just call the stub function */
	if (ftrace_ops_list == &ftrace_list_end) {
		func = ftrace_stub;

	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
	} else if (ftrace_ops_list->next == &ftrace_list_end) {
		func = ftrace_ops_get_list_func(ftrace_ops_list);

	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
		func = ftrace_ops_list_func;
	}

	update_function_graph_func();

	/* If there's no change, then do nothing more here */
	if (ftrace_trace_function == func)
		return;

	/*
	 * If we are using the list function, it doesn't care
	 * about the function_trace_ops.
	 */
	if (func == ftrace_ops_list_func) {
		ftrace_trace_function = func;
		/*
		 * Don't even bother setting function_trace_ops,
		 * it would be racy to do so anyway.
		 */
		return;
	}

#ifndef CONFIG_DYNAMIC_FTRACE
	/*
	 * For static tracing, we need to be a bit more careful.
	 * The function change takes effect immediately. Thus,
	 * we need to coordinate the setting of the function_trace_ops
	 * with the setting of the ftrace_trace_function.
	 *
	 * Set the function to the list ops, which will call the
	 * function we want, albeit indirectly, but it handles the
	 * ftrace_ops and doesn't depend on function_trace_op.
	 */
	ftrace_trace_function = ftrace_ops_list_func;
	/*
	 * Make sure all CPUs see this. Yes this is slow, but static
	 * tracing is slow and nasty to have enabled.
	 */
	schedule_on_each_cpu(ftrace_sync);
	/* Now all cpus are using the list ops. */
	function_trace_op = set_function_trace_op;
	/* Make sure the function_trace_op is visible on all CPUs */
	smp_wmb();
	/* Nasty way to force a rmb on all cpus */
	smp_call_function(ftrace_sync_ipi, NULL, 1);
	/* OK, we are all set to update the ftrace_trace_function now! */
#endif /* !CONFIG_DYNAMIC_FTRACE */

	ftrace_trace_function = func;
}

int using_ftrace_ops_list_func(void)
{
	return ftrace_trace_function == ftrace_ops_list_func;
}

static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	ops->next = *list;
	/*
	 * We are entering ops into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the ops->next pointer is valid before another CPU sees
	 * the ops pointer included into the list.
	 */
	rcu_assign_pointer(*list, ops);
}

static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
{
	struct ftrace_ops **p;

	/*
	 * If we are removing the last function, then simply point
	 * to the ftrace_stub.
	 */
	if (*list == ops && ops->next == &ftrace_list_end) {
		*list = &ftrace_list_end;
		return 0;
	}

	for (p = list; *p != &ftrace_list_end; p = &(*p)->next)
		if (*p == ops)
			break;

	if (*p != ops)
		return -1;

	*p = (*p)->next;
	return 0;
}

static void add_ftrace_list_ops(struct ftrace_ops **list,
				struct ftrace_ops *main_ops,
				struct ftrace_ops *ops)
{
	int first = *list == &ftrace_list_end;
	add_ftrace_ops(list, ops);
	if (first)
		add_ftrace_ops(&ftrace_ops_list, main_ops);
}

static int remove_ftrace_list_ops(struct ftrace_ops **list,
				  struct ftrace_ops *main_ops,
				  struct ftrace_ops *ops)
{
	int ret = remove_ftrace_ops(list, ops);
	if (!ret && *list == &ftrace_list_end)
		ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
	return ret;
}

static void ftrace_update_trampoline(struct ftrace_ops *ops);

static int __register_ftrace_function(struct ftrace_ops *ops)
{
	if (ops->flags & FTRACE_OPS_FL_DELETED)
		return -EINVAL;

	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
		return -EBUSY;

#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * If the ftrace_ops specifies SAVE_REGS, then it only can be used
	 * if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
	 * Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
	 */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
	    !(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
		return -EINVAL;

	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
		ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif

	if (!core_kernel_data((unsigned long)ops))
		ops->flags |= FTRACE_OPS_FL_DYNAMIC;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		if (control_ops_alloc(ops))
			return -ENOMEM;
		add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
		/* The control_ops needs the trampoline update */
		ops = &control_ops;
	} else
		add_ftrace_ops(&ftrace_ops_list, ops);

	/* Always save the function, and reset at unregistering */
	ops->saved_func = ops->func;

	if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
		ops->func = ftrace_pid_func;

	ftrace_update_trampoline(ops);

	if (ftrace_enabled)
		update_ftrace_function();

	return 0;
}

static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
	int ret;

	if (WARN_ON(!(ops->flags & FTRACE_OPS_FL_ENABLED)))
		return -EBUSY;

	if (ops->flags & FTRACE_OPS_FL_CONTROL) {
		ret = remove_ftrace_list_ops(&ftrace_control_list,
					     &control_ops, ops);
	} else
		ret = remove_ftrace_ops(&ftrace_ops_list, ops);

	if (ret < 0)
		return ret;

	if (ftrace_enabled)
		update_ftrace_function();

	ops->func = ops->saved_func;

	return 0;
}

static void ftrace_update_pid_func(void)
{
	bool enabled = ftrace_pids_enabled();
	struct ftrace_ops *op;

	/* Only do something if we are tracing something */
	if (ftrace_trace_function == ftrace_stub)
		return;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		if (op->flags & FTRACE_OPS_FL_PID) {
			op->func = enabled ? ftrace_pid_func :
				op->saved_func;
			ftrace_update_trampoline(op);
		}
	} while_for_each_ftrace_op(op);

	update_ftrace_function();
}

#ifdef CONFIG_FUNCTION_PROFILER
struct ftrace_profile {
	struct hlist_node		node;
	unsigned long			ip;
	unsigned long			counter;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	unsigned long long		time;
	unsigned long long		time_squared;
#endif
};

struct ftrace_profile_page {
	struct ftrace_profile_page	*next;
	unsigned long			index;
	struct ftrace_profile		records[];
};

struct ftrace_profile_stat {
	atomic_t			disabled;
	struct hlist_head		*hash;
	struct ftrace_profile_page	*pages;
	struct ftrace_profile_page	*start;
	struct tracer_stat		stat;
};

#define PROFILE_RECORDS_SIZE						\
	(PAGE_SIZE - offsetof(struct ftrace_profile_page, records))

#define PROFILES_PER_PAGE					\
	(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))

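/*
 * Sizing sketch (illustrative; assumes 4K pages, 64-bit pointers and
 * CONFIG_FUNCTION_GRAPH_TRACER): struct ftrace_profile is 48 bytes
 * (16 for the hlist_node plus four 8-byte fields) and the page header
 * (next + index) takes 16 bytes, so PROFILE_RECORDS_SIZE is 4080 and
 * PROFILES_PER_PAGE works out to 85 records per page.
 */
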
static int ftrace_profile_enabled __read_mostly;

/* ftrace_profile_lock - synchronize the enable and disable of the profiler */
static DEFINE_MUTEX(ftrace_profile_lock);

static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);

#define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)

static void *
function_stat_next(void *v, int idx)
{
	struct ftrace_profile *rec = v;
	struct ftrace_profile_page *pg;

	pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);

 again:
	if (idx != 0)
		rec++;

	if ((void *)rec >= (void *)&pg->records[pg->index]) {
		pg = pg->next;
		if (!pg)
			return NULL;
		rec = &pg->records[0];
		if (!rec->counter)
			goto again;
	}

	return rec;
}

static void *function_stat_start(struct tracer_stat *trace)
{
	struct ftrace_profile_stat *stat =
		container_of(trace, struct ftrace_profile_stat, stat);

	if (!stat || !stat->start)
		return NULL;

	return function_stat_next(&stat->start->records[0], 0);
}

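/*
 * The two callbacks above implement the tracer_stat iterator contract
 * (see trace_stat.h): stat_start returns the first record and
 * stat_next(v, idx) the record following v, with NULL ending the walk.
 * When the walk crosses into a new page, a leading record that was
 * never hit (counter == 0) is skipped.
 */
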
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/* function graph compares on total time */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->time < b->time)
		return -1;
	if (a->time > b->time)
		return 1;
	else
		return 0;
}
#else
/* not function graph compares against hits */
static int function_stat_cmp(void *p1, void *p2)
{
	struct ftrace_profile *a = p1;
	struct ftrace_profile *b = p2;

	if (a->counter < b->counter)
		return -1;
	if (a->counter > b->counter)
		return 1;
	else
		return 0;
}
#endif

static int function_stat_headers(struct seq_file *m)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "  Function                               "
		    "Hit    Time            Avg             s^2\n"
		    "  --------                               "
		    "---    ----            ---             ---\n");
#else
	seq_puts(m, "  Function                               Hit\n"
		    "  --------                               ---\n");
#endif
	return 0;
}

static int function_stat_show(struct seq_file *m, void *v)
{
	struct ftrace_profile *rec = v;
	char str[KSYM_SYMBOL_LEN];
	int ret = 0;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	static struct trace_seq s;
	unsigned long long avg;
	unsigned long long stddev;
#endif
	mutex_lock(&ftrace_profile_lock);

	/* we raced with function_profile_reset() */
	if (unlikely(rec->counter == 0)) {
		ret = -EBUSY;
		goto out;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	avg = rec->time;
	do_div(avg, rec->counter);
	if (tracing_thresh && (avg < tracing_thresh))
		goto out;
#endif

	kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
	seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	seq_puts(m, "    ");

	/* Sample standard deviation (s^2) */
	if (rec->counter <= 1)
		stddev = 0;
	else {
		/*
		 * Apply Welford's method:
		 * s^2 = 1 / (n * (n-1)) * (n * \Sum (x_i)^2 - (\Sum x_i)^2)
		 */
		stddev = rec->counter * rec->time_squared -
			 rec->time * rec->time;

		/*
		 * Divide only 1000 for ns^2 -> us^2 conversion.
		 * trace_print_graph_duration will divide 1000 again.
		 */
		do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
	}

	trace_seq_init(&s);
	trace_print_graph_duration(rec->time, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(avg, &s);
	trace_seq_puts(&s, "    ");
	trace_print_graph_duration(stddev, &s);
	trace_print_seq(m, &s);
#endif
	seq_putc(m, '\n');
out:
	mutex_unlock(&ftrace_profile_lock);

	return ret;
}

static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;

	pg = stat->pages = stat->start;

	while (pg) {
		memset(pg->records, 0, PROFILE_RECORDS_SIZE);
		pg->index = 0;
		pg = pg->next;
	}

	memset(stat->hash, 0,
	       FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
}

int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
{
	struct ftrace_profile_page *pg;
	int functions;
	int pages;
	int i;

	/* If we already allocated, do nothing */
	if (stat->pages)
		return 0;

	stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
	if (!stat->pages)
		return -ENOMEM;

#ifdef CONFIG_DYNAMIC_FTRACE
	functions = ftrace_update_tot_cnt;
#else
	/*
	 * We do not know the number of functions that exist because
	 * dynamic tracing is what counts them. With past experience
	 * we have around 20K functions. That should be more than enough.
	 * It is highly unlikely we will execute every function in
	 * the kernel.
	 */
	functions = 20000;
#endif

	pg = stat->start = stat->pages;

	pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);

	for (i = 1; i < pages; i++) {
		pg->next = (void *)get_zeroed_page(GFP_KERNEL);
		if (!pg->next)
			goto out_free;
		pg = pg->next;
	}

	return 0;

 out_free:
	pg = stat->start;
	while (pg) {
		unsigned long tmp = (unsigned long)pg;

		pg = pg->next;
		free_page(tmp);
	}

	stat->pages = NULL;
	stat->start = NULL;

	return -ENOMEM;
}

static int ftrace_profile_init_cpu(int cpu)
{
	struct ftrace_profile_stat *stat;
	int size;

	stat = &per_cpu(ftrace_profile_stats, cpu);

	if (stat->hash) {
		/* If the profile is already created, simply reset it */
		ftrace_profile_reset(stat);
		return 0;
	}

	/*
	 * We are profiling all functions, but usually only a few thousand
	 * functions are hit. We'll make a hash of 1024 items.
	 */
	size = FTRACE_PROFILE_HASH_SIZE;

	stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);

	if (!stat->hash)
		return -ENOMEM;

	/* Preallocate the function profiling pages */
	if (ftrace_profile_pages_init(stat) < 0) {
		kfree(stat->hash);
		stat->hash = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int ftrace_profile_init(void)
{
	int cpu;
	int ret = 0;

	for_each_possible_cpu(cpu) {
		ret = ftrace_profile_init_cpu(cpu);
		if (ret)
			break;
	}

	return ret;
}

/* interrupts must be disabled */
static struct ftrace_profile *
ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec;
	struct hlist_head *hhd;
	unsigned long key;

	key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
	hhd = &stat->hash[key];

	if (hlist_empty(hhd))
		return NULL;

	hlist_for_each_entry_rcu_notrace(rec, hhd, node) {
		if (rec->ip == ip)
			return rec;
	}

	return NULL;
}

static void ftrace_add_profile(struct ftrace_profile_stat *stat,
			       struct ftrace_profile *rec)
{
	unsigned long key;

	key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
	hlist_add_head_rcu(&rec->node, &stat->hash[key]);
}

/*
 * The memory is already allocated, this simply finds a new record to use.
 */
static struct ftrace_profile *
ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
{
	struct ftrace_profile *rec = NULL;

	/* prevent recursion (from NMIs) */
	if (atomic_inc_return(&stat->disabled) != 1)
		goto out;

	/*
	 * Try to find the function again since an NMI
	 * could have added it
	 */
	rec = ftrace_find_profiled_func(stat, ip);
	if (rec)
		goto out;

	if (stat->pages->index == PROFILES_PER_PAGE) {
		if (!stat->pages->next)
			goto out;
		stat->pages = stat->pages->next;
	}

	rec = &stat->pages->records[stat->pages->index++];
	rec->ip = ip;
	ftrace_add_profile(stat, rec);

 out:
	atomic_dec(&stat->disabled);

	return rec;
}

static void
function_profile_call(unsigned long ip, unsigned long parent_ip,
		      struct ftrace_ops *ops, struct pt_regs *regs)
{
	struct ftrace_profile_stat *stat;
	struct ftrace_profile *rec;
	unsigned long flags;

	if (!ftrace_profile_enabled)
		return;

	local_irq_save(flags);

	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	rec = ftrace_find_profiled_func(stat, ip);
	if (!rec) {
		rec = ftrace_profile_alloc(stat, ip);
		if (!rec)
			goto out;
	}

	rec->counter++;
 out:
	local_irq_restore(flags);
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace)
{
	function_profile_call(trace->func, 0, NULL, NULL);
	return 1;
}

static void profile_graph_return(struct ftrace_graph_ret *trace)
{
	struct ftrace_profile_stat *stat;
	unsigned long long calltime;
	struct ftrace_profile *rec;
	unsigned long flags;

	local_irq_save(flags);
	stat = this_cpu_ptr(&ftrace_profile_stats);
	if (!stat->hash || !ftrace_profile_enabled)
		goto out;

	/* If the calltime was zero'd ignore it */
	if (!trace->calltime)
		goto out;

	calltime = trace->rettime - trace->calltime;

	if (!fgraph_graph_time) {
		int index;

		index = trace->depth;

		/* Append this call time to the parent time to subtract */
		if (index)
			current->ret_stack[index - 1].subtime += calltime;

		if (current->ret_stack[index].subtime < calltime)
			calltime -= current->ret_stack[index].subtime;
		else
			calltime = 0;
	}

	rec = ftrace_find_profiled_func(stat, trace->func);
	if (rec) {
		rec->time += calltime;
		rec->time_squared += calltime * calltime;
	}

 out:
	local_irq_restore(flags);
}

static int register_ftrace_profiler(void)
{
	return register_ftrace_graph(&profile_graph_return,
				     &profile_graph_entry);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_graph();
}
#else
static struct ftrace_ops ftrace_profile_ops __read_mostly = {
	.func		= function_profile_call,
	.flags		= FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
	INIT_OPS_HASH(ftrace_profile_ops)
};

static int register_ftrace_profiler(void)
{
	return register_ftrace_function(&ftrace_profile_ops);
}

static void unregister_ftrace_profiler(void)
{
	unregister_ftrace_function(&ftrace_profile_ops);
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

static ssize_t
ftrace_profile_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	val = !!val;

	mutex_lock(&ftrace_profile_lock);
	if (ftrace_profile_enabled ^ val) {
		if (val) {
			ret = ftrace_profile_init();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}

			ret = register_ftrace_profiler();
			if (ret < 0) {
				cnt = ret;
				goto out;
			}
			ftrace_profile_enabled = 1;
		} else {
			ftrace_profile_enabled = 0;
			/*
			 * unregister_ftrace_profiler calls stop_machine
			 * so this acts like a synchronize_sched.
			 */
			unregister_ftrace_profiler();
		}
	}
 out:
	mutex_unlock(&ftrace_profile_lock);

	*ppos += cnt;

	return cnt;
}

static ssize_t
ftrace_profile_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	char buf[64];		/* big enough to hold a number */
	int r;

	r = sprintf(buf, "%u\n", ftrace_profile_enabled);
	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static const struct file_operations ftrace_profile_fops = {
	.open		= tracing_open_generic,
	.read		= ftrace_profile_read,
	.write		= ftrace_profile_write,
	.llseek		= default_llseek,
};

/* used to initialize the real stat files */
static struct tracer_stat function_stats __initdata = {
	.name		= "functions",
	.stat_start	= function_stat_start,
	.stat_next	= function_stat_next,
	.stat_cmp	= function_stat_cmp,
	.stat_headers	= function_stat_headers,
	.stat_show	= function_stat_show
};

static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
	struct ftrace_profile_stat *stat;
	struct dentry *entry;
	char *name;
	int ret;
	int cpu;

	for_each_possible_cpu(cpu) {
		stat = &per_cpu(ftrace_profile_stats, cpu);

		/* allocate enough for function name + cpu number */
		name = kmalloc(32, GFP_KERNEL);
		if (!name) {
			/*
			 * The files created are permanent, if something happens
			 * we still do not free memory.
			 */
			WARN(1,
			     "Could not allocate stat file for cpu %d\n",
			     cpu);
			return;
		}
		stat->stat = function_stats;
		snprintf(name, 32, "function%d", cpu);
		stat->stat.name = name;
		ret = register_stat_tracer(&stat->stat);
		if (ret) {
			WARN(1,
			     "Could not register function stat for cpu %d\n",
			     cpu);
			kfree(name);
			return;
		}
	}

	entry = tracefs_create_file("function_profile_enabled", 0644,
				    d_tracer, NULL, &ftrace_profile_fops);
	if (!entry)
		pr_warning("Could not create tracefs "
			   "'function_profile_enabled' entry\n");
}

#else /* CONFIG_FUNCTION_PROFILER */
static __init void ftrace_profile_tracefs(struct dentry *d_tracer)
{
}
#endif /* CONFIG_FUNCTION_PROFILER */

static struct pid * const ftrace_swapper_pid = &init_struct_pid;

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int ftrace_graph_active;
#else
# define ftrace_graph_active 0
#endif

#ifdef CONFIG_DYNAMIC_FTRACE

static struct ftrace_ops *removed_ops;

/*
 * Set when doing a global update, like enabling all recs or disabling them.
 * It is not set when just updating a single ftrace_ops.
 */
static bool update_all_ops;

#ifndef CONFIG_FTRACE_MCOUNT_RECORD
# error Dynamic ftrace depends on MCOUNT_RECORD
#endif

static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;

struct ftrace_func_probe {
	struct hlist_node	node;
	struct ftrace_probe_ops	*ops;
	unsigned long		flags;
	unsigned long		ip;
	void			*data;
	struct list_head	free_list;
};

struct ftrace_func_entry {
	struct hlist_node hlist;
	unsigned long ip;
};

struct ftrace_hash {
	unsigned long		size_bits;
	struct hlist_head	*buckets;
	unsigned long		count;
	struct rcu_head		rcu;
};

/*
 * We make these constant because no one should touch them,
 * but they are used as the default "empty hash", to avoid allocating
 * it all the time. These are in a read only section such that if
 * anyone does try to modify it, it will cause an exception.
 */
static const struct hlist_head empty_buckets[1];
static const struct ftrace_hash empty_hash = {
	.buckets = (struct hlist_head *)empty_buckets,
};
#define EMPTY_HASH	((struct ftrace_hash *)&empty_hash)

static struct ftrace_ops global_ops = {
	.func				= ftrace_stub,
	.local_hash.notrace_hash	= EMPTY_HASH,
	.local_hash.filter_hash		= EMPTY_HASH,
	INIT_OPS_HASH(global_ops)
	.flags				= FTRACE_OPS_FL_RECURSION_SAFE |
					  FTRACE_OPS_FL_INITIALIZED |
					  FTRACE_OPS_FL_PID,
};

/*
 * This is used by __kernel_text_address() to return true if the
 * address is on a dynamically allocated trampoline that would
 * not return true for either core_kernel_text() or
 * is_module_text_address().
 */
bool is_ftrace_trampoline(unsigned long addr)
{
	struct ftrace_ops *op;
	bool ret = false;

	/*
	 * Some of the ops may be dynamically allocated,
	 * they are freed after a synchronize_sched().
	 */
	preempt_disable_notrace();

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/*
		 * This is to check for dynamically allocated trampolines.
		 * Trampolines that are in kernel text will have
		 * core_kernel_text() return true.
		 */
		if (op->trampoline && op->trampoline_size)
			if (addr >= op->trampoline &&
			    addr < op->trampoline + op->trampoline_size) {
				ret = true;
				goto out;
			}
	} while_for_each_ftrace_op(op);

 out:
	preempt_enable_notrace();

	return ret;
}

struct ftrace_page {
	struct ftrace_page	*next;
	struct dyn_ftrace	*records;
	int			index;
	int			size;
};

#define ENTRY_SIZE sizeof(struct dyn_ftrace)
#define ENTRIES_PER_PAGE (PAGE_SIZE / ENTRY_SIZE)

/* estimate from running different kernels */
#define NR_TO_INIT		10000

static struct ftrace_page	*ftrace_pages_start;
static struct ftrace_page	*ftrace_pages;

static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
{
	return !hash || !hash->count;
}

static struct ftrace_func_entry *
ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
{
	unsigned long key;
	struct ftrace_func_entry *entry;
	struct hlist_head *hhd;

	if (ftrace_hash_empty(hash))
		return NULL;

	if (hash->size_bits > 0)
		key = hash_long(ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];

	hlist_for_each_entry_rcu_notrace(entry, hhd, hlist) {
		if (entry->ip == ip)
			return entry;
	}
	return NULL;
}

static void __add_hash_entry(struct ftrace_hash *hash,
			     struct ftrace_func_entry *entry)
{
	struct hlist_head *hhd;
	unsigned long key;

	if (hash->size_bits)
		key = hash_long(entry->ip, hash->size_bits);
	else
		key = 0;

	hhd = &hash->buckets[key];
	hlist_add_head(&entry->hlist, hhd);
	hash->count++;
}

static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
{
	struct ftrace_func_entry *entry;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->ip = ip;
	__add_hash_entry(hash, entry);

	return 0;
}

static void
free_hash_entry(struct ftrace_hash *hash,
		struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	kfree(entry);
	hash->count--;
}

static void
remove_hash_entry(struct ftrace_hash *hash,
		  struct ftrace_func_entry *entry)
{
	hlist_del(&entry->hlist);
	hash->count--;
}

static void ftrace_hash_clear(struct ftrace_hash *hash)
{
	struct hlist_head *hhd;
	struct hlist_node *tn;
	struct ftrace_func_entry *entry;
	int size = 1 << hash->size_bits;
	int i;

	if (!hash->count)
		return;

	for (i = 0; i < size; i++) {
		hhd = &hash->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist)
			free_hash_entry(hash, entry);
	}
	FTRACE_WARN_ON(hash->count);
}

static void free_ftrace_hash(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	ftrace_hash_clear(hash);
	kfree(hash->buckets);
	kfree(hash);
}

static void __free_ftrace_hash_rcu(struct rcu_head *rcu)
{
	struct ftrace_hash *hash;

	hash = container_of(rcu, struct ftrace_hash, rcu);
	free_ftrace_hash(hash);
}

static void free_ftrace_hash_rcu(struct ftrace_hash *hash)
{
	if (!hash || hash == EMPTY_HASH)
		return;
	call_rcu_sched(&hash->rcu, __free_ftrace_hash_rcu);
}

void ftrace_free_filter(struct ftrace_ops *ops)
{
	ftrace_ops_init(ops);
	free_ftrace_hash(ops->func_hash->filter_hash);
	free_ftrace_hash(ops->func_hash->notrace_hash);
}

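/*
 * Typical lifecycle of these hashes, sketched from the helpers above
 * (illustrative only):
 *
 *	struct ftrace_hash *hash;
 *
 *	hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
 *	if (hash && !add_hash_entry(hash, rec->ip))
 *		...;			// use the hash
 *	free_ftrace_hash(hash);		// or free_ftrace_hash_rcu() once
 *					// concurrent readers may hold it
 */
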
static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
{
	struct ftrace_hash *hash;
	int size;

	hash = kzalloc(sizeof(*hash), GFP_KERNEL);
	if (!hash)
		return NULL;

	size = 1 << size_bits;
	hash->buckets = kcalloc(size, sizeof(*hash->buckets), GFP_KERNEL);

	if (!hash->buckets) {
		kfree(hash);
		return NULL;
	}

	hash->size_bits = size_bits;

	return hash;
}

static struct ftrace_hash *
alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
{
	struct ftrace_func_entry *entry;
	struct ftrace_hash *new_hash;
	int size;
	int ret;
	int i;

	new_hash = alloc_ftrace_hash(size_bits);
	if (!new_hash)
		return NULL;

	/* Empty hash? */
	if (ftrace_hash_empty(hash))
		return new_hash;

	size = 1 << hash->size_bits;
	for (i = 0; i < size; i++) {
		hlist_for_each_entry(entry, &hash->buckets[i], hlist) {
			ret = add_hash_entry(new_hash, entry->ip);
			if (ret < 0)
				goto free_hash;
		}
	}

	FTRACE_WARN_ON(new_hash->count != hash->count);

	return new_hash;

 free_hash:
	free_ftrace_hash(new_hash);
	return NULL;
}

static void
ftrace_hash_rec_disable_modify(struct ftrace_ops *ops, int filter_hash);
static void
ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);

static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
				       struct ftrace_hash *new_hash);

static int
ftrace_hash_move(struct ftrace_ops *ops, int enable,
		 struct ftrace_hash **dst, struct ftrace_hash *src)
{
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
	int ret;
	int i;

	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
		return -EINVAL;

	/*
	 * If the new source is empty, just free dst and assign it
	 * the empty_hash.
	 */
	if (!src->count) {
		new_hash = EMPTY_HASH;
		goto update;
	}

	/*
	 * Make the hash size about 1/2 the # found
	 */
	for (size /= 2; size; size >>= 1)
		bits++;

	/* Don't allocate too much */
	if (bits > FTRACE_HASH_MAX_BITS)
		bits = FTRACE_HASH_MAX_BITS;

	new_hash = alloc_ftrace_hash(bits);
	if (!new_hash)
		return -ENOMEM;

	size = 1 << src->size_bits;
	for (i = 0; i < size; i++) {
		hhd = &src->buckets[i];
		hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
			remove_hash_entry(src, entry);
			__add_hash_entry(new_hash, entry);
		}
	}

update:
	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
	if (enable) {
		/* IPMODIFY should be updated only when filter_hash updating */
		ret = ftrace_hash_ipmodify_update(ops, new_hash);
		if (ret < 0) {
			free_ftrace_hash(new_hash);
			return ret;
		}
	}

	/*
	 * Remove the current set, update the hash and add
	 * them back.
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

	rcu_assign_pointer(*dst, new_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
}

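/*
 * The sizing loop in ftrace_hash_move() picks roughly
 * bits = log2(src->count / 2). Worked example (illustrative): for a
 * source hash with 100 entries, size starts at 50 and shifts out in
 * six steps, giving bits = 6 and a 64-bucket destination hash, about
 * half the entry count.
 */
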
static bool hash_contains_ip(unsigned long ip,
			     struct ftrace_ops_hash *hash)
{
	/*
	 * The function record is a match if it exists in the filter
	 * hash and not in the notrace hash. Note, an empty hash is
	 * considered a match for the filter hash, but an empty
	 * notrace hash is considered not in the notrace hash.
	 */
	return (ftrace_hash_empty(hash->filter_hash) ||
		ftrace_lookup_ip(hash->filter_hash, ip)) &&
		(ftrace_hash_empty(hash->notrace_hash) ||
		 !ftrace_lookup_ip(hash->notrace_hash, ip));
}

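/*
 * Summary of hash_contains_ip() for a given ip (illustrative):
 *
 *	filter_hash	notrace_hash	result
 *	-----------	------------	------
 *	empty		empty		match
 *	contains ip	empty		match
 *	empty		contains ip	no match
 *	contains ip	contains ip	no match
 */
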
/*
 * Test the hashes for this ops to see if we want to call
 * the ops->func or not.
 *
 * It's a match if the ip is in the ops->filter_hash or
 * the filter_hash does not exist or is empty,
 *  AND
 * the ip is not in the ops->notrace_hash.
 *
 * This needs to be called with preemption disabled as
 * the hashes are freed with call_rcu_sched().
 */
static int
ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
{
	struct ftrace_ops_hash hash;
	int ret;

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
	/*
	 * There's a small race when adding ops that the ftrace handler
	 * that wants regs, may be called without them. We can not
	 * allow that handler to be called if regs is NULL.
	 */
	if (regs == NULL && (ops->flags & FTRACE_OPS_FL_SAVE_REGS))
		return 0;
#endif

	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;

	return ret;
}

/*
 * This is a double for. Do not use 'break' to break out of the loop,
 * you must use a goto.
 */
#define do_for_each_ftrace_rec(pg, rec)					\
	for (pg = ftrace_pages_start; pg; pg = pg->next) {		\
		int _____i;						\
		for (_____i = 0; _____i < pg->index; _____i++) {	\
			rec = &pg->records[_____i];

#define while_for_each_ftrace_rec()		\
		}				\
	}

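/*
 * Example usage, with the goto-based early exit the comment above
 * requires (illustrative; target_ip is a hypothetical local):
 *
 *	do_for_each_ftrace_rec(pg, rec) {
 *		if (rec->ip == target_ip)
 *			goto found;
 *	} while_for_each_ftrace_rec();
 *	return;
 * found:
 *	...
 */
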
static int ftrace_cmp_recs(const void *a, const void *b)
{
	const struct dyn_ftrace *key = a;
	const struct dyn_ftrace *rec = b;

	if (key->flags < rec->ip)
		return -1;
	if (key->ip >= rec->ip + MCOUNT_INSN_SIZE)
		return 1;
	return 0;
}

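/*
 * Note the asymmetry above: the "key" passed to bsearch() carries a
 * [start, end] range, with end overloaded into key->flags, so a record
 * matches when that range overlaps [rec->ip, rec->ip + MCOUNT_INSN_SIZE).
 * For a single-address lookup, ftrace_location() below sets start == end.
 */
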
static unsigned long ftrace_location_range(unsigned long start, unsigned long end)
{
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	struct dyn_ftrace key;

	key.ip = start;
	key.flags = end;	/* overload flags, as it is unsigned long */

	for (pg = ftrace_pages_start; pg; pg = pg->next) {
		if (end < pg->records[0].ip ||
		    start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE))
			continue;
		rec = bsearch(&key, pg->records, pg->index,
			      sizeof(struct dyn_ftrace),
			      ftrace_cmp_recs);
		if (rec)
			return rec->ip;
	}

	return 0;
}

/**
 * ftrace_location - return true if the ip given is a traced location
 * @ip: the instruction pointer to check
 *
 * Returns rec->ip if @ip given is a pointer to a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
unsigned long ftrace_location(unsigned long ip)
{
	return ftrace_location_range(ip, ip);
}

/**
 * ftrace_text_reserved - return true if range contains an ftrace location
 * @start: start of range to search
 * @end: end of range to search (inclusive). @end points to the last byte to check.
 *
 * Returns 1 if @start and @end contains a ftrace location.
 * That is, the instruction that is either a NOP or call to
 * the function tracer. It checks the ftrace internal tables to
 * determine if the address belongs or not.
 */
int ftrace_text_reserved(const void *start, const void *end)
{
	unsigned long ret;

	ret = ftrace_location_range((unsigned long)start,
				    (unsigned long)end);

	return (int)!!ret;
}

/* Test if ops registered to this rec needs regs */
static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
{
	struct ftrace_ops *ops;
	bool keep_regs = false;

	for (ops = ftrace_ops_list;
	     ops != &ftrace_list_end; ops = ops->next) {
		/* pass rec in as regs to have non-NULL val */
		if (ftrace_ops_test(ops, rec->ip, rec)) {
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				keep_regs = true;
				break;
			}
		}
	}

	return keep_regs;
}

static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
{
	struct ftrace_hash *hash;
	struct ftrace_hash *other_hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
	int count = 0;
	int all = 0;

	/* Only update if the ops has been registered */
	if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
		return;

	/*
	 * In the filter_hash case:
	 *   If the count is zero, we update all records.
	 *   Otherwise we just update the items in the hash.
	 *
	 * In the notrace_hash case:
	 *   We enable the update in the hash.
	 *   As disabling notrace means enabling the tracing,
	 *   and enabling notrace means disabling, the inc variable
	 *   gets inverted.
	 */
	if (filter_hash) {
		hash = ops->func_hash->filter_hash;
		other_hash = ops->func_hash->notrace_hash;
		if (ftrace_hash_empty(hash))
			all = 1;
	} else {
		inc = !inc;
		hash = ops->func_hash->notrace_hash;
		other_hash = ops->func_hash->filter_hash;
		/*
		 * If the notrace hash has no items,
		 * then there's nothing to do.
		 */
		if (ftrace_hash_empty(hash))
			return;
	}

	do_for_each_ftrace_rec(pg, rec) {
		int in_other_hash = 0;
		int in_hash = 0;
		int match = 0;

		if (all) {
			/*
			 * Only the filter_hash affects all records.
			 * Update if the record is not in the notrace hash.
			 */
			if (!other_hash || !ftrace_lookup_ip(other_hash, rec->ip))
				match = 1;
		} else {
			in_hash = !!ftrace_lookup_ip(hash, rec->ip);
			in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);

			/*
			 * If filter_hash is set, we want to match all functions
			 * that are in the hash but not in the other hash.
			 *
			 * If filter_hash is not set, then we are decrementing.
			 * That means we match anything that is in the hash
			 * and also in the other_hash. That is, we need to turn
			 * off functions in the other hash because they are disabled
			 * by this hash.
			 */
			if (filter_hash && in_hash && !in_other_hash)
				match = 1;
			else if (!filter_hash && in_hash &&
				 (in_other_hash || ftrace_hash_empty(other_hash)))
				match = 1;
		}
		if (!match)
			continue;

		if (inc) {
			rec->flags++;
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == FTRACE_REF_MAX))
				return;

			/*
			 * If there's only a single callback registered to a
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
			 * then all ops will get saved regs.
			 */
			if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
				rec->flags |= FTRACE_FL_REGS;
		} else {
			if (FTRACE_WARN_ON(ftrace_rec_count(rec) == 0))
				return;
			rec->flags--;

			/*
			 * If the rec had REGS enabled and the ops that is
			 * being removed had REGS set, then see if there is
			 * still any ops for this record that wants regs.
			 * If not, we can stop recording them.
			 */
			if (ftrace_rec_count(rec) > 0 &&
			    rec->flags & FTRACE_FL_REGS &&
			    ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
				if (!test_rec_ops_needs_regs(rec))
					rec->flags &= ~FTRACE_FL_REGS;
			}

			/*
			 * If the rec had TRAMP enabled, then it needs to
			 * be cleared. As TRAMP can only be enabled iff
			 * there is only a single ops attached to it.
			 * In other words, always disable it on decrementing.
			 * In the future, we may set it if rec count is
			 * decremented to one, and the ops that is left
			 * has a trampoline.
			 */
			rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * flags will be cleared in ftrace_check_record()
			 * if rec count is zero.
			 */
		}
		count++;
		/* Shortcut, if we handled all records, we are done. */
		if (!all && count == hash->count)
			return;
	} while_for_each_ftrace_rec();
}

static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
				    int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
				   int filter_hash)
{
	__ftrace_hash_rec_update(ops, filter_hash, 1);
}

static void ftrace_hash_rec_update_modify(struct ftrace_ops *ops,
					  int filter_hash, int inc)
{
	struct ftrace_ops *op;

	__ftrace_hash_rec_update(ops, filter_hash, inc);

	if (ops->func_hash != &global_ops.local_hash)
		return;

	/*
	 * If the ops shares the global_ops hash, then we need to update
	 * all ops that are enabled and use this hash.
	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* Already done */
		if (op == ops)
			continue;
		if (op->func_hash == &global_ops.local_hash)
			__ftrace_hash_rec_update(op, filter_hash, inc);
	} while_for_each_ftrace_op(op);
}

static void ftrace_hash_rec_disable_modify(struct ftrace_ops *ops,
					   int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 0);
}

static void ftrace_hash_rec_enable_modify(struct ftrace_ops *ops,
					  int filter_hash)
{
	ftrace_hash_rec_update_modify(ops, filter_hash, 1);
}

f8b8be8a
MH
1835/*
1836 * Try to update IPMODIFY flag on each ftrace_rec. Return 0 if it is OK
1837 * or no-needed to update, -EBUSY if it detects a conflict of the flag
1838 * on a ftrace_rec, and -EINVAL if the new_hash tries to trace all recs.
1839 * Note that old_hash and new_hash has below meanings
1840 * - If the hash is NULL, it hits all recs (if IPMODIFY is set, this is rejected)
1841 * - If the hash is EMPTY_HASH, it hits nothing
1842 * - Anything else hits the recs which match the hash entries.
1843 */
1844static int __ftrace_hash_update_ipmodify(struct ftrace_ops *ops,
1845 struct ftrace_hash *old_hash,
1846 struct ftrace_hash *new_hash)
1847{
1848 struct ftrace_page *pg;
1849 struct dyn_ftrace *rec, *end = NULL;
1850 int in_old, in_new;
1851
1852 /* Only update if the ops has been registered */
1853 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1854 return 0;
1855
1856 if (!(ops->flags & FTRACE_OPS_FL_IPMODIFY))
1857 return 0;
1858
1859 /*
1860 * Since the IPMODIFY is a very address sensitive action, we do not
1861 * allow ftrace_ops to set all functions to new hash.
1862 */
1863 if (!new_hash || !old_hash)
1864 return -EINVAL;
1865
1866 /* Update rec->flags */
1867 do_for_each_ftrace_rec(pg, rec) {
1868 /* We need to update only differences of filter_hash */
1869 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1870 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1871 if (in_old == in_new)
1872 continue;
1873
1874 if (in_new) {
1875 /* New entries must ensure no others are using it */
1876 if (rec->flags & FTRACE_FL_IPMODIFY)
1877 goto rollback;
1878 rec->flags |= FTRACE_FL_IPMODIFY;
1879 } else /* Removed entry */
1880 rec->flags &= ~FTRACE_FL_IPMODIFY;
1881 } while_for_each_ftrace_rec();
1882
1883 return 0;
1884
1885rollback:
1886 end = rec;
1887
1888 /* Roll back what we did above */
1889 do_for_each_ftrace_rec(pg, rec) {
1890 if (rec == end)
1891 goto err_out;
1892
1893 in_old = !!ftrace_lookup_ip(old_hash, rec->ip);
1894 in_new = !!ftrace_lookup_ip(new_hash, rec->ip);
1895 if (in_old == in_new)
1896 continue;
1897
1898 if (in_new)
1899 rec->flags &= ~FTRACE_FL_IPMODIFY;
1900 else
1901 rec->flags |= FTRACE_FL_IPMODIFY;
1902 } while_for_each_ftrace_rec();
1903
1904err_out:
1905 return -EBUSY;
1906}
1907
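/*
 * A minimal usage sketch (illustrative only; my_handler and target_ip
 * are hypothetical names): a live-patching style user that redirects
 * the instruction pointer must set IPMODIFY, and only one such ops may
 * attach to any given record:
 *
 *	static struct ftrace_ops patch_ops = {
 *		.func	= my_handler,
 *		.flags	= FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
 *	};
 *
 *	ftrace_set_filter_ip(&patch_ops, target_ip, 0, 0);
 *	register_ftrace_function(&patch_ops);
 *
 * A second IPMODIFY ops filtering on the same target_ip would hit the
 * rollback path above and fail its registration with -EBUSY.
 */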
1908static int ftrace_hash_ipmodify_enable(struct ftrace_ops *ops)
1909{
1910 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1911
1912 if (ftrace_hash_empty(hash))
1913 hash = NULL;
1914
1915 return __ftrace_hash_update_ipmodify(ops, EMPTY_HASH, hash);
1916}
1917
1918/* Disabling always succeeds */
1919static void ftrace_hash_ipmodify_disable(struct ftrace_ops *ops)
1920{
1921 struct ftrace_hash *hash = ops->func_hash->filter_hash;
1922
1923 if (ftrace_hash_empty(hash))
1924 hash = NULL;
1925
1926 __ftrace_hash_update_ipmodify(ops, hash, EMPTY_HASH);
1927}
1928
1929static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
1930 struct ftrace_hash *new_hash)
1931{
1932 struct ftrace_hash *old_hash = ops->func_hash->filter_hash;
1933
1934 if (ftrace_hash_empty(old_hash))
1935 old_hash = NULL;
1936
1937 if (ftrace_hash_empty(new_hash))
1938 new_hash = NULL;
1939
1940 return __ftrace_hash_update_ipmodify(ops, old_hash, new_hash);
1941}
1942
b05086c7 1943static void print_ip_ins(const char *fmt, const unsigned char *p)
b17e8a37
SR
1944{
1945 int i;
1946
1947 printk(KERN_CONT "%s", fmt);
1948
1949 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1950 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1951}
1952
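/*
 * For example, on x86_64 (MCOUNT_INSN_SIZE == 5) a mismatch report
 * might look like the following: a 5-byte NOP found where a call was
 * expected (the xx bytes stand for the call offset; shown purely as
 * illustration):
 *
 *	actual:   0f:1f:44:00:00
 *	expected: e8:xx:xx:xx:xx
 */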
4fd3279b
SRRH
1953static struct ftrace_ops *
1954ftrace_find_tramp_ops_any(struct dyn_ftrace *rec);
1955
02a392a0 1956enum ftrace_bug_type ftrace_bug_type;
b05086c7 1957const void *ftrace_expected;
02a392a0
SRRH
1958
1959static void print_bug_type(void)
1960{
1961 switch (ftrace_bug_type) {
1962 case FTRACE_BUG_UNKNOWN:
1963 break;
1964 case FTRACE_BUG_INIT:
1965 pr_info("Initializing ftrace call sites\n");
1966 break;
1967 case FTRACE_BUG_NOP:
1968 pr_info("Setting ftrace call site to NOP\n");
1969 break;
1970 case FTRACE_BUG_CALL:
1971 pr_info("Setting ftrace call site to call ftrace function\n");
1972 break;
1973 case FTRACE_BUG_UPDATE:
1974 pr_info("Updating ftrace call site to call a different ftrace function\n");
1975 break;
1976 }
1977}
1978
c88fd863
SR
1979/**
1980 * ftrace_bug - report and shutdown function tracer
1981 * @failed: The failed type (EFAULT, EINVAL, EPERM)
4fd3279b 1982 * @rec: The record that failed
c88fd863
SR
1983 *
1984 * The arch code that enables or disables the function tracing
1985 * can call ftrace_bug() when it has detected a problem in
 1986 * modifying the code. @failed should be one of:
1987 * EFAULT - if the problem happens on reading the @ip address
1988 * EINVAL - if what is read at @ip is not what was expected
 1989 * EPERM - if the problem happens on writing to the @ip address
1990 */
4fd3279b 1991void ftrace_bug(int failed, struct dyn_ftrace *rec)
b17e8a37 1992{
4fd3279b
SRRH
1993 unsigned long ip = rec ? rec->ip : 0;
1994
b17e8a37
SR
1995 switch (failed) {
1996 case -EFAULT:
1997 FTRACE_WARN_ON_ONCE(1);
1998 pr_info("ftrace faulted on modifying ");
1999 print_ip_sym(ip);
2000 break;
2001 case -EINVAL:
2002 FTRACE_WARN_ON_ONCE(1);
2003 pr_info("ftrace failed to modify ");
2004 print_ip_sym(ip);
b05086c7 2005 print_ip_ins(" actual: ", (unsigned char *)ip);
4fd3279b 2006 pr_cont("\n");
b05086c7
SRRH
2007 if (ftrace_expected) {
2008 print_ip_ins(" expected: ", ftrace_expected);
2009 pr_cont("\n");
2010 }
b17e8a37
SR
2011 break;
2012 case -EPERM:
2013 FTRACE_WARN_ON_ONCE(1);
2014 pr_info("ftrace faulted on writing ");
2015 print_ip_sym(ip);
2016 break;
2017 default:
2018 FTRACE_WARN_ON_ONCE(1);
2019 pr_info("ftrace faulted on unknown error ");
2020 print_ip_sym(ip);
2021 }
02a392a0 2022 print_bug_type();
4fd3279b
SRRH
2023 if (rec) {
2024 struct ftrace_ops *ops = NULL;
2025
2026 pr_info("ftrace record flags: %lx\n", rec->flags);
2027 pr_cont(" (%ld)%s", ftrace_rec_count(rec),
2028 rec->flags & FTRACE_FL_REGS ? " R" : " ");
2029 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2030 ops = ftrace_find_tramp_ops_any(rec);
2031 if (ops)
2032 pr_cont("\ttramp: %pS",
2033 (void *)ops->trampoline);
2034 else
2035 pr_cont("\ttramp: ERROR!");
2036
2037 }
2038 ip = ftrace_get_addr_curr(rec);
2039 pr_cont(" expected tramp: %lx\n", ip);
2040 }
b17e8a37
SR
2041}
2042
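/*
 * A minimal sketch of the arch side (the helper name and layout are
 * illustrative, not taken from any real arch): before comparing, the
 * arch publishes the bytes it wants via ftrace_expected so that a
 * later ftrace_bug() can print both sides:
 *
 *	static int arch_verify_code(struct dyn_ftrace *rec,
 *				    const unsigned char *new_code)
 *	{
 *		unsigned char cur[MCOUNT_INSN_SIZE];
 *
 *		ftrace_expected = new_code;
 *		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;
 *		if (memcmp(cur, new_code, MCOUNT_INSN_SIZE))
 *			return -EINVAL;
 *		return 0;
 *	}
 *
 * On error the caller does ftrace_bug(ret, rec), which prints the
 * actual bytes and, because ftrace_expected is set, the expected ones.
 */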
c88fd863 2043static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
5072c59f 2044{
64fbcd16 2045 unsigned long flag = 0UL;
e7d3737e 2046
02a392a0
SRRH
2047 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2048
982c350b 2049 /*
30fb6aa7 2050 * If we are updating calls:
982c350b 2051 *
ed926f9b
SR
2052 * If the record has a ref count, then we need to enable it
2053 * because someone is using it.
982c350b 2054 *
ed926f9b
SR
2055 * Otherwise we make sure its disabled.
2056 *
30fb6aa7 2057 * If we are disabling calls, then disable all records that
ed926f9b 2058 * are enabled.
982c350b 2059 */
0376bde1 2060 if (enable && ftrace_rec_count(rec))
ed926f9b 2061 flag = FTRACE_FL_ENABLED;
982c350b 2062
08f6fba5 2063 /*
79922b80
SRRH
2064 * If enabling and the REGS flag does not match the REGS_EN, or
2065 * the TRAMP flag doesn't match the TRAMP_EN, then do not ignore
2066 * this record. Set flags to fail the compare against ENABLED.
08f6fba5 2067 */
79922b80
SRRH
2068 if (flag) {
2069 if (!(rec->flags & FTRACE_FL_REGS) !=
2070 !(rec->flags & FTRACE_FL_REGS_EN))
2071 flag |= FTRACE_FL_REGS;
2072
2073 if (!(rec->flags & FTRACE_FL_TRAMP) !=
2074 !(rec->flags & FTRACE_FL_TRAMP_EN))
2075 flag |= FTRACE_FL_TRAMP;
2076 }
08f6fba5 2077
64fbcd16
XG
2078 /* If the state of this record hasn't changed, then do nothing */
2079 if ((rec->flags & FTRACE_FL_ENABLED) == flag)
c88fd863 2080 return FTRACE_UPDATE_IGNORE;
982c350b 2081
64fbcd16 2082 if (flag) {
08f6fba5
SR
2083 /* Save off if rec is being enabled (for return value) */
2084 flag ^= rec->flags & FTRACE_FL_ENABLED;
2085
2086 if (update) {
c88fd863 2087 rec->flags |= FTRACE_FL_ENABLED;
08f6fba5
SR
2088 if (flag & FTRACE_FL_REGS) {
2089 if (rec->flags & FTRACE_FL_REGS)
2090 rec->flags |= FTRACE_FL_REGS_EN;
2091 else
2092 rec->flags &= ~FTRACE_FL_REGS_EN;
2093 }
79922b80
SRRH
2094 if (flag & FTRACE_FL_TRAMP) {
2095 if (rec->flags & FTRACE_FL_TRAMP)
2096 rec->flags |= FTRACE_FL_TRAMP_EN;
2097 else
2098 rec->flags &= ~FTRACE_FL_TRAMP_EN;
2099 }
08f6fba5
SR
2100 }
2101
2102 /*
2103 * If this record is being updated from a nop, then
2104 * return UPDATE_MAKE_CALL.
08f6fba5
SR
2105 * Otherwise,
2106 * return UPDATE_MODIFY_CALL to tell the caller to convert
f1b2f2bd 2107 * from the save regs, to a non-save regs function or
79922b80 2108 * vice versa, or from a trampoline call.
08f6fba5 2109 */
02a392a0
SRRH
2110 if (flag & FTRACE_FL_ENABLED) {
2111 ftrace_bug_type = FTRACE_BUG_CALL;
08f6fba5 2112 return FTRACE_UPDATE_MAKE_CALL;
02a392a0 2113 }
f1b2f2bd 2114
02a392a0 2115 ftrace_bug_type = FTRACE_BUG_UPDATE;
f1b2f2bd 2116 return FTRACE_UPDATE_MODIFY_CALL;
c88fd863
SR
2117 }
2118
08f6fba5
SR
2119 if (update) {
2120 /* If there's no more users, clear all flags */
0376bde1 2121 if (!ftrace_rec_count(rec))
08f6fba5
SR
2122 rec->flags = 0;
2123 else
b24d443b
SRRH
2124 /*
2125 * Just disable the record, but keep the ops TRAMP
2126 * and REGS states. The _EN flags must be disabled though.
2127 */
2128 rec->flags &= ~(FTRACE_FL_ENABLED | FTRACE_FL_TRAMP_EN |
2129 FTRACE_FL_REGS_EN);
08f6fba5 2130 }
c88fd863 2131
02a392a0 2132 ftrace_bug_type = FTRACE_BUG_NOP;
c88fd863
SR
2133 return FTRACE_UPDATE_MAKE_NOP;
2134}
2135
2136/**
2137 * ftrace_update_record, set a record that now is tracing or not
2138 * @rec: the record to update
2139 * @enable: set to 1 if the record is tracing, zero to force disable
2140 *
2141 * The records that represent all functions that can be traced need
2142 * to be updated when tracing has been enabled.
2143 */
2144int ftrace_update_record(struct dyn_ftrace *rec, int enable)
2145{
2146 return ftrace_check_record(rec, enable, 1);
2147}
2148
2149/**
2150 * ftrace_test_record, check if the record has been enabled or not
2151 * @rec: the record to test
2152 * @enable: set to 1 to check if enabled, 0 if it is disabled
2153 *
2154 * The arch code may need to test if a record is already set to
2155 * tracing to determine how to modify the function code that it
2156 * represents.
2157 */
2158int ftrace_test_record(struct dyn_ftrace *rec, int enable)
2159{
2160 return ftrace_check_record(rec, enable, 0);
2161}
2162
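/*
 * See __ftrace_replace_code() below for the canonical consumer of
 * these return values (IGNORE, MAKE_CALL, MAKE_NOP, MODIFY_CALL).
 */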
5fecaa04
SRRH
2163static struct ftrace_ops *
2164ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
2165{
2166 struct ftrace_ops *op;
fef5aeee 2167 unsigned long ip = rec->ip;
5fecaa04
SRRH
2168
2169 do_for_each_ftrace_op(op, ftrace_ops_list) {
2170
2171 if (!op->trampoline)
2172 continue;
2173
fef5aeee 2174 if (hash_contains_ip(ip, op->func_hash))
5fecaa04
SRRH
2175 return op;
2176 } while_for_each_ftrace_op(op);
2177
2178 return NULL;
2179}
2180
79922b80
SRRH
2181static struct ftrace_ops *
2182ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
2183{
2184 struct ftrace_ops *op;
fef5aeee 2185 unsigned long ip = rec->ip;
79922b80 2186
fef5aeee
SRRH
2187 /*
2188 * Need to check removed ops first.
2189 * If they are being removed, and this rec has a tramp,
2190 * and this rec is in the ops list, then it would be the
2191 * one with the tramp.
2192 */
2193 if (removed_ops) {
2194 if (hash_contains_ip(ip, &removed_ops->old_hash))
79922b80
SRRH
2195 return removed_ops;
2196 }
2197
fef5aeee
SRRH
2198 /*
2199 * Need to find the current trampoline for a rec.
2200 * Now, a trampoline is only attached to a rec if there
2201 * was a single 'ops' attached to it. But this can be called
2202 * when we are adding another op to the rec or removing the
2203 * current one. Thus, if the op is being added, we can
2204 * ignore it because it hasn't attached itself to the rec
4fc40904
SRRH
2205 * yet.
2206 *
2207 * If an ops is being modified (hooking to different functions)
2208 * then we don't care about the new functions that are being
2209 * added, just the old ones (that are probably being removed).
2210 *
2211 * If we are adding an ops to a function that already is using
2212 * a trampoline, it needs to be removed (trampolines are only
2213 * for single ops connected), then an ops that is not being
2214 * modified also needs to be checked.
fef5aeee 2215 */
79922b80 2216 do_for_each_ftrace_op(op, ftrace_ops_list) {
fef5aeee
SRRH
2217
2218 if (!op->trampoline)
2219 continue;
2220
2221 /*
2222 * If the ops is being added, it hasn't gotten to
2223 * the point to be removed from this tree yet.
2224 */
2225 if (op->flags & FTRACE_OPS_FL_ADDING)
79922b80
SRRH
2226 continue;
2227
4fc40904 2228
fef5aeee 2229 /*
4fc40904
SRRH
2230 * If the ops is being modified and is in the old
2231 * hash, then it is probably being removed from this
2232 * function.
fef5aeee 2233 */
fef5aeee
SRRH
2234 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
2235 hash_contains_ip(ip, &op->old_hash))
79922b80 2236 return op;
4fc40904
SRRH
2237 /*
2238 * If the ops is not being added or modified, and it's
2239 * in its normal filter hash, then this must be the one
2240 * we want!
2241 */
2242 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
2243 hash_contains_ip(ip, op->func_hash))
2244 return op;
79922b80
SRRH
2245
2246 } while_for_each_ftrace_op(op);
2247
2248 return NULL;
2249}
2250
2251static struct ftrace_ops *
2252ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
2253{
2254 struct ftrace_ops *op;
fef5aeee 2255 unsigned long ip = rec->ip;
79922b80
SRRH
2256
2257 do_for_each_ftrace_op(op, ftrace_ops_list) {
 2258 /* Return the first ops whose hash matches this rec's ip */
fef5aeee 2259 if (hash_contains_ip(ip, op->func_hash))
79922b80
SRRH
2260 return op;
2261 } while_for_each_ftrace_op(op);
2262
2263 return NULL;
2264}
2265
7413af1f
SRRH
2266/**
2267 * ftrace_get_addr_new - Get the call address to set to
2268 * @rec: The ftrace record descriptor
2269 *
2270 * If the record has the FTRACE_FL_REGS set, that means that it
2271 * wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
 2272 * is not set, then it wants to convert to the normal callback.
2273 *
2274 * Returns the address of the trampoline to set to
2275 */
2276unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec)
2277{
79922b80
SRRH
2278 struct ftrace_ops *ops;
2279
2280 /* Trampolines take precedence over regs */
2281 if (rec->flags & FTRACE_FL_TRAMP) {
2282 ops = ftrace_find_tramp_ops_new(rec);
2283 if (FTRACE_WARN_ON(!ops || !ops->trampoline)) {
bce0b6c5
SRRH
2284 pr_warn("Bad trampoline accounting at: %p (%pS) (%lx)\n",
2285 (void *)rec->ip, (void *)rec->ip, rec->flags);
79922b80
SRRH
2286 /* Ftrace is shutting down, return anything */
2287 return (unsigned long)FTRACE_ADDR;
2288 }
2289 return ops->trampoline;
2290 }
2291
7413af1f
SRRH
2292 if (rec->flags & FTRACE_FL_REGS)
2293 return (unsigned long)FTRACE_REGS_ADDR;
2294 else
2295 return (unsigned long)FTRACE_ADDR;
2296}
2297
2298/**
2299 * ftrace_get_addr_curr - Get the call address that is already there
2300 * @rec: The ftrace record descriptor
2301 *
2302 * The FTRACE_FL_REGS_EN is set when the record already points to
2303 * a function that saves all the regs. Basically the '_EN' version
2304 * represents the current state of the function.
2305 *
2306 * Returns the address of the trampoline that is currently being called
2307 */
2308unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec)
2309{
79922b80
SRRH
2310 struct ftrace_ops *ops;
2311
2312 /* Trampolines take precedence over regs */
2313 if (rec->flags & FTRACE_FL_TRAMP_EN) {
2314 ops = ftrace_find_tramp_ops_curr(rec);
2315 if (FTRACE_WARN_ON(!ops)) {
2316 pr_warning("Bad trampoline accounting at: %p (%pS)\n",
2317 (void *)rec->ip, (void *)rec->ip);
2318 /* Ftrace is shutting down, return anything */
2319 return (unsigned long)FTRACE_ADDR;
2320 }
2321 return ops->trampoline;
2322 }
2323
7413af1f
SRRH
2324 if (rec->flags & FTRACE_FL_REGS_EN)
2325 return (unsigned long)FTRACE_REGS_ADDR;
2326 else
2327 return (unsigned long)FTRACE_ADDR;
2328}
2329
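/*
 * Taken together, the two helpers above resolve a record's call
 * target with this precedence:
 *
 *	TRAMP / TRAMP_EN set	-> the attached ops' own trampoline
 *	REGS / REGS_EN set	-> FTRACE_REGS_ADDR (regs-saving caller)
 *	otherwise		-> FTRACE_ADDR (normal ftrace caller)
 *
 * ftrace_get_addr_new() reads the requested flags, while
 * ftrace_get_addr_curr() reads the *_EN flags that mirror what is
 * currently live in the text.
 */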
c88fd863
SR
2330static int
2331__ftrace_replace_code(struct dyn_ftrace *rec, int enable)
2332{
08f6fba5 2333 unsigned long ftrace_old_addr;
c88fd863
SR
2334 unsigned long ftrace_addr;
2335 int ret;
2336
7c0868e0 2337 ftrace_addr = ftrace_get_addr_new(rec);
c88fd863 2338
7c0868e0
SRRH
2339 /* This needs to be done before we call ftrace_update_record */
2340 ftrace_old_addr = ftrace_get_addr_curr(rec);
2341
2342 ret = ftrace_update_record(rec, enable);
08f6fba5 2343
02a392a0
SRRH
2344 ftrace_bug_type = FTRACE_BUG_UNKNOWN;
2345
c88fd863
SR
2346 switch (ret) {
2347 case FTRACE_UPDATE_IGNORE:
2348 return 0;
2349
2350 case FTRACE_UPDATE_MAKE_CALL:
02a392a0 2351 ftrace_bug_type = FTRACE_BUG_CALL;
64fbcd16 2352 return ftrace_make_call(rec, ftrace_addr);
c88fd863
SR
2353
2354 case FTRACE_UPDATE_MAKE_NOP:
02a392a0 2355 ftrace_bug_type = FTRACE_BUG_NOP;
39b5552c 2356 return ftrace_make_nop(NULL, rec, ftrace_old_addr);
08f6fba5 2357
08f6fba5 2358 case FTRACE_UPDATE_MODIFY_CALL:
02a392a0 2359 ftrace_bug_type = FTRACE_BUG_UPDATE;
08f6fba5 2360 return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
5072c59f
SR
2361 }
2362
c88fd863 2363 return -1; /* unknown ftrace bug */
5072c59f
SR
2364}
2365
e4f5d544 2366void __weak ftrace_replace_code(int enable)
3c1720f0 2367{
3c1720f0
SR
2368 struct dyn_ftrace *rec;
2369 struct ftrace_page *pg;
6a24a244 2370 int failed;
3c1720f0 2371
45a4a237
SR
2372 if (unlikely(ftrace_disabled))
2373 return;
2374
265c831c 2375 do_for_each_ftrace_rec(pg, rec) {
e4f5d544 2376 failed = __ftrace_replace_code(rec, enable);
fa9d13cf 2377 if (failed) {
4fd3279b 2378 ftrace_bug(failed, rec);
3279ba37
SR
2379 /* Stop processing */
2380 return;
3c1720f0 2381 }
265c831c 2382 } while_for_each_ftrace_rec();
3c1720f0
SR
2383}
2384
c88fd863
SR
2385struct ftrace_rec_iter {
2386 struct ftrace_page *pg;
2387 int index;
2388};
2389
2390/**
2391 * ftrace_rec_iter_start, start up iterating over traced functions
2392 *
2393 * Returns an iterator handle that is used to iterate over all
2394 * the records that represent address locations where functions
2395 * are traced.
2396 *
2397 * May return NULL if no records are available.
2398 */
2399struct ftrace_rec_iter *ftrace_rec_iter_start(void)
2400{
2401 /*
2402 * We only use a single iterator.
2403 * Protected by the ftrace_lock mutex.
2404 */
2405 static struct ftrace_rec_iter ftrace_rec_iter;
2406 struct ftrace_rec_iter *iter = &ftrace_rec_iter;
2407
2408 iter->pg = ftrace_pages_start;
2409 iter->index = 0;
2410
2411 /* Could have empty pages */
2412 while (iter->pg && !iter->pg->index)
2413 iter->pg = iter->pg->next;
2414
2415 if (!iter->pg)
2416 return NULL;
2417
2418 return iter;
2419}
2420
2421/**
2422 * ftrace_rec_iter_next, get the next record to process.
2423 * @iter: The handle to the iterator.
2424 *
2425 * Returns the next iterator after the given iterator @iter.
2426 */
2427struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter)
2428{
2429 iter->index++;
2430
2431 if (iter->index >= iter->pg->index) {
2432 iter->pg = iter->pg->next;
2433 iter->index = 0;
2434
2435 /* Could have empty pages */
2436 while (iter->pg && !iter->pg->index)
2437 iter->pg = iter->pg->next;
2438 }
2439
2440 if (!iter->pg)
2441 return NULL;
2442
2443 return iter;
2444}
2445
2446/**
2447 * ftrace_rec_iter_record, get the record at the iterator location
2448 * @iter: The current iterator location
2449 *
2450 * Returns the record that the current @iter is at.
2451 */
2452struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter)
2453{
2454 return &iter->pg->records[iter->index];
2455}
2456
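/*
 * A sketch of the intended use from arch code (for instance a
 * breakpoint-based updater); this must run under ftrace_lock, since
 * the iterator is backed by a single static instance:
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for (iter = ftrace_rec_iter_start(); iter;
 *	     iter = ftrace_rec_iter_next(iter)) {
 *		rec = ftrace_rec_iter_record(iter);
 *		... prepare or finish the update of rec->ip ...
 *	}
 */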
492a7ea5 2457static int
31e88909 2458ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
3c1720f0 2459{
593eb8a2 2460 int ret;
3c1720f0 2461
45a4a237
SR
2462 if (unlikely(ftrace_disabled))
2463 return 0;
2464
25aac9dc 2465 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
593eb8a2 2466 if (ret) {
02a392a0 2467 ftrace_bug_type = FTRACE_BUG_INIT;
4fd3279b 2468 ftrace_bug(ret, rec);
492a7ea5 2469 return 0;
37ad5084 2470 }
492a7ea5 2471 return 1;
3c1720f0
SR
2472}
2473
000ab691
SR
2474/*
2475 * archs can override this function if they must do something
2476 * before the modifying code is performed.
2477 */
2478int __weak ftrace_arch_code_modify_prepare(void)
2479{
2480 return 0;
2481}
2482
2483/*
2484 * archs can override this function if they must do something
2485 * after the modifying code is performed.
2486 */
2487int __weak ftrace_arch_code_modify_post_process(void)
2488{
2489 return 0;
2490}
2491
8ed3e2cf 2492void ftrace_modify_all_code(int command)
3d083395 2493{
59338f75 2494 int update = command & FTRACE_UPDATE_TRACE_FUNC;
cd21067f 2495 int err = 0;
59338f75
SRRH
2496
2497 /*
2498 * If the ftrace_caller calls a ftrace_ops func directly,
2499 * we need to make sure that it only traces functions it
2500 * expects to trace. When doing the switch of functions,
2501 * we need to update to the ftrace_ops_list_func first
2502 * before the transition between old and new calls are set,
2503 * as the ftrace_ops_list_func will check the ops hashes
 2504 * to make sure only the right functions are traced
 2505 * for each ops.
2506 */
cd21067f
PM
2507 if (update) {
2508 err = ftrace_update_ftrace_func(ftrace_ops_list_func);
2509 if (FTRACE_WARN_ON(err))
2510 return;
2511 }
59338f75 2512
8ed3e2cf 2513 if (command & FTRACE_UPDATE_CALLS)
d61f82d0 2514 ftrace_replace_code(1);
8ed3e2cf 2515 else if (command & FTRACE_DISABLE_CALLS)
d61f82d0
SR
2516 ftrace_replace_code(0);
2517
405e1d83
SRRH
2518 if (update && ftrace_trace_function != ftrace_ops_list_func) {
2519 function_trace_op = set_function_trace_op;
2520 smp_wmb();
2521 /* If irqs are disabled, we are in stop machine */
2522 if (!irqs_disabled())
2523 smp_call_function(ftrace_sync_ipi, NULL, 1);
cd21067f
PM
2524 err = ftrace_update_ftrace_func(ftrace_trace_function);
2525 if (FTRACE_WARN_ON(err))
2526 return;
405e1d83 2527 }
d61f82d0 2528
8ed3e2cf 2529 if (command & FTRACE_START_FUNC_RET)
cd21067f 2530 err = ftrace_enable_ftrace_graph_caller();
8ed3e2cf 2531 else if (command & FTRACE_STOP_FUNC_RET)
cd21067f
PM
2532 err = ftrace_disable_ftrace_graph_caller();
2533 FTRACE_WARN_ON(err);
8ed3e2cf
SR
2534}
2535
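/*
 * For example, a typical enable request from ftrace_startup() reaches
 * here as FTRACE_UPDATE_CALLS, usually combined with
 * FTRACE_UPDATE_TRACE_FUNC when ftrace_trace_function changed, while
 * the graph tracer start/stop additionally passes
 * FTRACE_START_FUNC_RET or FTRACE_STOP_FUNC_RET.
 */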
2536static int __ftrace_modify_code(void *data)
2537{
2538 int *command = data;
2539
2540 ftrace_modify_all_code(*command);
5a45cfe1 2541
d61f82d0 2542 return 0;
3d083395
SR
2543}
2544
c88fd863
SR
2545/**
2546 * ftrace_run_stop_machine, go back to the stop machine method
2547 * @command: The command to tell ftrace what to do
2548 *
 2549 * If an arch needs to fall back to the stop machine method, then
2550 * it can call this function.
2551 */
2552void ftrace_run_stop_machine(int command)
2553{
2554 stop_machine(__ftrace_modify_code, &command, NULL);
2555}
2556
2557/**
2558 * arch_ftrace_update_code, modify the code to trace or not trace
2559 * @command: The command that needs to be done
2560 *
 2561 * Archs can override this function if they do not need to
2562 * run stop_machine() to modify code.
2563 */
2564void __weak arch_ftrace_update_code(int command)
2565{
2566 ftrace_run_stop_machine(command);
2567}
2568
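/*
 * A hypothetical override (the my_arch_* helpers are illustrative)
 * for an arch that can patch text safely while the system runs:
 *
 *	void arch_ftrace_update_code(int command)
 *	{
 *		my_arch_text_poke_prepare();
 *		ftrace_modify_all_code(command);
 *		my_arch_text_poke_finish();
 *	}
 *
 * x86, for instance, overrides this hook to drive the update with
 * breakpoints instead of stop_machine().
 */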
e309b41d 2569static void ftrace_run_update_code(int command)
3d083395 2570{
000ab691
SR
2571 int ret;
2572
2573 ret = ftrace_arch_code_modify_prepare();
2574 FTRACE_WARN_ON(ret);
2575 if (ret)
2576 return;
2577
c88fd863
SR
2578 /*
2579 * By default we use stop_machine() to modify the code.
 2580 * But archs can do whatever they want as long as it
2581 * is safe. The stop_machine() is the safest, but also
2582 * produces the most overhead.
2583 */
2584 arch_ftrace_update_code(command);
2585
000ab691
SR
2586 ret = ftrace_arch_code_modify_post_process();
2587 FTRACE_WARN_ON(ret);
3d083395
SR
2588}
2589
8252ecf3 2590static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
7485058e 2591 struct ftrace_ops_hash *old_hash)
e1effa01
SRRH
2592{
2593 ops->flags |= FTRACE_OPS_FL_MODIFYING;
7485058e
SRRH
2594 ops->old_hash.filter_hash = old_hash->filter_hash;
2595 ops->old_hash.notrace_hash = old_hash->notrace_hash;
e1effa01 2596 ftrace_run_update_code(command);
8252ecf3 2597 ops->old_hash.filter_hash = NULL;
7485058e 2598 ops->old_hash.notrace_hash = NULL;
e1effa01
SRRH
2599 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2600}
2601
d61f82d0 2602static ftrace_func_t saved_ftrace_func;
60a7ecf4 2603static int ftrace_start_up;
df4fc315 2604
12cce594
SRRH
2605void __weak arch_ftrace_trampoline_free(struct ftrace_ops *ops)
2606{
2607}
2608
db0fbadc
JS
2609static void control_ops_free(struct ftrace_ops *ops)
2610{
2611 free_percpu(ops->disabled);
2612}
2613
df4fc315
SR
2614static void ftrace_startup_enable(int command)
2615{
2616 if (saved_ftrace_func != ftrace_trace_function) {
2617 saved_ftrace_func = ftrace_trace_function;
2618 command |= FTRACE_UPDATE_TRACE_FUNC;
2619 }
2620
2621 if (!command || !ftrace_enabled)
2622 return;
2623
2624 ftrace_run_update_code(command);
2625}
d61f82d0 2626
e1effa01
SRRH
2627static void ftrace_startup_all(int command)
2628{
2629 update_all_ops = true;
2630 ftrace_startup_enable(command);
2631 update_all_ops = false;
2632}
2633
a1cd6173 2634static int ftrace_startup(struct ftrace_ops *ops, int command)
3d083395 2635{
8a56d776 2636 int ret;
b848914c 2637
4eebcc81 2638 if (unlikely(ftrace_disabled))
a1cd6173 2639 return -ENODEV;
4eebcc81 2640
8a56d776
SRRH
2641 ret = __register_ftrace_function(ops);
2642 if (ret)
2643 return ret;
2644
60a7ecf4 2645 ftrace_start_up++;
30fb6aa7 2646 command |= FTRACE_UPDATE_CALLS;
d61f82d0 2647
e1effa01
SRRH
2648 /*
 2649 * Note that ftrace probes use this to start up
2650 * and modify functions it will probe. But we still
2651 * set the ADDING flag for modification, as probes
2652 * do not have trampolines. If they add them in the
2653 * future, then the probes will need to distinguish
2654 * between adding and updating probes.
2655 */
2656 ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;
66209a5b 2657
f8b8be8a
MH
2658 ret = ftrace_hash_ipmodify_enable(ops);
2659 if (ret < 0) {
2660 /* Rollback registration process */
2661 __unregister_ftrace_function(ops);
2662 ftrace_start_up--;
2663 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
2664 return ret;
2665 }
2666
66209a5b 2667 ftrace_hash_rec_enable(ops, 1);
ed926f9b 2668
df4fc315 2669 ftrace_startup_enable(command);
a1cd6173 2670
e1effa01
SRRH
2671 ops->flags &= ~FTRACE_OPS_FL_ADDING;
2672
a1cd6173 2673 return 0;
3d083395
SR
2674}
2675
8a56d776 2676static int ftrace_shutdown(struct ftrace_ops *ops, int command)
3d083395 2677{
8a56d776 2678 int ret;
b848914c 2679
4eebcc81 2680 if (unlikely(ftrace_disabled))
8a56d776
SRRH
2681 return -ENODEV;
2682
2683 ret = __unregister_ftrace_function(ops);
2684 if (ret)
2685 return ret;
4eebcc81 2686
60a7ecf4 2687 ftrace_start_up--;
9ea1a153
FW
2688 /*
 2689 * Just warn in case of unbalance; no need to kill ftrace, it's not
 2690 * critical, but the ftrace_call callers may never be nopped again after
2691 * further ftrace uses.
2692 */
2693 WARN_ON_ONCE(ftrace_start_up < 0);
2694
f8b8be8a
MH
2695 /* Disabling ipmodify never fails */
2696 ftrace_hash_ipmodify_disable(ops);
66209a5b 2697 ftrace_hash_rec_disable(ops, 1);
ed926f9b 2698
a737e6dd 2699 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
b848914c 2700
30fb6aa7 2701 command |= FTRACE_UPDATE_CALLS;
3d083395 2702
d61f82d0
SR
2703 if (saved_ftrace_func != ftrace_trace_function) {
2704 saved_ftrace_func = ftrace_trace_function;
2705 command |= FTRACE_UPDATE_TRACE_FUNC;
2706 }
3d083395 2707
a4c35ed2
SRRH
2708 if (!command || !ftrace_enabled) {
2709 /*
2710 * If these are control ops, they still need their
 2711 * per_cpu field freed. Since function tracing is
2712 * not currently active, we can just free them
2713 * without synchronizing all CPUs.
2714 */
2715 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2716 control_ops_free(ops);
8a56d776 2717 return 0;
a4c35ed2 2718 }
d61f82d0 2719
79922b80
SRRH
2720 /*
2721 * If the ops uses a trampoline, then it needs to be
2722 * tested first on update.
2723 */
e1effa01 2724 ops->flags |= FTRACE_OPS_FL_REMOVING;
79922b80
SRRH
2725 removed_ops = ops;
2726
fef5aeee
SRRH
2727 /* The trampoline logic checks the old hashes */
2728 ops->old_hash.filter_hash = ops->func_hash->filter_hash;
2729 ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
2730
d61f82d0 2731 ftrace_run_update_code(command);
a4c35ed2 2732
84bde62c
SRRH
2733 /*
2734 * If there's no more ops registered with ftrace, run a
2735 * sanity check to make sure all rec flags are cleared.
2736 */
2737 if (ftrace_ops_list == &ftrace_list_end) {
2738 struct ftrace_page *pg;
2739 struct dyn_ftrace *rec;
2740
2741 do_for_each_ftrace_rec(pg, rec) {
2742 if (FTRACE_WARN_ON_ONCE(rec->flags))
2743 pr_warn(" %pS flags:%lx\n",
2744 (void *)rec->ip, rec->flags);
2745 } while_for_each_ftrace_rec();
2746 }
2747
fef5aeee
SRRH
2748 ops->old_hash.filter_hash = NULL;
2749 ops->old_hash.notrace_hash = NULL;
2750
2751 removed_ops = NULL;
e1effa01 2752 ops->flags &= ~FTRACE_OPS_FL_REMOVING;
79922b80 2753
a4c35ed2
SRRH
2754 /*
 2755 * Dynamic ops may be freed; we must make sure that all
2756 * callers are done before leaving this function.
2757 * The same goes for freeing the per_cpu data of the control
2758 * ops.
2759 *
2760 * Again, normal synchronize_sched() is not good enough.
2761 * We need to do a hard force of sched synchronization.
2762 * This is because we use preempt_disable() to do RCU, but
2763 * the function tracers can be called where RCU is not watching
 2764 * (like before user_exit()). We cannot rely on the RCU
2765 * infrastructure to do the synchronization, thus we must do it
2766 * ourselves.
2767 */
2768 if (ops->flags & (FTRACE_OPS_FL_DYNAMIC | FTRACE_OPS_FL_CONTROL)) {
2769 schedule_on_each_cpu(ftrace_sync);
2770
12cce594
SRRH
2771 arch_ftrace_trampoline_free(ops);
2772
a4c35ed2
SRRH
2773 if (ops->flags & FTRACE_OPS_FL_CONTROL)
2774 control_ops_free(ops);
2775 }
2776
8a56d776 2777 return 0;
3d083395
SR
2778}
2779
e309b41d 2780static void ftrace_startup_sysctl(void)
b0fc494f 2781{
1619dc3f
PA
2782 int command;
2783
4eebcc81
SR
2784 if (unlikely(ftrace_disabled))
2785 return;
2786
d61f82d0
SR
2787 /* Force update next time */
2788 saved_ftrace_func = NULL;
60a7ecf4 2789 /* ftrace_start_up is true if we want ftrace running */
1619dc3f
PA
2790 if (ftrace_start_up) {
2791 command = FTRACE_UPDATE_CALLS;
2792 if (ftrace_graph_active)
2793 command |= FTRACE_START_FUNC_RET;
524a3868 2794 ftrace_startup_enable(command);
1619dc3f 2795 }
b0fc494f
SR
2796}
2797
e309b41d 2798static void ftrace_shutdown_sysctl(void)
b0fc494f 2799{
1619dc3f
PA
2800 int command;
2801
4eebcc81
SR
2802 if (unlikely(ftrace_disabled))
2803 return;
2804
60a7ecf4 2805 /* ftrace_start_up is true if ftrace is running */
1619dc3f
PA
2806 if (ftrace_start_up) {
2807 command = FTRACE_DISABLE_CALLS;
2808 if (ftrace_graph_active)
2809 command |= FTRACE_STOP_FUNC_RET;
2810 ftrace_run_update_code(command);
2811 }
b0fc494f
SR
2812}
2813
3d083395 2814static cycle_t ftrace_update_time;
3d083395
SR
2815unsigned long ftrace_update_tot_cnt;
2816
8c4f3c3f 2817static inline int ops_traces_mod(struct ftrace_ops *ops)
f7bc8b61 2818{
8c4f3c3f
SRRH
2819 /*
 2820 * An empty filter_hash defaults to tracing the module.
 2821 * But a notrace hash requires testing individual module functions.
2822 */
33b7f99c
SRRH
2823 return ftrace_hash_empty(ops->func_hash->filter_hash) &&
2824 ftrace_hash_empty(ops->func_hash->notrace_hash);
8c4f3c3f
SRRH
2825}
2826
2827/*
2828 * Check if the current ops references the record.
2829 *
2830 * If the ops traces all functions, then it was already accounted for.
2831 * If the ops does not trace the current record function, skip it.
2832 * If the ops ignores the function via notrace filter, skip it.
2833 */
2834static inline bool
2835ops_references_rec(struct ftrace_ops *ops, struct dyn_ftrace *rec)
2836{
2837 /* If ops isn't enabled, ignore it */
2838 if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
2839 return 0;
2840
2841 /* If ops traces all mods, we already accounted for it */
2842 if (ops_traces_mod(ops))
2843 return 0;
2844
2845 /* The function must be in the filter */
33b7f99c
SRRH
2846 if (!ftrace_hash_empty(ops->func_hash->filter_hash) &&
2847 !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))
8c4f3c3f 2848 return 0;
f7bc8b61 2849
8c4f3c3f 2850 /* If in notrace hash, we ignore it too */
33b7f99c 2851 if (ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
8c4f3c3f
SRRH
2852 return 0;
2853
2854 return 1;
2855}
2856
2857static int referenced_filters(struct dyn_ftrace *rec)
2858{
2859 struct ftrace_ops *ops;
2860 int cnt = 0;
2861
2862 for (ops = ftrace_ops_list; ops != &ftrace_list_end; ops = ops->next) {
2863 if (ops_references_rec(ops, rec))
2864 cnt++;
2865 }
2866
2867 return cnt;
f7bc8b61
SR
2868}
2869
1dc43cf0 2870static int ftrace_update_code(struct module *mod, struct ftrace_page *new_pgs)
3d083395 2871{
85ae32ae 2872 struct ftrace_page *pg;
e94142a6 2873 struct dyn_ftrace *p;
f22f9a89 2874 cycle_t start, stop;
1dc43cf0 2875 unsigned long update_cnt = 0;
f7bc8b61 2876 unsigned long ref = 0;
8c4f3c3f 2877 bool test = false;
85ae32ae 2878 int i;
f7bc8b61
SR
2879
2880 /*
2881 * When adding a module, we need to check if tracers are
2882 * currently enabled and if they are set to trace all functions.
2883 * If they are, we need to enable the module functions as well
2884 * as update the reference counts for those function records.
2885 */
2886 if (mod) {
2887 struct ftrace_ops *ops;
2888
2889 for (ops = ftrace_ops_list;
2890 ops != &ftrace_list_end; ops = ops->next) {
8c4f3c3f
SRRH
2891 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
2892 if (ops_traces_mod(ops))
2893 ref++;
2894 else
2895 test = true;
2896 }
f7bc8b61
SR
2897 }
2898 }
3d083395 2899
750ed1a4 2900 start = ftrace_now(raw_smp_processor_id());
3d083395 2901
1dc43cf0 2902 for (pg = new_pgs; pg; pg = pg->next) {
3d083395 2903
85ae32ae 2904 for (i = 0; i < pg->index; i++) {
8c4f3c3f
SRRH
2905 int cnt = ref;
2906
85ae32ae
SR
2907 /* If something went wrong, bail without enabling anything */
2908 if (unlikely(ftrace_disabled))
2909 return -1;
f22f9a89 2910
85ae32ae 2911 p = &pg->records[i];
8c4f3c3f
SRRH
2912 if (test)
2913 cnt += referenced_filters(p);
2914 p->flags = cnt;
f22f9a89 2915
85ae32ae
SR
2916 /*
2917 * Do the initial record conversion from mcount jump
2918 * to the NOP instructions.
2919 */
2920 if (!ftrace_code_disable(mod, p))
2921 break;
5cb084bb 2922
1dc43cf0 2923 update_cnt++;
5cb084bb 2924
85ae32ae
SR
2925 /*
2926 * If the tracing is enabled, go ahead and enable the record.
2927 *
 2928 * The reason not to enable the record immediately is the
 2929 * inherent check of ftrace_make_nop/ftrace_make_call for
 2930 * correct previous instructions. Doing the NOP conversion
 2931 * first puts the module into the correct state, thus
2932 * passing the ftrace_make_call check.
2933 */
8c4f3c3f 2934 if (ftrace_start_up && cnt) {
85ae32ae
SR
2935 int failed = __ftrace_replace_code(p, 1);
2936 if (failed)
4fd3279b 2937 ftrace_bug(failed, p);
85ae32ae 2938 }
5cb084bb 2939 }
3d083395
SR
2940 }
2941
750ed1a4 2942 stop = ftrace_now(raw_smp_processor_id());
3d083395 2943 ftrace_update_time = stop - start;
1dc43cf0 2944 ftrace_update_tot_cnt += update_cnt;
3d083395 2945
16444a8a
ACM
2946 return 0;
2947}
2948
a7900875 2949static int ftrace_allocate_records(struct ftrace_page *pg, int count)
3c1720f0 2950{
a7900875 2951 int order;
3c1720f0 2952 int cnt;
3c1720f0 2953
a7900875
SR
2954 if (WARN_ON(!count))
2955 return -EINVAL;
2956
2957 order = get_count_order(DIV_ROUND_UP(count, ENTRIES_PER_PAGE));
3c1720f0
SR
2958
2959 /*
a7900875
SR
2960 * We want to fill as much as possible. No more than a page
2961 * may be empty.
3c1720f0 2962 */
a7900875
SR
2963 while ((PAGE_SIZE << order) / ENTRY_SIZE >= count + ENTRIES_PER_PAGE)
2964 order--;
3c1720f0 2965
a7900875
SR
2966 again:
2967 pg->records = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
3c1720f0 2968
a7900875
SR
2969 if (!pg->records) {
2970 /* if we can't allocate this size, try something smaller */
2971 if (!order)
2972 return -ENOMEM;
2973 order >>= 1;
2974 goto again;
2975 }
3c1720f0 2976
a7900875
SR
2977 cnt = (PAGE_SIZE << order) / ENTRY_SIZE;
2978 pg->size = cnt;
3c1720f0 2979
a7900875
SR
2980 if (cnt > count)
2981 cnt = count;
2982
2983 return cnt;
2984}
2985
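/*
 * Worked example of the sizing above: for count == 3 * ENTRIES_PER_PAGE,
 * get_count_order(3) gives order 2 (four pages), but four pages would
 * leave a whole page's worth of entries empty, so the loop drops to
 * order 1 (two pages). The function then returns 2 * ENTRIES_PER_PAGE
 * and ftrace_allocate_pages() below allocates further pages for the
 * remainder.
 */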
2986static struct ftrace_page *
2987ftrace_allocate_pages(unsigned long num_to_init)
2988{
2989 struct ftrace_page *start_pg;
2990 struct ftrace_page *pg;
2991 int order;
2992 int cnt;
2993
2994 if (!num_to_init)
2995 return 0;
2996
2997 start_pg = pg = kzalloc(sizeof(*pg), GFP_KERNEL);
2998 if (!pg)
2999 return NULL;
3000
3001 /*
 3002 * Try to allocate as much as possible in one contiguous
3003 * location that fills in all of the space. We want to
3004 * waste as little space as possible.
3005 */
3006 for (;;) {
3007 cnt = ftrace_allocate_records(pg, num_to_init);
3008 if (cnt < 0)
3009 goto free_pages;
3010
3011 num_to_init -= cnt;
3012 if (!num_to_init)
3c1720f0
SR
3013 break;
3014
a7900875
SR
3015 pg->next = kzalloc(sizeof(*pg), GFP_KERNEL);
3016 if (!pg->next)
3017 goto free_pages;
3018
3c1720f0
SR
3019 pg = pg->next;
3020 }
3021
a7900875
SR
3022 return start_pg;
3023
3024 free_pages:
1f61be00
NK
3025 pg = start_pg;
3026 while (pg) {
a7900875
SR
3027 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
3028 free_pages((unsigned long)pg->records, order);
3029 start_pg = pg->next;
3030 kfree(pg);
3031 pg = start_pg;
3032 }
3033 pr_info("ftrace: FAILED to allocate memory for functions\n");
3034 return NULL;
3035}
3036
5072c59f
SR
3037#define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
3038
3039struct ftrace_iterator {
98c4fd04 3040 loff_t pos;
4aeb6967
SR
3041 loff_t func_pos;
3042 struct ftrace_page *pg;
3043 struct dyn_ftrace *func;
3044 struct ftrace_func_probe *probe;
3045 struct trace_parser parser;
1cf41dd7 3046 struct ftrace_hash *hash;
33dc9b12 3047 struct ftrace_ops *ops;
4aeb6967
SR
3048 int hidx;
3049 int idx;
3050 unsigned flags;
5072c59f
SR
3051};
3052
8fc0c701 3053static void *
4aeb6967 3054t_hash_next(struct seq_file *m, loff_t *pos)
8fc0c701
SR
3055{
3056 struct ftrace_iterator *iter = m->private;
4aeb6967 3057 struct hlist_node *hnd = NULL;
8fc0c701
SR
3058 struct hlist_head *hhd;
3059
8fc0c701 3060 (*pos)++;
98c4fd04 3061 iter->pos = *pos;
8fc0c701 3062
4aeb6967
SR
3063 if (iter->probe)
3064 hnd = &iter->probe->node;
8fc0c701
SR
3065 retry:
3066 if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
3067 return NULL;
3068
3069 hhd = &ftrace_func_hash[iter->hidx];
3070
3071 if (hlist_empty(hhd)) {
3072 iter->hidx++;
3073 hnd = NULL;
3074 goto retry;
3075 }
3076
3077 if (!hnd)
3078 hnd = hhd->first;
3079 else {
3080 hnd = hnd->next;
3081 if (!hnd) {
3082 iter->hidx++;
3083 goto retry;
3084 }
3085 }
3086
4aeb6967
SR
3087 if (WARN_ON_ONCE(!hnd))
3088 return NULL;
3089
3090 iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
3091
3092 return iter;
8fc0c701
SR
3093}
3094
3095static void *t_hash_start(struct seq_file *m, loff_t *pos)
3096{
3097 struct ftrace_iterator *iter = m->private;
3098 void *p = NULL;
d82d6244
LZ
3099 loff_t l;
3100
69a3083c
SR
3101 if (!(iter->flags & FTRACE_ITER_DO_HASH))
3102 return NULL;
3103
2bccfffd
SR
3104 if (iter->func_pos > *pos)
3105 return NULL;
8fc0c701 3106
d82d6244 3107 iter->hidx = 0;
2bccfffd 3108 for (l = 0; l <= (*pos - iter->func_pos); ) {
4aeb6967 3109 p = t_hash_next(m, &l);
d82d6244
LZ
3110 if (!p)
3111 break;
3112 }
4aeb6967
SR
3113 if (!p)
3114 return NULL;
3115
98c4fd04
SR
3116 /* Only set this if we have an item */
3117 iter->flags |= FTRACE_ITER_HASH;
3118
4aeb6967 3119 return iter;
8fc0c701
SR
3120}
3121
4aeb6967
SR
3122static int
3123t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
8fc0c701 3124{
b6887d79 3125 struct ftrace_func_probe *rec;
8fc0c701 3126
4aeb6967
SR
3127 rec = iter->probe;
3128 if (WARN_ON_ONCE(!rec))
3129 return -EIO;
8fc0c701 3130
809dcf29
SR
3131 if (rec->ops->print)
3132 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
3133
b375a11a 3134 seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
8fc0c701
SR
3135
3136 if (rec->data)
3137 seq_printf(m, ":%p", rec->data);
3138 seq_putc(m, '\n');
3139
3140 return 0;
3141}
3142
e309b41d 3143static void *
5072c59f
SR
3144t_next(struct seq_file *m, void *v, loff_t *pos)
3145{
3146 struct ftrace_iterator *iter = m->private;
fc13cb0c 3147 struct ftrace_ops *ops = iter->ops;
5072c59f
SR
3148 struct dyn_ftrace *rec = NULL;
3149
45a4a237
SR
3150 if (unlikely(ftrace_disabled))
3151 return NULL;
3152
8fc0c701 3153 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 3154 return t_hash_next(m, pos);
8fc0c701 3155
5072c59f 3156 (*pos)++;
1106b699 3157 iter->pos = iter->func_pos = *pos;
5072c59f 3158
0c75a3ed 3159 if (iter->flags & FTRACE_ITER_PRINTALL)
57c072c7 3160 return t_hash_start(m, pos);
0c75a3ed 3161
5072c59f
SR
3162 retry:
3163 if (iter->idx >= iter->pg->index) {
3164 if (iter->pg->next) {
3165 iter->pg = iter->pg->next;
3166 iter->idx = 0;
3167 goto retry;
3168 }
3169 } else {
3170 rec = &iter->pg->records[iter->idx++];
32082309 3171 if (((iter->flags & FTRACE_ITER_FILTER) &&
33b7f99c 3172 !(ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip))) ||
0183fb1c 3173
41c52c0d 3174 ((iter->flags & FTRACE_ITER_NOTRACE) &&
33b7f99c 3175 !ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip)) ||
647bcd03
SR
3176
3177 ((iter->flags & FTRACE_ITER_ENABLED) &&
23ea9c4d 3178 !(rec->flags & FTRACE_FL_ENABLED))) {
647bcd03 3179
5072c59f
SR
3180 rec = NULL;
3181 goto retry;
3182 }
3183 }
3184
4aeb6967 3185 if (!rec)
57c072c7 3186 return t_hash_start(m, pos);
4aeb6967
SR
3187
3188 iter->func = rec;
3189
3190 return iter;
5072c59f
SR
3191}
3192
98c4fd04
SR
3193static void reset_iter_read(struct ftrace_iterator *iter)
3194{
3195 iter->pos = 0;
3196 iter->func_pos = 0;
70f77b3f 3197 iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
5072c59f
SR
3198}
3199
3200static void *t_start(struct seq_file *m, loff_t *pos)
3201{
3202 struct ftrace_iterator *iter = m->private;
fc13cb0c 3203 struct ftrace_ops *ops = iter->ops;
5072c59f 3204 void *p = NULL;
694ce0a5 3205 loff_t l;
5072c59f 3206
8fc0c701 3207 mutex_lock(&ftrace_lock);
45a4a237
SR
3208
3209 if (unlikely(ftrace_disabled))
3210 return NULL;
3211
98c4fd04
SR
3212 /*
 3213 * If an lseek was done, then reset and start from the beginning.
3214 */
3215 if (*pos < iter->pos)
3216 reset_iter_read(iter);
3217
0c75a3ed
SR
3218 /*
3219 * For set_ftrace_filter reading, if we have the filter
 3220 * off, we can shortcut and just print out that all
3221 * functions are enabled.
3222 */
8c006cf7 3223 if ((iter->flags & FTRACE_ITER_FILTER &&
33b7f99c 3224 ftrace_hash_empty(ops->func_hash->filter_hash)) ||
8c006cf7 3225 (iter->flags & FTRACE_ITER_NOTRACE &&
33b7f99c 3226 ftrace_hash_empty(ops->func_hash->notrace_hash))) {
0c75a3ed 3227 if (*pos > 0)
8fc0c701 3228 return t_hash_start(m, pos);
0c75a3ed 3229 iter->flags |= FTRACE_ITER_PRINTALL;
df091625
CW
3230 /* reset in case of seek/pread */
3231 iter->flags &= ~FTRACE_ITER_HASH;
0c75a3ed
SR
3232 return iter;
3233 }
3234
8fc0c701
SR
3235 if (iter->flags & FTRACE_ITER_HASH)
3236 return t_hash_start(m, pos);
3237
98c4fd04
SR
3238 /*
3239 * Unfortunately, we need to restart at ftrace_pages_start
 3240 * every time we let go of the ftrace_lock mutex. This is because
3241 * those pointers can change without the lock.
3242 */
694ce0a5
LZ
3243 iter->pg = ftrace_pages_start;
3244 iter->idx = 0;
3245 for (l = 0; l <= *pos; ) {
3246 p = t_next(m, p, &l);
3247 if (!p)
3248 break;
50cdaf08 3249 }
5821e1b7 3250
69a3083c
SR
3251 if (!p)
3252 return t_hash_start(m, pos);
4aeb6967
SR
3253
3254 return iter;
5072c59f
SR
3255}
3256
3257static void t_stop(struct seq_file *m, void *p)
3258{
8fc0c701 3259 mutex_unlock(&ftrace_lock);
5072c59f
SR
3260}
3261
15d5b02c
SRRH
3262void * __weak
3263arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
3264{
3265 return NULL;
3266}
3267
3268static void add_trampoline_func(struct seq_file *m, struct ftrace_ops *ops,
3269 struct dyn_ftrace *rec)
3270{
3271 void *ptr;
3272
3273 ptr = arch_ftrace_trampoline_func(ops, rec);
3274 if (ptr)
3275 seq_printf(m, " ->%pS", ptr);
3276}
3277
5072c59f
SR
3278static int t_show(struct seq_file *m, void *v)
3279{
0c75a3ed 3280 struct ftrace_iterator *iter = m->private;
4aeb6967 3281 struct dyn_ftrace *rec;
5072c59f 3282
8fc0c701 3283 if (iter->flags & FTRACE_ITER_HASH)
4aeb6967 3284 return t_hash_show(m, iter);
8fc0c701 3285
0c75a3ed 3286 if (iter->flags & FTRACE_ITER_PRINTALL) {
8c006cf7 3287 if (iter->flags & FTRACE_ITER_NOTRACE)
fa6f0cc7 3288 seq_puts(m, "#### no functions disabled ####\n");
8c006cf7 3289 else
fa6f0cc7 3290 seq_puts(m, "#### all functions enabled ####\n");
0c75a3ed
SR
3291 return 0;
3292 }
3293
4aeb6967
SR
3294 rec = iter->func;
3295
5072c59f
SR
3296 if (!rec)
3297 return 0;
3298
647bcd03 3299 seq_printf(m, "%ps", (void *)rec->ip);
9674b2fa 3300 if (iter->flags & FTRACE_ITER_ENABLED) {
15d5b02c
SRRH
3301 struct ftrace_ops *ops = NULL;
3302
f8b8be8a 3303 seq_printf(m, " (%ld)%s%s",
0376bde1 3304 ftrace_rec_count(rec),
f8b8be8a
MH
3305 rec->flags & FTRACE_FL_REGS ? " R" : " ",
3306 rec->flags & FTRACE_FL_IPMODIFY ? " I" : " ");
9674b2fa 3307 if (rec->flags & FTRACE_FL_TRAMP_EN) {
5fecaa04 3308 ops = ftrace_find_tramp_ops_any(rec);
fef5aeee 3309 if (ops)
9674b2fa
SRRH
3310 seq_printf(m, "\ttramp: %pS",
3311 (void *)ops->trampoline);
3312 else
fa6f0cc7 3313 seq_puts(m, "\ttramp: ERROR!");
15d5b02c 3314
9674b2fa 3315 }
15d5b02c 3316 add_trampoline_func(m, ops, rec);
9674b2fa
SRRH
3317 }
3318
fa6f0cc7 3319 seq_putc(m, '\n');
5072c59f
SR
3320
3321 return 0;
3322}
3323
88e9d34c 3324static const struct seq_operations show_ftrace_seq_ops = {
5072c59f
SR
3325 .start = t_start,
3326 .next = t_next,
3327 .stop = t_stop,
3328 .show = t_show,
3329};
3330
e309b41d 3331static int
5072c59f
SR
3332ftrace_avail_open(struct inode *inode, struct file *file)
3333{
3334 struct ftrace_iterator *iter;
5072c59f 3335
4eebcc81
SR
3336 if (unlikely(ftrace_disabled))
3337 return -ENODEV;
3338
50e18b94
JO
3339 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3340 if (iter) {
3341 iter->pg = ftrace_pages_start;
3342 iter->ops = &global_ops;
4bf39a94 3343 }
5072c59f 3344
50e18b94 3345 return iter ? 0 : -ENOMEM;
5072c59f
SR
3346}
3347
647bcd03
SR
3348static int
3349ftrace_enabled_open(struct inode *inode, struct file *file)
3350{
3351 struct ftrace_iterator *iter;
647bcd03 3352
50e18b94
JO
3353 iter = __seq_open_private(file, &show_ftrace_seq_ops, sizeof(*iter));
3354 if (iter) {
3355 iter->pg = ftrace_pages_start;
3356 iter->flags = FTRACE_ITER_ENABLED;
3357 iter->ops = &global_ops;
647bcd03
SR
3358 }
3359
50e18b94 3360 return iter ? 0 : -ENOMEM;
647bcd03
SR
3361}
3362
fc13cb0c
SR
3363/**
3364 * ftrace_regex_open - initialize function tracer filter files
3365 * @ops: The ftrace_ops that hold the hash filters
3366 * @flag: The type of filter to process
3367 * @inode: The inode, usually passed in to your open routine
3368 * @file: The file, usually passed in to your open routine
3369 *
3370 * ftrace_regex_open() initializes the filter files for the
3371 * @ops. Depending on @flag it may process the filter hash or
3372 * the notrace hash of @ops. With this called from the open
3373 * routine, you can use ftrace_filter_write() for the write
3374 * routine if @flag has FTRACE_ITER_FILTER set, or
3375 * ftrace_notrace_write() if @flag has FTRACE_ITER_NOTRACE set.
098c879e 3376 * tracing_lseek() should be used as the lseek routine, and
fc13cb0c
SR
3377 * release must call ftrace_regex_release().
3378 */
3379int
f45948e8 3380ftrace_regex_open(struct ftrace_ops *ops, int flag,
1cf41dd7 3381 struct inode *inode, struct file *file)
5072c59f
SR
3382{
3383 struct ftrace_iterator *iter;
f45948e8 3384 struct ftrace_hash *hash;
5072c59f
SR
3385 int ret = 0;
3386
f04f24fb
MH
3387 ftrace_ops_init(ops);
3388
4eebcc81
SR
3389 if (unlikely(ftrace_disabled))
3390 return -ENODEV;
3391
5072c59f
SR
3392 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3393 if (!iter)
3394 return -ENOMEM;
3395
689fd8b6 3396 if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
3397 kfree(iter);
3398 return -ENOMEM;
3399 }
3400
3f2367ba
MH
3401 iter->ops = ops;
3402 iter->flags = flag;
3403
33b7f99c 3404 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 3405
f45948e8 3406 if (flag & FTRACE_ITER_NOTRACE)
33b7f99c 3407 hash = ops->func_hash->notrace_hash;
f45948e8 3408 else
33b7f99c 3409 hash = ops->func_hash->filter_hash;
f45948e8 3410
33dc9b12 3411 if (file->f_mode & FMODE_WRITE) {
ef2fbe16
NK
3412 const int size_bits = FTRACE_HASH_DEFAULT_BITS;
3413
3414 if (file->f_flags & O_TRUNC)
3415 iter->hash = alloc_ftrace_hash(size_bits);
3416 else
3417 iter->hash = alloc_and_copy_ftrace_hash(size_bits, hash);
3418
33dc9b12
SR
3419 if (!iter->hash) {
3420 trace_parser_put(&iter->parser);
3421 kfree(iter);
3f2367ba
MH
3422 ret = -ENOMEM;
3423 goto out_unlock;
33dc9b12
SR
3424 }
3425 }
1cf41dd7 3426
5072c59f
SR
3427 if (file->f_mode & FMODE_READ) {
3428 iter->pg = ftrace_pages_start;
5072c59f
SR
3429
3430 ret = seq_open(file, &show_ftrace_seq_ops);
3431 if (!ret) {
3432 struct seq_file *m = file->private_data;
3433 m->private = iter;
79fe249c 3434 } else {
33dc9b12
SR
3435 /* Failed */
3436 free_ftrace_hash(iter->hash);
79fe249c 3437 trace_parser_put(&iter->parser);
5072c59f 3438 kfree(iter);
79fe249c 3439 }
5072c59f
SR
3440 } else
3441 file->private_data = iter;
3f2367ba
MH
3442
3443 out_unlock:
33b7f99c 3444 mutex_unlock(&ops->func_hash->regex_lock);
5072c59f
SR
3445
3446 return ret;
3447}
3448
41c52c0d
SR
3449static int
3450ftrace_filter_open(struct inode *inode, struct file *file)
3451{
e3b3e2e8
SRRH
3452 struct ftrace_ops *ops = inode->i_private;
3453
3454 return ftrace_regex_open(ops,
69a3083c
SR
3455 FTRACE_ITER_FILTER | FTRACE_ITER_DO_HASH,
3456 inode, file);
41c52c0d
SR
3457}
3458
3459static int
3460ftrace_notrace_open(struct inode *inode, struct file *file)
3461{
e3b3e2e8
SRRH
3462 struct ftrace_ops *ops = inode->i_private;
3463
3464 return ftrace_regex_open(ops, FTRACE_ITER_NOTRACE,
1cf41dd7 3465 inode, file);
41c52c0d
SR
3466}
3467
3ba00929
DS
3468/* Type for quick search ftrace basic regexes (globs) from filter_parse_regex */
3469struct ftrace_glob {
3470 char *search;
3471 unsigned len;
3472 int type;
3473};
3474
3475static int ftrace_match(char *str, struct ftrace_glob *g)
9f4801e3 3476{
9f4801e3 3477 int matched = 0;
751e9983 3478 int slen;
9f4801e3 3479
3ba00929 3480 switch (g->type) {
9f4801e3 3481 case MATCH_FULL:
3ba00929 3482 if (strcmp(str, g->search) == 0)
9f4801e3
SR
3483 matched = 1;
3484 break;
3485 case MATCH_FRONT_ONLY:
3ba00929 3486 if (strncmp(str, g->search, g->len) == 0)
9f4801e3
SR
3487 matched = 1;
3488 break;
3489 case MATCH_MIDDLE_ONLY:
3ba00929 3490 if (strstr(str, g->search))
9f4801e3
SR
3491 matched = 1;
3492 break;
3493 case MATCH_END_ONLY:
751e9983 3494 slen = strlen(str);
3ba00929
DS
3495 if (slen >= g->len &&
3496 memcmp(str + slen - g->len, g->search, g->len) == 0)
9f4801e3
SR
3497 matched = 1;
3498 break;
3499 }
3500
3501 return matched;
3502}
3503
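/*
 * The glob types correspond to the usual set_ftrace_filter patterns,
 * e.g. (illustrative):
 *
 *	"schedule"	MATCH_FULL		exact name
 *	"sched*"	MATCH_FRONT_ONLY	prefix match
 *	"*sched*"	MATCH_MIDDLE_ONLY	substring match
 *	"*sched"	MATCH_END_ONLY		suffix match
 */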
b448c4e3 3504static int
f0a3b154 3505enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int clear_filter)
996e87be 3506{
b448c4e3 3507 struct ftrace_func_entry *entry;
b448c4e3
SR
3508 int ret = 0;
3509
1cf41dd7 3510 entry = ftrace_lookup_ip(hash, rec->ip);
f0a3b154 3511 if (clear_filter) {
1cf41dd7
SR
3512 /* Do nothing if it doesn't exist */
3513 if (!entry)
3514 return 0;
b448c4e3 3515
33dc9b12 3516 free_hash_entry(hash, entry);
1cf41dd7
SR
3517 } else {
3518 /* Do nothing if it exists */
3519 if (entry)
3520 return 0;
b448c4e3 3521
1cf41dd7 3522 ret = add_hash_entry(hash, rec->ip);
b448c4e3
SR
3523 }
3524 return ret;
996e87be
SR
3525}
3526
64e7c440 3527static int
0b507e1e
DS
3528ftrace_match_record(struct dyn_ftrace *rec, struct ftrace_glob *func_g,
3529 struct ftrace_glob *mod_g, int exclude_mod)
64e7c440
SR
3530{
3531 char str[KSYM_SYMBOL_LEN];
b9df92d2
SR
3532 char *modname;
3533
3534 kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
3535
0b507e1e
DS
3536 if (mod_g) {
3537 int mod_matches = (modname) ? ftrace_match(modname, mod_g) : 0;
3538
3539 /* blank module name to match all modules */
3540 if (!mod_g->len) {
3541 /* blank module globbing: modname xor exclude_mod */
3542 if ((!exclude_mod) != (!modname))
3543 goto func_match;
3544 return 0;
3545 }
3546
3547 /* not matching the module */
3548 if (!modname || !mod_matches) {
3549 if (exclude_mod)
3550 goto func_match;
3551 else
3552 return 0;
3553 }
3554
3555 if (mod_matches && exclude_mod)
b9df92d2
SR
3556 return 0;
3557
0b507e1e 3558func_match:
b9df92d2 3559 /* blank search means to match all funcs in the mod */
3ba00929 3560 if (!func_g->len)
b9df92d2
SR
3561 return 1;
3562 }
64e7c440 3563
3ba00929 3564 return ftrace_match(str, func_g);
64e7c440
SR
3565}
3566
1cf41dd7 3567static int
3ba00929 3568match_records(struct ftrace_hash *hash, char *func, int len, char *mod)
9f4801e3 3569{
9f4801e3
SR
3570 struct ftrace_page *pg;
3571 struct dyn_ftrace *rec;
3ba00929 3572 struct ftrace_glob func_g = { .type = MATCH_FULL };
0b507e1e
DS
3573 struct ftrace_glob mod_g = { .type = MATCH_FULL };
3574 struct ftrace_glob *mod_match = (mod) ? &mod_g : NULL;
3575 int exclude_mod = 0;
311d16da 3576 int found = 0;
b448c4e3 3577 int ret;
f0a3b154 3578 int clear_filter;
9f4801e3 3579
0b507e1e 3580 if (func) {
3ba00929
DS
3581 func_g.type = filter_parse_regex(func, len, &func_g.search,
3582 &clear_filter);
3583 func_g.len = strlen(func_g.search);
b9df92d2 3584 }
9f4801e3 3585
0b507e1e
DS
3586 if (mod) {
3587 mod_g.type = filter_parse_regex(mod, strlen(mod),
3588 &mod_g.search, &exclude_mod);
3589 mod_g.len = strlen(mod_g.search);
b9df92d2 3590 }
9f4801e3 3591
52baf119 3592 mutex_lock(&ftrace_lock);
265c831c 3593
b9df92d2
SR
3594 if (unlikely(ftrace_disabled))
3595 goto out_unlock;
9f4801e3 3596
265c831c 3597 do_for_each_ftrace_rec(pg, rec) {
0b507e1e 3598 if (ftrace_match_record(rec, &func_g, mod_match, exclude_mod)) {
f0a3b154 3599 ret = enter_record(hash, rec, clear_filter);
b448c4e3
SR
3600 if (ret < 0) {
3601 found = ret;
3602 goto out_unlock;
3603 }
311d16da 3604 found = 1;
265c831c
SR
3605 }
3606 } while_for_each_ftrace_rec();
b9df92d2 3607 out_unlock:
52baf119 3608 mutex_unlock(&ftrace_lock);
311d16da
LZ
3609
3610 return found;
5072c59f
SR
3611}
3612
64e7c440 3613static int
1cf41dd7 3614ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
64e7c440 3615{
f0a3b154 3616 return match_records(hash, buff, len, NULL);
64e7c440
SR
3617}
3618
64e7c440 3619
f6180773
SR
3620/*
3621 * We register the module command as a template to show others how
 3622 * to register a command as well.
3623 */
3624
3625static int
43dd61c9 3626ftrace_mod_callback(struct ftrace_hash *hash,
f0a3b154 3627 char *func, char *cmd, char *module, int enable)
f6180773 3628{
5e3949f0 3629 int ret;
f6180773
SR
3630
3631 /*
3632 * cmd == 'mod' because we only registered this func
3633 * for the 'mod' ftrace_func_command.
3634 * But if you register one func with multiple commands,
3635 * you can tell which command was used by the cmd
3636 * parameter.
3637 */
f0a3b154 3638 ret = match_records(hash, func, strlen(func), module);
b448c4e3 3639 if (!ret)
5e3949f0 3640 return -EINVAL;
b448c4e3
SR
3641 if (ret < 0)
3642 return ret;
b448c4e3 3643 return 0;
f6180773
SR
3644}
3645
3646static struct ftrace_func_command ftrace_mod_cmd = {
3647 .name = "mod",
3648 .func = ftrace_mod_callback,
3649};
3650
3651static int __init ftrace_mod_cmd_init(void)
3652{
3653 return register_ftrace_command(&ftrace_mod_cmd);
3654}
6f415672 3655core_initcall(ftrace_mod_cmd_init);
f6180773 3656
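/*
 * Following the template above, a hypothetical command (all names
 * illustrative) would be wired up as:
 *
 *	static int my_cmd_callback(struct ftrace_hash *hash, char *func,
 *				   char *cmd, char *param, int enable)
 *	{
 *		... adjust @hash based on @param, return 0 or -errno ...
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name	= "mycmd",
 *		.func	= my_cmd_callback,
 *	};
 *
 *	register_ftrace_command(&my_cmd);
 *
 * after which writing "func_name:mycmd:param" to set_ftrace_filter
 * lands in my_cmd_callback() with cmd == "mycmd".
 */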
2f5f6ad9 3657static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
a1e2e31d 3658 struct ftrace_ops *op, struct pt_regs *pt_regs)
59df055f 3659{
b6887d79 3660 struct ftrace_func_probe *entry;
59df055f 3661 struct hlist_head *hhd;
59df055f 3662 unsigned long key;
59df055f
SR
3663
3664 key = hash_long(ip, FTRACE_HASH_BITS);
3665
3666 hhd = &ftrace_func_hash[key];
3667
3668 if (hlist_empty(hhd))
3669 return;
3670
3671 /*
 3672 * Disable preemption for these calls to prevent an RCU grace
3673 * period. This syncs the hash iteration and freeing of items
3674 * on the hash. rcu_read_lock is too dangerous here.
3675 */
5168ae50 3676 preempt_disable_notrace();
1bb539ca 3677 hlist_for_each_entry_rcu_notrace(entry, hhd, node) {
59df055f
SR
3678 if (entry->ip == ip)
3679 entry->ops->func(ip, parent_ip, &entry->data);
3680 }
5168ae50 3681 preempt_enable_notrace();
59df055f
SR
3682}
3683
b6887d79 3684static struct ftrace_ops trace_probe_ops __read_mostly =
59df055f 3685{
fb9fb015 3686 .func = function_trace_probe_call,
f04f24fb 3687 .flags = FTRACE_OPS_FL_INITIALIZED,
33b7f99c 3688 INIT_OPS_HASH(trace_probe_ops)
59df055f
SR
3689};
3690
b6887d79 3691static int ftrace_probe_registered;
59df055f 3692
7485058e 3693static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
59df055f 3694{
b848914c 3695 int ret;
59df055f
SR
3696 int i;
3697
19dd603e
SRRH
3698 if (ftrace_probe_registered) {
3699 /* still need to update the function call sites */
3700 if (ftrace_enabled)
8252ecf3
SRRH
3701 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3702 old_hash);
59df055f 3703 return;
19dd603e 3704 }
59df055f
SR
3705
3706 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3707 struct hlist_head *hhd = &ftrace_func_hash[i];
3708 if (hhd->first)
3709 break;
3710 }
3711 /* Nothing registered? */
3712 if (i == FTRACE_FUNC_HASHSIZE)
3713 return;
3714
8a56d776 3715 ret = ftrace_startup(&trace_probe_ops, 0);
b848914c 3716
b6887d79 3717 ftrace_probe_registered = 1;
59df055f
SR
3718}
3719
b6887d79 3720static void __disable_ftrace_function_probe(void)
59df055f
SR
3721{
3722 int i;
3723
b6887d79 3724 if (!ftrace_probe_registered)
59df055f
SR
3725 return;
3726
3727 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3728 struct hlist_head *hhd = &ftrace_func_hash[i];
3729 if (hhd->first)
3730 return;
3731 }
3732
3733 /* no more funcs left */
8a56d776 3734 ftrace_shutdown(&trace_probe_ops, 0);
b848914c 3735
b6887d79 3736 ftrace_probe_registered = 0;
59df055f
SR
3737}
3738
3739
7818b388 3740static void ftrace_free_entry(struct ftrace_func_probe *entry)
59df055f 3741{
59df055f 3742 if (entry->ops->free)
e67efb93 3743 entry->ops->free(entry->ops, entry->ip, &entry->data);
59df055f
SR
3744 kfree(entry);
3745}
3746
59df055f 3747int
b6887d79 3748register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3749 void *data)
3750{
7485058e 3751 struct ftrace_ops_hash old_hash_ops;
b6887d79 3752 struct ftrace_func_probe *entry;
3ba00929 3753 struct ftrace_glob func_g;
33b7f99c 3754 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3296fc4e 3755 struct ftrace_hash *old_hash = *orig_hash;
e1df4cb6 3756 struct ftrace_hash *hash;
59df055f
SR
3757 struct ftrace_page *pg;
3758 struct dyn_ftrace *rec;
3ba00929 3759 int not;
6a24a244 3760 unsigned long key;
59df055f 3761 int count = 0;
e1df4cb6 3762 int ret;
59df055f 3763
3ba00929
DS
3764 func_g.type = filter_parse_regex(glob, strlen(glob),
3765 &func_g.search, &not);
3766 func_g.len = strlen(func_g.search);
59df055f 3767
b6887d79 3768 /* we do not support '!' for function probes */
59df055f
SR
3769 if (WARN_ON(not))
3770 return -EINVAL;
3771
33b7f99c 3772 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
59df055f 3773
7485058e
SRRH
3774 old_hash_ops.filter_hash = old_hash;
3775 /* Probes only have filters */
3776 old_hash_ops.notrace_hash = NULL;
3777
3296fc4e 3778 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
e1df4cb6
SRRH
3779 if (!hash) {
3780 count = -ENOMEM;
5ae0bf59 3781 goto out;
e1df4cb6
SRRH
3782 }
3783
3784 if (unlikely(ftrace_disabled)) {
3785 count = -ENODEV;
5ae0bf59 3786 goto out;
e1df4cb6 3787 }
59df055f 3788
5ae0bf59
SRRH
3789 mutex_lock(&ftrace_lock);
3790
45a4a237 3791 do_for_each_ftrace_rec(pg, rec) {
59df055f 3792
0b507e1e 3793 if (!ftrace_match_record(rec, &func_g, NULL, 0))
59df055f
SR
3794 continue;
3795
3796 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
3797 if (!entry) {
b6887d79 3798 /* If we did not process any, then return error */
59df055f
SR
3799 if (!count)
3800 count = -ENOMEM;
3801 goto out_unlock;
3802 }
3803
3804 count++;
3805
3806 entry->data = data;
3807
3808 /*
3809 * The caller might want to do something special
3810 * for each function we find. We call the callback
3811 * to give the caller an opportunity to do so.
3812 */
e67efb93
SRRH
3813 if (ops->init) {
3814 if (ops->init(ops, rec->ip, &entry->data) < 0) {
59df055f
SR
3815 /* caller does not like this func */
3816 kfree(entry);
3817 continue;
3818 }
3819 }
3820
e1df4cb6
SRRH
3821 ret = enter_record(hash, rec, 0);
3822 if (ret < 0) {
3823 kfree(entry);
3824 count = ret;
3825 goto out_unlock;
3826 }
3827
59df055f
SR
3828 entry->ops = ops;
3829 entry->ip = rec->ip;
3830
3831 key = hash_long(entry->ip, FTRACE_HASH_BITS);
3832 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
3833
3834 } while_for_each_ftrace_rec();
e1df4cb6
SRRH
3835
3836 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
8252ecf3 3837
7485058e 3838 __enable_ftrace_function_probe(&old_hash_ops);
8252ecf3 3839
3296fc4e
SRRH
3840 if (!ret)
3841 free_ftrace_hash_rcu(old_hash);
3842 else
e1df4cb6
SRRH
3843 count = ret;
3844
59df055f 3845 out_unlock:
5ae0bf59
SRRH
3846 mutex_unlock(&ftrace_lock);
3847 out:
33b7f99c 3848 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6 3849 free_ftrace_hash(hash);
59df055f
SR
3850
3851 return count;
3852}
3853
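/*
 * Illustrative sketch (not part of the original file): a minimal caller
 * of register_ftrace_function_probe(). The "my_*" names are hypothetical;
 * the callback signature mirrors the entry->ops->func(ip, parent_ip,
 * &entry->data) call made from function_trace_probe_call() above.
 */
#if 0
static void my_probe_func(unsigned long ip, unsigned long parent_ip,
			  void **data)
{
	/* runs for every traced call of a matched function */
}

static struct ftrace_probe_ops my_probe_ops = {
	.func = my_probe_func,
};

static int __init my_probe_init(void)
{
	int count;

	/* attach the probe to every function matching the glob */
	count = register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
	return count < 0 ? count : 0;
}
#endif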
3854enum {
b6887d79
SR
3855 PROBE_TEST_FUNC = 1,
3856 PROBE_TEST_DATA = 2
59df055f
SR
3857};
3858
3859static void
b6887d79 3860__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3861 void *data, int flags)
3862{
e1df4cb6 3863 struct ftrace_func_entry *rec_entry;
b6887d79 3864 struct ftrace_func_probe *entry;
7818b388 3865 struct ftrace_func_probe *p;
3ba00929 3866 struct ftrace_glob func_g;
33b7f99c 3867 struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
3296fc4e 3868 struct ftrace_hash *old_hash = *orig_hash;
7818b388 3869 struct list_head free_list;
e1df4cb6 3870 struct ftrace_hash *hash;
b67bfe0d 3871 struct hlist_node *tmp;
59df055f 3872 char str[KSYM_SYMBOL_LEN];
3ba00929 3873 int i, ret;
59df055f 3874
b36461da 3875 if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
3ba00929 3876 func_g.search = NULL;
b36461da 3877 else if (glob) {
59df055f
SR
3878 int not;
3879
3ba00929
DS
3880 func_g.type = filter_parse_regex(glob, strlen(glob),
3881 &func_g.search, &not);
3882 func_g.len = strlen(func_g.search);
3883 func_g.search = glob;
59df055f 3884
b6887d79 3885 /* we do not support '!' for function probes */
59df055f
SR
3886 if (WARN_ON(not))
3887 return;
3888 }
3889
33b7f99c 3890 mutex_lock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6
SRRH
3891
3892 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
3893 if (!hash)
3894 /* Hmm, should report this somehow */
3895 goto out_unlock;
3896
7818b388
SRRH
3897 INIT_LIST_HEAD(&free_list);
3898
59df055f
SR
3899 for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
3900 struct hlist_head *hhd = &ftrace_func_hash[i];
3901
b67bfe0d 3902 hlist_for_each_entry_safe(entry, tmp, hhd, node) {
59df055f
SR
3903
3904 /* break up if statements for readability */
b6887d79 3905 if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
59df055f
SR
3906 continue;
3907
b6887d79 3908 if ((flags & PROBE_TEST_DATA) && entry->data != data)
59df055f
SR
3909 continue;
3910
3911 /* do this last, since it is the most expensive */
3ba00929 3912 if (func_g.search) {
59df055f
SR
3913 kallsyms_lookup(entry->ip, NULL, NULL,
3914 NULL, str);
3ba00929 3915 if (!ftrace_match(str, &func_g))
59df055f
SR
3916 continue;
3917 }
3918
e1df4cb6
SRRH
3919 rec_entry = ftrace_lookup_ip(hash, entry->ip);
3920 /* It is possible more than one entry had this ip */
3921 if (rec_entry)
3922 free_hash_entry(hash, rec_entry);
3923
740466bc 3924 hlist_del_rcu(&entry->node);
7818b388 3925 list_add(&entry->free_list, &free_list);
59df055f
SR
3926 }
3927 }
3f2367ba 3928 mutex_lock(&ftrace_lock);
b6887d79 3929 __disable_ftrace_function_probe();
e1df4cb6
SRRH
3930 /*
3931 * Remove after the disable is called. Otherwise, if the last
3932 * probe is removed, a null hash means *all enabled*.
3933 */
3296fc4e 3934 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
7818b388 3935 synchronize_sched();
3296fc4e
SRRH
3936 if (!ret)
3937 free_ftrace_hash_rcu(old_hash);
3938
7818b388
SRRH
3939 list_for_each_entry_safe(entry, p, &free_list, free_list) {
3940 list_del(&entry->free_list);
3941 ftrace_free_entry(entry);
3942 }
3f2367ba 3943 mutex_unlock(&ftrace_lock);
3ba00929 3944
e1df4cb6 3945 out_unlock:
33b7f99c 3946 mutex_unlock(&trace_probe_ops.func_hash->regex_lock);
e1df4cb6 3947 free_ftrace_hash(hash);
59df055f
SR
3948}
3949
3950void
b6887d79 3951unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
59df055f
SR
3952 void *data)
3953{
b6887d79
SR
3954 __unregister_ftrace_function_probe(glob, ops, data,
3955 PROBE_TEST_FUNC | PROBE_TEST_DATA);
59df055f
SR
3956}
3957
3958void
b6887d79 3959unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
59df055f 3960{
b6887d79 3961 __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
59df055f
SR
3962}
3963
b6887d79 3964void unregister_ftrace_function_probe_all(char *glob)
59df055f 3965{
b6887d79 3966 __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
59df055f
SR
3967}
3968
f6180773
SR
3969static LIST_HEAD(ftrace_commands);
3970static DEFINE_MUTEX(ftrace_cmd_mutex);
3971
38de93ab
TZ
3972/*
3973 * Currently we only register ftrace commands from __init, so mark this
3974 * __init too.
3975 */
3976__init int register_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
3977{
3978 struct ftrace_func_command *p;
3979 int ret = 0;
3980
3981 mutex_lock(&ftrace_cmd_mutex);
3982 list_for_each_entry(p, &ftrace_commands, list) {
3983 if (strcmp(cmd->name, p->name) == 0) {
3984 ret = -EBUSY;
3985 goto out_unlock;
3986 }
3987 }
3988 list_add(&cmd->list, &ftrace_commands);
3989 out_unlock:
3990 mutex_unlock(&ftrace_cmd_mutex);
3991
3992 return ret;
3993}
3994
38de93ab
TZ
3995/*
3996 * Currently we only unregister ftrace commands from __init, so mark
3997 * this __init too.
3998 */
3999__init int unregister_ftrace_command(struct ftrace_func_command *cmd)
f6180773
SR
4000{
4001 struct ftrace_func_command *p, *n;
4002 int ret = -ENODEV;
4003
4004 mutex_lock(&ftrace_cmd_mutex);
4005 list_for_each_entry_safe(p, n, &ftrace_commands, list) {
4006 if (strcmp(cmd->name, p->name) == 0) {
4007 ret = 0;
4008 list_del_init(&p->list);
4009 goto out_unlock;
4010 }
4011 }
4012 out_unlock:
4013 mutex_unlock(&ftrace_cmd_mutex);
4014
4015 return ret;
4016}
4017
33dc9b12
SR
4018static int ftrace_process_regex(struct ftrace_hash *hash,
4019 char *buff, int len, int enable)
64e7c440 4020{
f6180773 4021 char *func, *command, *next = buff;
6a24a244 4022 struct ftrace_func_command *p;
0aff1c0c 4023 int ret = -EINVAL;
64e7c440
SR
4024
4025 func = strsep(&next, ":");
4026
4027 if (!next) {
1cf41dd7 4028 ret = ftrace_match_records(hash, func, len);
b448c4e3
SR
4029 if (!ret)
4030 ret = -EINVAL;
4031 if (ret < 0)
4032 return ret;
4033 return 0;
64e7c440
SR
4034 }
4035
f6180773 4036 /* command found */
64e7c440
SR
4037
4038 command = strsep(&next, ":");
4039
f6180773
SR
4040 mutex_lock(&ftrace_cmd_mutex);
4041 list_for_each_entry(p, &ftrace_commands, list) {
4042 if (strcmp(p->name, command) == 0) {
43dd61c9 4043 ret = p->func(hash, func, command, next, enable);
f6180773
SR
4044 goto out_unlock;
4045 }
64e7c440 4046 }
f6180773
SR
4047 out_unlock:
4048 mutex_unlock(&ftrace_cmd_mutex);
64e7c440 4049
f6180773 4050 return ret;
64e7c440
SR
4051}
4052
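/*
 * Annotation (not part of the original file): a write of "<func>" alone
 * sets a plain filter, while "<func>:<command>[:<param>]" is dispatched
 * to a registered ftrace_func_command. With the "mod" command above,
 * for example:
 *
 *   echo '*:mod:ext4' > set_ftrace_filter
 *
 * filters on all functions that belong to the ext4 module.
 */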
e309b41d 4053static ssize_t
41c52c0d
SR
4054ftrace_regex_write(struct file *file, const char __user *ubuf,
4055 size_t cnt, loff_t *ppos, int enable)
5072c59f
SR
4056{
4057 struct ftrace_iterator *iter;
689fd8b6 4058 struct trace_parser *parser;
4059 ssize_t ret, read;
5072c59f 4060
4ba7978e 4061 if (!cnt)
5072c59f
SR
4062 return 0;
4063
5072c59f
SR
4064 if (file->f_mode & FMODE_READ) {
4065 struct seq_file *m = file->private_data;
4066 iter = m->private;
4067 } else
4068 iter = file->private_data;
4069
f04f24fb 4070 if (unlikely(ftrace_disabled))
3f2367ba
MH
4071 return -ENODEV;
4072
4073 /* iter->hash is a local copy, so we don't need regex_lock */
f04f24fb 4074
689fd8b6 4075 parser = &iter->parser;
4076 read = trace_get_user(parser, ubuf, cnt, ppos);
5072c59f 4077
4ba7978e 4078 if (read >= 0 && trace_parser_loaded(parser) &&
689fd8b6 4079 !trace_parser_cont(parser)) {
33dc9b12 4080 ret = ftrace_process_regex(iter->hash, parser->buffer,
689fd8b6 4081 parser->idx, enable);
313254a9 4082 trace_parser_clear(parser);
7c088b51 4083 if (ret < 0)
3f2367ba 4084 goto out;
eda1e328 4085 }
5072c59f 4086
5072c59f 4087 ret = read;
3f2367ba 4088 out:
5072c59f
SR
4089 return ret;
4090}
4091
fc13cb0c 4092ssize_t
41c52c0d
SR
4093ftrace_filter_write(struct file *file, const char __user *ubuf,
4094 size_t cnt, loff_t *ppos)
4095{
4096 return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
4097}
4098
fc13cb0c 4099ssize_t
41c52c0d
SR
4100ftrace_notrace_write(struct file *file, const char __user *ubuf,
4101 size_t cnt, loff_t *ppos)
4102{
4103 return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
4104}
4105
33dc9b12 4106static int
647664ea
MH
4107ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
4108{
4109 struct ftrace_func_entry *entry;
4110
4111 if (!ftrace_location(ip))
4112 return -EINVAL;
4113
4114 if (remove) {
4115 entry = ftrace_lookup_ip(hash, ip);
4116 if (!entry)
4117 return -ENOENT;
4118 free_hash_entry(hash, entry);
4119 return 0;
4120 }
4121
4122 return add_hash_entry(hash, ip);
4123}
4124
8252ecf3 4125static void ftrace_ops_update_code(struct ftrace_ops *ops,
7485058e 4126 struct ftrace_ops_hash *old_hash)
1c80c432 4127{
8f86f837
SRRH
4128 struct ftrace_ops *op;
4129
4130 if (!ftrace_enabled)
4131 return;
4132
4133 if (ops->flags & FTRACE_OPS_FL_ENABLED) {
8252ecf3 4134 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
8f86f837
SRRH
4135 return;
4136 }
4137
4138 /*
4139 * If this is the shared global_ops filter, then we need to
4140 * check if there is another ops that shares it and is enabled.
4141 * If so, we still need to run the modify code.
4142 */
4143 if (ops->func_hash != &global_ops.local_hash)
4144 return;
4145
4146 do_for_each_ftrace_op(op, ftrace_ops_list) {
4147 if (op->func_hash == &global_ops.local_hash &&
4148 op->flags & FTRACE_OPS_FL_ENABLED) {
4149 ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
4150 /* Only need to do this once */
4151 return;
4152 }
4153 } while_for_each_ftrace_op(op);
1c80c432
SRRH
4154}
4155
647664ea
MH
4156static int
4157ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
4158 unsigned long ip, int remove, int reset, int enable)
41c52c0d 4159{
33dc9b12 4160 struct ftrace_hash **orig_hash;
7485058e 4161 struct ftrace_ops_hash old_hash_ops;
3296fc4e 4162 struct ftrace_hash *old_hash;
f45948e8 4163 struct ftrace_hash *hash;
33dc9b12 4164 int ret;
f45948e8 4165
41c52c0d 4166 if (unlikely(ftrace_disabled))
33dc9b12 4167 return -ENODEV;
41c52c0d 4168
33b7f99c 4169 mutex_lock(&ops->func_hash->regex_lock);
3f2367ba 4170
f45948e8 4171 if (enable)
33b7f99c 4172 orig_hash = &ops->func_hash->filter_hash;
f45948e8 4173 else
33b7f99c 4174 orig_hash = &ops->func_hash->notrace_hash;
33dc9b12 4175
b972cc58
WN
4176 if (reset)
4177 hash = alloc_ftrace_hash(FTRACE_HASH_DEFAULT_BITS);
4178 else
4179 hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
4180
3f2367ba
MH
4181 if (!hash) {
4182 ret = -ENOMEM;
4183 goto out_regex_unlock;
4184 }
f45948e8 4185
ac483c44
JO
4186 if (buf && !ftrace_match_records(hash, buf, len)) {
4187 ret = -EINVAL;
4188 goto out_regex_unlock;
4189 }
647664ea
MH
4190 if (ip) {
4191 ret = ftrace_match_addr(hash, ip, remove);
4192 if (ret < 0)
4193 goto out_regex_unlock;
4194 }
33dc9b12
SR
4195
4196 mutex_lock(&ftrace_lock);
3296fc4e 4197 old_hash = *orig_hash;
7485058e
SRRH
4198 old_hash_ops.filter_hash = ops->func_hash->filter_hash;
4199 old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
41fb61c2 4200 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3296fc4e 4201 if (!ret) {
7485058e 4202 ftrace_ops_update_code(ops, &old_hash_ops);
3296fc4e
SRRH
4203 free_ftrace_hash_rcu(old_hash);
4204 }
33dc9b12
SR
4205 mutex_unlock(&ftrace_lock);
4206
ac483c44 4207 out_regex_unlock:
33b7f99c 4208 mutex_unlock(&ops->func_hash->regex_lock);
33dc9b12
SR
4209
4210 free_ftrace_hash(hash);
4211 return ret;
41c52c0d
SR
4212}
4213
647664ea
MH
4214static int
4215ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
4216 int reset, int enable)
4217{
4218 return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
4219}
4220
4221/**
4222 * ftrace_set_filter_ip - set a function to filter on in ftrace by address
4223 * @ops - the ops to set the filter with
4224 * @ip - the address to add to or remove from the filter.
4225 * @remove - non zero to remove the ip from the filter
4226 * @reset - non zero to reset all filters before applying this filter.
4227 *
4228 * Filters denote which functions should be enabled when tracing is enabled.
4229 * If @ip is 0, it fails to update the filter.
4230 */
4231int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
4232 int remove, int reset)
4233{
f04f24fb 4234 ftrace_ops_init(ops);
647664ea
MH
4235 return ftrace_set_addr(ops, ip, remove, reset, 1);
4236}
4237EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
4238
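/*
 * Illustrative sketch (not part of the original file): filtering on a
 * single address, here resolved through kallsyms (assumes
 * kallsyms_lookup_name() is available). "my_ops" is a hypothetical,
 * already initialized ftrace_ops.
 */
#if 0
static int __init my_filter_ip_init(void)
{
	unsigned long addr = kallsyms_lookup_name("vfs_read");

	if (!addr)
		return -ENOENT;

	/* remove = 0 adds the address, reset = 1 clears old filters */
	return ftrace_set_filter_ip(&my_ops, addr, 0, 1);
}
#endif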
4239static int
4240ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
4241 int reset, int enable)
4242{
4243 return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
4244}
4245
77a2b37d
SR
4246/**
4247 * ftrace_set_filter - set a function to filter on in ftrace
936e074b
SR
4248 * @ops - the ops to set the filter with
4249 * @buf - the string that holds the function filter text.
4250 * @len - the length of the string.
4251 * @reset - non zero to reset all filters before applying this filter.
4252 *
4253 * Filters denote which functions should be enabled when tracing is enabled.
4254 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4255 */
ac483c44 4256int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
4257 int len, int reset)
4258{
f04f24fb 4259 ftrace_ops_init(ops);
ac483c44 4260 return ftrace_set_regex(ops, buf, len, reset, 1);
936e074b
SR
4261}
4262EXPORT_SYMBOL_GPL(ftrace_set_filter);
4263
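/*
 * Illustrative sketch (not part of the original file): a typical caller
 * pairs ftrace_set_filter() with register_ftrace_function(). All "my_*"
 * names are hypothetical.
 */
#if 0
static void my_trace_func(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *regs)
{
	/* called on entry of every function the filter left enabled */
}

static struct ftrace_ops my_ops = {
	.func	= my_trace_func,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_tracer_init(void)
{
	/* trace only functions matching "vfs_*"; reset old filters first */
	ftrace_set_filter(&my_ops, "vfs_*", strlen("vfs_*"), 1);
	return register_ftrace_function(&my_ops);
}
#endif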
4264/**
4265 * ftrace_set_notrace - set a function to not trace in ftrace
4266 * @ops - the ops to set the notrace filter with
4267 * @buf - the string that holds the function notrace text.
4268 * @len - the length of the string.
4269 * @reset - non zero to reset all filters before applying this filter.
4270 *
4271 * Notrace Filters denote which functions should not be enabled when tracing
4272 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4273 * for tracing.
4274 */
ac483c44 4275int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
936e074b
SR
4276 int len, int reset)
4277{
f04f24fb 4278 ftrace_ops_init(ops);
ac483c44 4279 return ftrace_set_regex(ops, buf, len, reset, 0);
936e074b
SR
4280}
4281EXPORT_SYMBOL_GPL(ftrace_set_notrace);
4282/**
8d1b065d 4283 * ftrace_set_global_filter - set a function to filter on with global tracers
77a2b37d
SR
4284 * @buf - the string that holds the function filter text.
4285 * @len - the length of the string.
4286 * @reset - non zero to reset all filters before applying this filter.
4287 *
4288 * Filters denote which functions should be enabled when tracing is enabled.
4289 * If @buf is NULL and reset is set, all functions will be enabled for tracing.
4290 */
936e074b 4291void ftrace_set_global_filter(unsigned char *buf, int len, int reset)
77a2b37d 4292{
f45948e8 4293 ftrace_set_regex(&global_ops, buf, len, reset, 1);
41c52c0d 4294}
936e074b 4295EXPORT_SYMBOL_GPL(ftrace_set_global_filter);
4eebcc81 4296
41c52c0d 4297/**
8d1b065d 4298 * ftrace_set_global_notrace - set a function to not trace with global tracers
41c52c0d
SR
4299 * @buf - the string that holds the function notrace text.
4300 * @len - the length of the string.
4301 * @reset - non zero to reset all filters before applying this filter.
4302 *
4303 * Notrace Filters denote which functions should not be enabled when tracing
4304 * is enabled. If @buf is NULL and reset is set, all functions will be enabled
4305 * for tracing.
4306 */
936e074b 4307void ftrace_set_global_notrace(unsigned char *buf, int len, int reset)
41c52c0d 4308{
f45948e8 4309 ftrace_set_regex(&global_ops, buf, len, reset, 0);
77a2b37d 4310}
936e074b 4311EXPORT_SYMBOL_GPL(ftrace_set_global_notrace);
77a2b37d 4312
2af15d6a
SR
4313/*
4314 * command line interface to allow users to set filters on boot up.
4315 */
4316#define FTRACE_FILTER_SIZE COMMAND_LINE_SIZE
4317static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
4318static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
4319
f1ed7c74
SRRH
4320/* Used by function selftest to not test if filter is set */
4321bool ftrace_filter_param __initdata;
4322
2af15d6a
SR
4323static int __init set_ftrace_notrace(char *str)
4324{
f1ed7c74 4325 ftrace_filter_param = true;
75761cc1 4326 strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
4327 return 1;
4328}
4329__setup("ftrace_notrace=", set_ftrace_notrace);
4330
4331static int __init set_ftrace_filter(char *str)
4332{
f1ed7c74 4333 ftrace_filter_param = true;
75761cc1 4334 strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2af15d6a
SR
4335 return 1;
4336}
4337__setup("ftrace_filter=", set_ftrace_filter);
4338
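/*
 * Annotation (not part of the original file): example kernel command
 * line usage of the two parameters registered above; both accept a
 * comma-separated list, as set_ftrace_early_filter() below splits on ',':
 *
 *   ftrace_filter=vfs_read,vfs_write
 *   ftrace_notrace=*spin_lock*
 */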
369bc18f 4339#ifdef CONFIG_FUNCTION_GRAPH_TRACER
f6060f46 4340static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
0d7d9a16 4341static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
faf982a6 4342static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
801c29fd 4343
f3bea491
SRRH
4344static unsigned long save_global_trampoline;
4345static unsigned long save_global_flags;
4346
369bc18f
SA
4347static int __init set_graph_function(char *str)
4348{
06f43d66 4349 strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
369bc18f
SA
4350 return 1;
4351}
4352__setup("ftrace_graph_filter=", set_graph_function);
4353
0d7d9a16
NK
4354static int __init set_graph_notrace_function(char *str)
4355{
4356 strlcpy(ftrace_graph_notrace_buf, str, FTRACE_FILTER_SIZE);
4357 return 1;
4358}
4359__setup("ftrace_graph_notrace=", set_graph_notrace_function);
4360
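/*
 * Annotation (not part of the original file): the graph parameters are
 * also comma-separated, one expression per entry, e.g.:
 *
 *   ftrace_graph_filter=kmem_cache_alloc,kfree
 *   ftrace_graph_notrace=rcu_*
 */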
4361static void __init set_ftrace_early_graph(char *buf, int enable)
369bc18f
SA
4362{
4363 int ret;
4364 char *func;
0d7d9a16
NK
4365 unsigned long *table = ftrace_graph_funcs;
4366 int *count = &ftrace_graph_count;
4367
4368 if (!enable) {
4369 table = ftrace_graph_notrace_funcs;
4370 count = &ftrace_graph_notrace_count;
4371 }
369bc18f
SA
4372
4373 while (buf) {
4374 func = strsep(&buf, ",");
4375 /* we allow only one expression at a time */
0d7d9a16 4376 ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
369bc18f
SA
4377 if (ret)
4378 printk(KERN_DEBUG "ftrace: function %s not "
4379 "traceable\n", func);
4380 }
4381}
4382#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4383
2a85a37f
SR
4384void __init
4385ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2af15d6a
SR
4386{
4387 char *func;
4388
f04f24fb
MH
4389 ftrace_ops_init(ops);
4390
2af15d6a
SR
4391 while (buf) {
4392 func = strsep(&buf, ",");
f45948e8 4393 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2af15d6a
SR
4394 }
4395}
4396
4397static void __init set_ftrace_early_filters(void)
4398{
4399 if (ftrace_filter_buf[0])
2a85a37f 4400 ftrace_set_early_filter(&global_ops, ftrace_filter_buf, 1);
2af15d6a 4401 if (ftrace_notrace_buf[0])
2a85a37f 4402 ftrace_set_early_filter(&global_ops, ftrace_notrace_buf, 0);
369bc18f
SA
4403#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4404 if (ftrace_graph_buf[0])
0d7d9a16
NK
4405 set_ftrace_early_graph(ftrace_graph_buf, 1);
4406 if (ftrace_graph_notrace_buf[0])
4407 set_ftrace_early_graph(ftrace_graph_notrace_buf, 0);
369bc18f 4408#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2af15d6a
SR
4409}
4410
fc13cb0c 4411int ftrace_regex_release(struct inode *inode, struct file *file)
5072c59f
SR
4412{
4413 struct seq_file *m = (struct seq_file *)file->private_data;
7485058e 4414 struct ftrace_ops_hash old_hash_ops;
5072c59f 4415 struct ftrace_iterator *iter;
33dc9b12 4416 struct ftrace_hash **orig_hash;
3296fc4e 4417 struct ftrace_hash *old_hash;
689fd8b6 4418 struct trace_parser *parser;
ed926f9b 4419 int filter_hash;
33dc9b12 4420 int ret;
5072c59f 4421
5072c59f
SR
4422 if (file->f_mode & FMODE_READ) {
4423 iter = m->private;
5072c59f
SR
4424 seq_release(inode, file);
4425 } else
4426 iter = file->private_data;
4427
689fd8b6 4428 parser = &iter->parser;
4429 if (trace_parser_loaded(parser)) {
4430 parser->buffer[parser->idx] = 0;
1cf41dd7 4431 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
5072c59f
SR
4432 }
4433
689fd8b6 4434 trace_parser_put(parser);
689fd8b6 4435
33b7f99c 4436 mutex_lock(&iter->ops->func_hash->regex_lock);
3f2367ba 4437
058e297d 4438 if (file->f_mode & FMODE_WRITE) {
ed926f9b
SR
4439 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
4440
4441 if (filter_hash)
33b7f99c 4442 orig_hash = &iter->ops->func_hash->filter_hash;
ed926f9b 4443 else
33b7f99c 4444 orig_hash = &iter->ops->func_hash->notrace_hash;
33dc9b12 4445
058e297d 4446 mutex_lock(&ftrace_lock);
3296fc4e 4447 old_hash = *orig_hash;
7485058e
SRRH
4448 old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
4449 old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
41fb61c2
SR
4450 ret = ftrace_hash_move(iter->ops, filter_hash,
4451 orig_hash, iter->hash);
3296fc4e 4452 if (!ret) {
7485058e 4453 ftrace_ops_update_code(iter->ops, &old_hash_ops);
3296fc4e
SRRH
4454 free_ftrace_hash_rcu(old_hash);
4455 }
058e297d
SR
4456 mutex_unlock(&ftrace_lock);
4457 }
3f2367ba 4458
33b7f99c 4459 mutex_unlock(&iter->ops->func_hash->regex_lock);
33dc9b12
SR
4460 free_ftrace_hash(iter->hash);
4461 kfree(iter);
058e297d 4462
5072c59f
SR
4463 return 0;
4464}
4465
5e2336a0 4466static const struct file_operations ftrace_avail_fops = {
5072c59f
SR
4467 .open = ftrace_avail_open,
4468 .read = seq_read,
4469 .llseek = seq_lseek,
3be04b47 4470 .release = seq_release_private,
5072c59f
SR
4471};
4472
647bcd03
SR
4473static const struct file_operations ftrace_enabled_fops = {
4474 .open = ftrace_enabled_open,
4475 .read = seq_read,
4476 .llseek = seq_lseek,
4477 .release = seq_release_private,
4478};
4479
5e2336a0 4480static const struct file_operations ftrace_filter_fops = {
5072c59f 4481 .open = ftrace_filter_open,
850a80cf 4482 .read = seq_read,
5072c59f 4483 .write = ftrace_filter_write,
098c879e 4484 .llseek = tracing_lseek,
1cf41dd7 4485 .release = ftrace_regex_release,
5072c59f
SR
4486};
4487
5e2336a0 4488static const struct file_operations ftrace_notrace_fops = {
41c52c0d 4489 .open = ftrace_notrace_open,
850a80cf 4490 .read = seq_read,
41c52c0d 4491 .write = ftrace_notrace_write,
098c879e 4492 .llseek = tracing_lseek,
1cf41dd7 4493 .release = ftrace_regex_release,
41c52c0d
SR
4494};
4495
ea4e2bc4
SR
4496#ifdef CONFIG_FUNCTION_GRAPH_TRACER
4497
4498static DEFINE_MUTEX(graph_lock);
4499
4500int ftrace_graph_count;
29ad23b0 4501int ftrace_graph_notrace_count;
ea4e2bc4 4502unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
29ad23b0 4503unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
ea4e2bc4 4504
faf982a6
NK
4505struct ftrace_graph_data {
4506 unsigned long *table;
4507 size_t size;
4508 int *count;
4509 const struct seq_operations *seq_ops;
4510};
4511
ea4e2bc4 4512static void *
85951842 4513__g_next(struct seq_file *m, loff_t *pos)
ea4e2bc4 4514{
faf982a6
NK
4515 struct ftrace_graph_data *fgd = m->private;
4516
4517 if (*pos >= *fgd->count)
ea4e2bc4 4518 return NULL;
faf982a6 4519 return &fgd->table[*pos];
85951842 4520}
ea4e2bc4 4521
85951842
LZ
4522static void *
4523g_next(struct seq_file *m, void *v, loff_t *pos)
4524{
4525 (*pos)++;
4526 return __g_next(m, pos);
ea4e2bc4
SR
4527}
4528
4529static void *g_start(struct seq_file *m, loff_t *pos)
4530{
faf982a6
NK
4531 struct ftrace_graph_data *fgd = m->private;
4532
ea4e2bc4
SR
4533 mutex_lock(&graph_lock);
4534
f9349a8f 4535 /* Nothing registered; tell g_show to print that all functions are enabled */
faf982a6 4536 if (!*fgd->count && !*pos)
f9349a8f
FW
4537 return (void *)1;
4538
85951842 4539 return __g_next(m, pos);
ea4e2bc4
SR
4540}
4541
4542static void g_stop(struct seq_file *m, void *p)
4543{
4544 mutex_unlock(&graph_lock);
4545}
4546
4547static int g_show(struct seq_file *m, void *v)
4548{
4549 unsigned long *ptr = v;
ea4e2bc4
SR
4550
4551 if (!ptr)
4552 return 0;
4553
f9349a8f 4554 if (ptr == (unsigned long *)1) {
280d1429
NK
4555 struct ftrace_graph_data *fgd = m->private;
4556
4557 if (fgd->table == ftrace_graph_funcs)
fa6f0cc7 4558 seq_puts(m, "#### all functions enabled ####\n");
280d1429 4559 else
fa6f0cc7 4560 seq_puts(m, "#### no functions disabled ####\n");
f9349a8f
FW
4561 return 0;
4562 }
4563
b375a11a 4564 seq_printf(m, "%ps\n", (void *)*ptr);
ea4e2bc4
SR
4565
4566 return 0;
4567}
4568
88e9d34c 4569static const struct seq_operations ftrace_graph_seq_ops = {
ea4e2bc4
SR
4570 .start = g_start,
4571 .next = g_next,
4572 .stop = g_stop,
4573 .show = g_show,
4574};
4575
4576static int
faf982a6
NK
4577__ftrace_graph_open(struct inode *inode, struct file *file,
4578 struct ftrace_graph_data *fgd)
ea4e2bc4
SR
4579{
4580 int ret = 0;
4581
ea4e2bc4
SR
4582 mutex_lock(&graph_lock);
4583 if ((file->f_mode & FMODE_WRITE) &&
8650ae32 4584 (file->f_flags & O_TRUNC)) {
faf982a6
NK
4585 *fgd->count = 0;
4586 memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
ea4e2bc4 4587 }
a4ec5e0c 4588 mutex_unlock(&graph_lock);
ea4e2bc4 4589
faf982a6
NK
4590 if (file->f_mode & FMODE_READ) {
4591 ret = seq_open(file, fgd->seq_ops);
4592 if (!ret) {
4593 struct seq_file *m = file->private_data;
4594 m->private = fgd;
4595 }
4596 } else
4597 file->private_data = fgd;
ea4e2bc4
SR
4598
4599 return ret;
4600}
4601
faf982a6
NK
4602static int
4603ftrace_graph_open(struct inode *inode, struct file *file)
4604{
4605 struct ftrace_graph_data *fgd;
4606
4607 if (unlikely(ftrace_disabled))
4608 return -ENODEV;
4609
4610 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4611 if (fgd == NULL)
4612 return -ENOMEM;
4613
4614 fgd->table = ftrace_graph_funcs;
4615 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4616 fgd->count = &ftrace_graph_count;
4617 fgd->seq_ops = &ftrace_graph_seq_ops;
4618
4619 return __ftrace_graph_open(inode, file, fgd);
4620}
4621
29ad23b0
NK
4622static int
4623ftrace_graph_notrace_open(struct inode *inode, struct file *file)
4624{
4625 struct ftrace_graph_data *fgd;
4626
4627 if (unlikely(ftrace_disabled))
4628 return -ENODEV;
4629
4630 fgd = kmalloc(sizeof(*fgd), GFP_KERNEL);
4631 if (fgd == NULL)
4632 return -ENOMEM;
4633
4634 fgd->table = ftrace_graph_notrace_funcs;
4635 fgd->size = FTRACE_GRAPH_MAX_FUNCS;
4636 fgd->count = &ftrace_graph_notrace_count;
4637 fgd->seq_ops = &ftrace_graph_seq_ops;
4638
4639 return __ftrace_graph_open(inode, file, fgd);
4640}
4641
87827111
LZ
4642static int
4643ftrace_graph_release(struct inode *inode, struct file *file)
4644{
faf982a6
NK
4645 if (file->f_mode & FMODE_READ) {
4646 struct seq_file *m = file->private_data;
4647
4648 kfree(m->private);
87827111 4649 seq_release(inode, file);
faf982a6
NK
4650 } else {
4651 kfree(file->private_data);
4652 }
4653
87827111
LZ
4654 return 0;
4655}
4656
ea4e2bc4 4657static int
faf982a6 4658ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
ea4e2bc4 4659{
3ba00929 4660 struct ftrace_glob func_g;
ea4e2bc4
SR
4661 struct dyn_ftrace *rec;
4662 struct ftrace_page *pg;
c7c6b1fe 4663 int fail = 1;
3ba00929 4664 int not;
f9349a8f
FW
4665 bool exists;
4666 int i;
ea4e2bc4 4667
f9349a8f 4668 /* decode regex */
3ba00929
DS
4669 func_g.type = filter_parse_regex(buffer, strlen(buffer),
4670 &func_g.search, &not);
faf982a6 4671 if (!not && *idx >= size)
c7c6b1fe 4672 return -EBUSY;
f9349a8f 4673
3ba00929 4674 func_g.len = strlen(func_g.search);
f9349a8f 4675
52baf119 4676 mutex_lock(&ftrace_lock);
45a4a237
SR
4677
4678 if (unlikely(ftrace_disabled)) {
4679 mutex_unlock(&ftrace_lock);
4680 return -ENODEV;
4681 }
4682
265c831c
SR
4683 do_for_each_ftrace_rec(pg, rec) {
4684
0b507e1e 4685 if (ftrace_match_record(rec, &func_g, NULL, 0)) {
c7c6b1fe 4686 /* if it is in the array */
f9349a8f 4687 exists = false;
c7c6b1fe 4688 for (i = 0; i < *idx; i++) {
f9349a8f
FW
4689 if (array[i] == rec->ip) {
4690 exists = true;
265c831c
SR
4691 break;
4692 }
c7c6b1fe
LZ
4693 }
4694
4695 if (!not) {
4696 fail = 0;
4697 if (!exists) {
4698 array[(*idx)++] = rec->ip;
faf982a6 4699 if (*idx >= size)
c7c6b1fe
LZ
4700 goto out;
4701 }
4702 } else {
4703 if (exists) {
4704 array[i] = array[--(*idx)];
4705 array[*idx] = 0;
4706 fail = 0;
4707 }
4708 }
ea4e2bc4 4709 }
265c831c 4710 } while_for_each_ftrace_rec();
c7c6b1fe 4711out:
52baf119 4712 mutex_unlock(&ftrace_lock);
ea4e2bc4 4713
c7c6b1fe
LZ
4714 if (fail)
4715 return -EINVAL;
4716
c7c6b1fe 4717 return 0;
ea4e2bc4
SR
4718}
4719
4720static ssize_t
4721ftrace_graph_write(struct file *file, const char __user *ubuf,
4722 size_t cnt, loff_t *ppos)
4723{
689fd8b6 4724 struct trace_parser parser;
6a10108b 4725 ssize_t read, ret = 0;
faf982a6 4726 struct ftrace_graph_data *fgd = file->private_data;
ea4e2bc4 4727
c7c6b1fe 4728 if (!cnt)
ea4e2bc4
SR
4729 return 0;
4730
6a10108b
NK
4731 if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX))
4732 return -ENOMEM;
ea4e2bc4 4733
689fd8b6 4734 read = trace_get_user(&parser, ubuf, cnt, ppos);
ea4e2bc4 4735
4ba7978e 4736 if (read >= 0 && trace_parser_loaded((&parser))) {
689fd8b6 4737 parser.buffer[parser.idx] = 0;
4738
6a10108b
NK
4739 mutex_lock(&graph_lock);
4740
689fd8b6 4741 /* we allow only one expression at a time */
faf982a6
NK
4742 ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
4743 parser.buffer);
6a10108b
NK
4744
4745 mutex_unlock(&graph_lock);
ea4e2bc4 4746 }
ea4e2bc4 4747
6a10108b
NK
4748 if (!ret)
4749 ret = read;
1eb90f13 4750
689fd8b6 4751 trace_parser_put(&parser);
ea4e2bc4
SR
4752
4753 return ret;
4754}
4755
4756static const struct file_operations ftrace_graph_fops = {
87827111
LZ
4757 .open = ftrace_graph_open,
4758 .read = seq_read,
4759 .write = ftrace_graph_write,
098c879e 4760 .llseek = tracing_lseek,
87827111 4761 .release = ftrace_graph_release,
ea4e2bc4 4762};
29ad23b0
NK
4763
4764static const struct file_operations ftrace_graph_notrace_fops = {
4765 .open = ftrace_graph_notrace_open,
4766 .read = seq_read,
4767 .write = ftrace_graph_write,
098c879e 4768 .llseek = tracing_lseek,
29ad23b0
NK
4769 .release = ftrace_graph_release,
4770};
ea4e2bc4
SR
4771#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4772
591dffda
SRRH
4773void ftrace_create_filter_files(struct ftrace_ops *ops,
4774 struct dentry *parent)
4775{
4776
4777 trace_create_file("set_ftrace_filter", 0644, parent,
4778 ops, &ftrace_filter_fops);
4779
4780 trace_create_file("set_ftrace_notrace", 0644, parent,
4781 ops, &ftrace_notrace_fops);
4782}
4783
4784/*
4785 * The name "destroy_filter_files" is really a misnomer. In the
4786 * future it may actually delete the files, but for now it is
4787 * really intended to make sure the ops passed in are disabled
4788 * and that when this function returns, the caller is free to
4789 * free the ops.
4790 *
4791 * The "destroy" name is only to match the "create" name that this
4792 * should be paired with.
4793 */
4794void ftrace_destroy_filter_files(struct ftrace_ops *ops)
4795{
4796 mutex_lock(&ftrace_lock);
4797 if (ops->flags & FTRACE_OPS_FL_ENABLED)
4798 ftrace_shutdown(ops, 0);
4799 ops->flags |= FTRACE_OPS_FL_DELETED;
4800 mutex_unlock(&ftrace_lock);
4801}
4802
8434dc93 4803static __init int ftrace_init_dyn_tracefs(struct dentry *d_tracer)
5072c59f 4804{
5072c59f 4805
5452af66
FW
4806 trace_create_file("available_filter_functions", 0444,
4807 d_tracer, NULL, &ftrace_avail_fops);
5072c59f 4808
647bcd03
SR
4809 trace_create_file("enabled_functions", 0444,
4810 d_tracer, NULL, &ftrace_enabled_fops);
4811
591dffda 4812 ftrace_create_filter_files(&global_ops, d_tracer);
ad90c0e3 4813
ea4e2bc4 4814#ifdef CONFIG_FUNCTION_GRAPH_TRACER
5452af66 4815 trace_create_file("set_graph_function", 0444, d_tracer,
ea4e2bc4
SR
4816 NULL,
4817 &ftrace_graph_fops);
29ad23b0
NK
4818 trace_create_file("set_graph_notrace", 0444, d_tracer,
4819 NULL,
4820 &ftrace_graph_notrace_fops);
ea4e2bc4
SR
4821#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
4822
5072c59f
SR
4823 return 0;
4824}
4825
9fd49328 4826static int ftrace_cmp_ips(const void *a, const void *b)
68950619 4827{
9fd49328
SR
4828 const unsigned long *ipa = a;
4829 const unsigned long *ipb = b;
68950619 4830
9fd49328
SR
4831 if (*ipa > *ipb)
4832 return 1;
4833 if (*ipa < *ipb)
4834 return -1;
4835 return 0;
4836}
4837
5cb084bb 4838static int ftrace_process_locs(struct module *mod,
31e88909 4839 unsigned long *start,
68bf21aa
SR
4840 unsigned long *end)
4841{
706c81f8 4842 struct ftrace_page *start_pg;
a7900875 4843 struct ftrace_page *pg;
706c81f8 4844 struct dyn_ftrace *rec;
a7900875 4845 unsigned long count;
68bf21aa
SR
4846 unsigned long *p;
4847 unsigned long addr;
4376cac6 4848 unsigned long flags = 0; /* Shut up gcc */
a7900875
SR
4849 int ret = -ENOMEM;
4850
4851 count = end - start;
4852
4853 if (!count)
4854 return 0;
4855
9fd49328 4856 sort(start, count, sizeof(*start),
6db02903 4857 ftrace_cmp_ips, NULL);
9fd49328 4858
706c81f8
SR
4859 start_pg = ftrace_allocate_pages(count);
4860 if (!start_pg)
a7900875 4861 return -ENOMEM;
68bf21aa 4862
e6ea44e9 4863 mutex_lock(&ftrace_lock);
a7900875 4864
32082309
SR
4865 /*
4866 * Core and each module need their own pages, as
4867 * modules will free them when they are removed.
4868 * Force a new page to be allocated for modules.
4869 */
a7900875
SR
4870 if (!mod) {
4871 WARN_ON(ftrace_pages || ftrace_pages_start);
4872 /* First initialization */
706c81f8 4873 ftrace_pages = ftrace_pages_start = start_pg;
a7900875 4874 } else {
32082309 4875 if (!ftrace_pages)
a7900875 4876 goto out;
32082309 4877
a7900875
SR
4878 if (WARN_ON(ftrace_pages->next)) {
4879 /* Hmm, we have free pages? */
4880 while (ftrace_pages->next)
4881 ftrace_pages = ftrace_pages->next;
32082309 4882 }
a7900875 4883
706c81f8 4884 ftrace_pages->next = start_pg;
32082309
SR
4885 }
4886
68bf21aa 4887 p = start;
706c81f8 4888 pg = start_pg;
68bf21aa
SR
4889 while (p < end) {
4890 addr = ftrace_call_adjust(*p++);
20e5227e
SR
4891 /*
4892 * Some architecture linkers will pad between
4893 * the different mcount_loc sections of different
4894 * object files to satisfy alignments.
4895 * Skip any NULL pointers.
4896 */
4897 if (!addr)
4898 continue;
706c81f8
SR
4899
4900 if (pg->index == pg->size) {
4901 /* We should have allocated enough */
4902 if (WARN_ON(!pg->next))
4903 break;
4904 pg = pg->next;
4905 }
4906
4907 rec = &pg->records[pg->index++];
4908 rec->ip = addr;
68bf21aa
SR
4909 }
4910
706c81f8
SR
4911 /* We should have used all pages */
4912 WARN_ON(pg->next);
4913
4914 /* Assign the last page to ftrace_pages */
4915 ftrace_pages = pg;
4916
a4f18ed1 4917 /*
4376cac6
SR
4918 * We only need to disable interrupts on start up
4919 * because we are modifying code that an interrupt
4920 * may execute, and the modification is not atomic.
4921 * But for modules, nothing runs the code we modify
4922 * until we are finished with it, and there's no
4923 * reason to cause large interrupt latencies while we do it.
a4f18ed1 4924 */
4376cac6
SR
4925 if (!mod)
4926 local_irq_save(flags);
1dc43cf0 4927 ftrace_update_code(mod, start_pg);
4376cac6
SR
4928 if (!mod)
4929 local_irq_restore(flags);
a7900875
SR
4930 ret = 0;
4931 out:
e6ea44e9 4932 mutex_unlock(&ftrace_lock);
68bf21aa 4933
a7900875 4934 return ret;
68bf21aa
SR
4935}
4936
93eb677d 4937#ifdef CONFIG_MODULES
32082309
SR
4938
4939#define next_to_ftrace_page(p) container_of(p, struct ftrace_page, next)
4940
e7247a15 4941void ftrace_release_mod(struct module *mod)
93eb677d
SR
4942{
4943 struct dyn_ftrace *rec;
32082309 4944 struct ftrace_page **last_pg;
93eb677d 4945 struct ftrace_page *pg;
a7900875 4946 int order;
93eb677d 4947
45a4a237
SR
4948 mutex_lock(&ftrace_lock);
4949
e7247a15 4950 if (ftrace_disabled)
45a4a237 4951 goto out_unlock;
93eb677d 4952
32082309
SR
4953 /*
4954 * Each module has its own ftrace_pages, remove
4955 * them from the list.
4956 */
4957 last_pg = &ftrace_pages_start;
4958 for (pg = ftrace_pages_start; pg; pg = *last_pg) {
4959 rec = &pg->records[0];
e7247a15 4960 if (within_module_core(rec->ip, mod)) {
93eb677d 4961 /*
32082309
SR
4962 * As core pages are first, the first
4963 * page should never be a module page.
93eb677d 4964 */
32082309
SR
4965 if (WARN_ON(pg == ftrace_pages_start))
4966 goto out_unlock;
4967
4968 /* Check if we are deleting the last page */
4969 if (pg == ftrace_pages)
4970 ftrace_pages = next_to_ftrace_page(last_pg);
4971
4972 *last_pg = pg->next;
a7900875
SR
4973 order = get_count_order(pg->size / ENTRIES_PER_PAGE);
4974 free_pages((unsigned long)pg->records, order);
4975 kfree(pg);
32082309
SR
4976 } else
4977 last_pg = &pg->next;
4978 }
45a4a237 4979 out_unlock:
93eb677d
SR
4980 mutex_unlock(&ftrace_lock);
4981}
4982
4983static void ftrace_init_module(struct module *mod,
4984 unsigned long *start, unsigned long *end)
90d595fe 4985{
00fd61ae 4986 if (ftrace_disabled || start == end)
fed1939c 4987 return;
5cb084bb 4988 ftrace_process_locs(mod, start, end);
90d595fe
SR
4989}
4990
a949ae56 4991void ftrace_module_init(struct module *mod)
93eb677d 4992{
a949ae56
SRRH
4993 ftrace_init_module(mod, mod->ftrace_callsites,
4994 mod->ftrace_callsites +
4995 mod->num_ftrace_callsites);
8c189ea6
SRRH
4996}
4997
4998static int ftrace_module_notify_exit(struct notifier_block *self,
4999 unsigned long val, void *data)
5000{
5001 struct module *mod = data;
5002
5003 if (val == MODULE_STATE_GOING)
e7247a15 5004 ftrace_release_mod(mod);
93eb677d
SR
5005
5006 return 0;
5007}
5008#else
8c189ea6
SRRH
5009static int ftrace_module_notify_exit(struct notifier_block *self,
5010 unsigned long val, void *data)
93eb677d
SR
5011{
5012 return 0;
5013}
5014#endif /* CONFIG_MODULES */
5015
8c189ea6
SRRH
5016struct notifier_block ftrace_module_exit_nb = {
5017 .notifier_call = ftrace_module_notify_exit,
5018 .priority = INT_MIN, /* Run after anything that can remove kprobes */
5019};
5020
68bf21aa
SR
5021void __init ftrace_init(void)
5022{
1dc43cf0
JS
5023 extern unsigned long __start_mcount_loc[];
5024 extern unsigned long __stop_mcount_loc[];
3a36cb11 5025 unsigned long count, flags;
68bf21aa
SR
5026 int ret;
5027
68bf21aa 5028 local_irq_save(flags);
3a36cb11 5029 ret = ftrace_dyn_arch_init();
68bf21aa 5030 local_irq_restore(flags);
af64a7cb 5031 if (ret)
68bf21aa
SR
5032 goto failed;
5033
5034 count = __stop_mcount_loc - __start_mcount_loc;
c867ccd8
JS
5035 if (!count) {
5036 pr_info("ftrace: No functions to be traced?\n");
68bf21aa 5037 goto failed;
c867ccd8
JS
5038 }
5039
5040 pr_info("ftrace: allocating %ld entries in %ld pages\n",
5041 count, count / ENTRIES_PER_PAGE + 1);
68bf21aa
SR
5042
5043 last_ftrace_enabled = ftrace_enabled = 1;
5044
5cb084bb 5045 ret = ftrace_process_locs(NULL,
31e88909 5046 __start_mcount_loc,
68bf21aa
SR
5047 __stop_mcount_loc);
5048
8c189ea6 5049 ret = register_module_notifier(&ftrace_module_exit_nb);
24ed0c4b 5050 if (ret)
8c189ea6 5051 pr_warning("Failed to register trace ftrace module exit notifier\n");
93eb677d 5052
2af15d6a
SR
5053 set_ftrace_early_filters();
5054
68bf21aa
SR
5055 return;
5056 failed:
5057 ftrace_disabled = 1;
5058}
68bf21aa 5059
f3bea491
SRRH
5060/* Do nothing if arch does not support this */
5061void __weak arch_ftrace_update_trampoline(struct ftrace_ops *ops)
5062{
5063}
5064
5065static void ftrace_update_trampoline(struct ftrace_ops *ops)
5066{
12cce594
SRRH
5067
5068/*
5069 * Currently there's no safe way to free a trampoline when the kernel
5070 * is configured with PREEMPT. That is because a task could be preempted
5071 * while it is on the trampoline; it may stay preempted for a long time
5072 * depending on the system load, and currently there's no way to know
5073 * when it will be off the trampoline. If the trampoline is freed
5074 * too early, when the task runs again, it will be executing on freed
5075 * memory and crash.
5076 */
5077#ifdef CONFIG_PREEMPT
f3bea491
SRRH
5078 /* Currently, only non dynamic ops can have a trampoline */
5079 if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
5080 return;
12cce594 5081#endif
f3bea491
SRRH
5082
5083 arch_ftrace_update_trampoline(ops);
5084}
5085
3d083395 5086#else
0b6e4d56 5087
2b499381 5088static struct ftrace_ops global_ops = {
bd69c30b 5089 .func = ftrace_stub,
e3eea140
SRRH
5090 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5091 FTRACE_OPS_FL_INITIALIZED |
5092 FTRACE_OPS_FL_PID,
bd69c30b
SR
5093};
5094
0b6e4d56
FW
5095static int __init ftrace_nodyn_init(void)
5096{
5097 ftrace_enabled = 1;
5098 return 0;
5099}
6f415672 5100core_initcall(ftrace_nodyn_init);
0b6e4d56 5101
8434dc93 5102static inline int ftrace_init_dyn_tracefs(struct dentry *d_tracer) { return 0; }
df4fc315 5103static inline void ftrace_startup_enable(int command) { }
e1effa01 5104static inline void ftrace_startup_all(int command) { }
5a45cfe1 5105/* Keep as macros so we do not need to define the commands */
8a56d776
SRRH
5106# define ftrace_startup(ops, command) \
5107 ({ \
5108 int ___ret = __register_ftrace_function(ops); \
5109 if (!___ret) \
5110 (ops)->flags |= FTRACE_OPS_FL_ENABLED; \
5111 ___ret; \
3b6cfdb1 5112 })
1fcc1553
SRRH
5113# define ftrace_shutdown(ops, command) \
5114 ({ \
5115 int ___ret = __unregister_ftrace_function(ops); \
5116 if (!___ret) \
5117 (ops)->flags &= ~FTRACE_OPS_FL_ENABLED; \
5118 ___ret; \
5119 })
8a56d776 5120
c7aafc54
IM
5121# define ftrace_startup_sysctl() do { } while (0)
5122# define ftrace_shutdown_sysctl() do { } while (0)
b848914c
SR
5123
5124static inline int
195a8afc 5125ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
b848914c
SR
5126{
5127 return 1;
5128}
5129
f3bea491
SRRH
5130static void ftrace_update_trampoline(struct ftrace_ops *ops)
5131{
5132}
5133
3d083395
SR
5134#endif /* CONFIG_DYNAMIC_FTRACE */
5135
4104d326
SRRH
5136__init void ftrace_init_global_array_ops(struct trace_array *tr)
5137{
5138 tr->ops = &global_ops;
5139 tr->ops->private = tr;
5140}
5141
5142void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5143{
5144 /* If we filter on pids, update to use the pid function */
5145 if (tr->flags & TRACE_ARRAY_FL_GLOBAL) {
5146 if (WARN_ON(tr->ops->func != ftrace_stub))
5147 printk("ftrace ops had %pS for function\n",
5148 tr->ops->func);
4104d326
SRRH
5149 }
5150 tr->ops->func = func;
5151 tr->ops->private = tr;
5152}
5153
5154void ftrace_reset_array_ops(struct trace_array *tr)
5155{
5156 tr->ops->func = ftrace_stub;
5157}
5158
e248491a 5159static void
2f5f6ad9 5160ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 5161 struct ftrace_ops *op, struct pt_regs *regs)
e248491a 5162{
e248491a
JO
5163 if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
5164 return;
5165
5166 /*
5167 * Some of the ops may be dynamically allocated,
5168 * they must be freed after a synchronize_sched().
5169 */
5170 preempt_disable_notrace();
5171 trace_recursion_set(TRACE_CONTROL_BIT);
b5aa3a47
SRRH
5172
5173 /*
5174 * Control funcs (perf) use RCU. Only trace if
5175 * RCU is currently active.
5176 */
5177 if (!rcu_is_watching())
5178 goto out;
5179
0a016409 5180 do_for_each_ftrace_op(op, ftrace_control_list) {
395b97a3
SRRH
5181 if (!(op->flags & FTRACE_OPS_FL_STUB) &&
5182 !ftrace_function_local_disabled(op) &&
195a8afc 5183 ftrace_ops_test(op, ip, regs))
a1e2e31d 5184 op->func(ip, parent_ip, op, regs);
0a016409 5185 } while_for_each_ftrace_op(op);
b5aa3a47 5186 out:
e248491a
JO
5187 trace_recursion_clear(TRACE_CONTROL_BIT);
5188 preempt_enable_notrace();
5189}
5190
5191static struct ftrace_ops control_ops = {
f04f24fb
MH
5192 .func = ftrace_ops_control_func,
5193 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED,
33b7f99c 5194 INIT_OPS_HASH(control_ops)
e248491a
JO
5195};
5196
2f5f6ad9
SR
5197static inline void
5198__ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 5199 struct ftrace_ops *ignored, struct pt_regs *regs)
b848914c 5200{
cdbe61bf 5201 struct ftrace_ops *op;
edc15caf 5202 int bit;
b848914c 5203
edc15caf
SR
5204 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5205 if (bit < 0)
5206 return;
b1cff0ad 5207
cdbe61bf
SR
5208 /*
5209 * Some of the ops may be dynamically allocated,
5210 * they must be freed after a synchronize_sched().
5211 */
5212 preempt_disable_notrace();
0a016409 5213 do_for_each_ftrace_op(op, ftrace_ops_list) {
4104d326 5214 if (ftrace_ops_test(op, ip, regs)) {
1d48d596
SRRH
5215 if (FTRACE_WARN_ON(!op->func)) {
5216 pr_warn("op=%p %pS\n", op, op);
4104d326
SRRH
5217 goto out;
5218 }
a1e2e31d 5219 op->func(ip, parent_ip, op, regs);
4104d326 5220 }
0a016409 5221 } while_for_each_ftrace_op(op);
4104d326 5222out:
cdbe61bf 5223 preempt_enable_notrace();
edc15caf 5224 trace_clear_recursion(bit);
b848914c
SR
5225}
5226
2f5f6ad9
SR
5227/*
5228 * Some archs only support passing ip and parent_ip. Even though
5229 * the list function ignores the op parameter, we do not want any
5230 * C side effects, where a function is called without the caller
5231 * sending a third parameter.
a1e2e31d
SR
5232 * Archs are to support both the regs and ftrace_ops at the same time.
5233 * If they support ftrace_ops, it is assumed they support regs.
5234 * If callbacks want to use regs, they must either check for regs
06aeaaea
MH
5235 * being NULL, or CONFIG_DYNAMIC_FTRACE_WITH_REGS.
5236 * Note, CONFIG_DYNAMIC_FTRACE_WITH_REGS expects a full regs to be saved.
a1e2e31d
SR
5237 * An architecture can pass partial regs with ftrace_ops and still
5238 * set ARCH_SUPPORTS_FTRACE_OPS.
2f5f6ad9
SR
5239 */
5240#if ARCH_SUPPORTS_FTRACE_OPS
5241static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
a1e2e31d 5242 struct ftrace_ops *op, struct pt_regs *regs)
2f5f6ad9 5243{
a1e2e31d 5244 __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
2f5f6ad9
SR
5245}
5246#else
5247static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
5248{
a1e2e31d 5249 __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
2f5f6ad9
SR
5250}
5251#endif
5252
f1ff6348
SRRH
5253/*
5254 * If there's only one function registered but it does not support
5255 * recursion, this function will be called by the mcount trampoline.
5256 * This function will handle recursion protection.
5257 */
5258static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
5259 struct ftrace_ops *op, struct pt_regs *regs)
5260{
5261 int bit;
5262
5263 bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
5264 if (bit < 0)
5265 return;
5266
5267 op->func(ip, parent_ip, op, regs);
5268
5269 trace_clear_recursion(bit);
5270}
5271
87354059
SRRH
5272/**
5273 * ftrace_ops_get_func - get the function a trampoline should call
5274 * @ops: the ops to get the function for
5275 *
5276 * Normally the mcount trampoline will call the ops->func, but there
5277 * are times that it should not. For example, if the ops does not
5278 * have its own recursion protection, then it should call the
5279 * ftrace_ops_recurs_func() instead.
5280 *
5281 * Returns the function that the trampoline should call for @ops.
5282 */
5283ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
5284{
87354059
SRRH
5285 /*
5286 * If the func handles its own recursion, call it directly.
5287 * Otherwise call the recursion protected function that
5288 * will call the ftrace ops function.
5289 */
5290 if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
5291 return ftrace_ops_recurs_func;
5292
5293 return ops->func;
5294}
5295
e32d8956 5296static void clear_ftrace_swapper(void)
978f3a45
SR
5297{
5298 struct task_struct *p;
e32d8956 5299 int cpu;
978f3a45 5300
e32d8956
SR
5301 get_online_cpus();
5302 for_each_online_cpu(cpu) {
5303 p = idle_task(cpu);
978f3a45 5304 clear_tsk_trace_trace(p);
e32d8956
SR
5305 }
5306 put_online_cpus();
5307}
978f3a45 5308
e32d8956
SR
5309static void set_ftrace_swapper(void)
5310{
5311 struct task_struct *p;
5312 int cpu;
5313
5314 get_online_cpus();
5315 for_each_online_cpu(cpu) {
5316 p = idle_task(cpu);
5317 set_tsk_trace_trace(p);
5318 }
5319 put_online_cpus();
978f3a45
SR
5320}
5321
e32d8956
SR
5322static void clear_ftrace_pid(struct pid *pid)
5323{
5324 struct task_struct *p;
5325
229c4ef8 5326 rcu_read_lock();
e32d8956
SR
5327 do_each_pid_task(pid, PIDTYPE_PID, p) {
5328 clear_tsk_trace_trace(p);
5329 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8
ON
5330 rcu_read_unlock();
5331
e32d8956
SR
5332 put_pid(pid);
5333}
5334
5335static void set_ftrace_pid(struct pid *pid)
978f3a45
SR
5336{
5337 struct task_struct *p;
5338
229c4ef8 5339 rcu_read_lock();
978f3a45
SR
5340 do_each_pid_task(pid, PIDTYPE_PID, p) {
5341 set_tsk_trace_trace(p);
5342 } while_each_pid_task(pid, PIDTYPE_PID, p);
229c4ef8 5343 rcu_read_unlock();
978f3a45
SR
5344}
5345
756d17ee 5346static void clear_ftrace_pid_task(struct pid *pid)
e32d8956 5347{
756d17ee 5348 if (pid == ftrace_swapper_pid)
e32d8956
SR
5349 clear_ftrace_swapper();
5350 else
756d17ee 5351 clear_ftrace_pid(pid);
e32d8956
SR
5352}
5353
5354static void set_ftrace_pid_task(struct pid *pid)
5355{
5356 if (pid == ftrace_swapper_pid)
5357 set_ftrace_swapper();
5358 else
5359 set_ftrace_pid(pid);
5360}
5361
756d17ee 5362static int ftrace_pid_add(int p)
df4fc315 5363{
978f3a45 5364 struct pid *pid;
756d17ee 5365 struct ftrace_pid *fpid;
5366 int ret = -EINVAL;
df4fc315 5367
756d17ee 5368 mutex_lock(&ftrace_lock);
df4fc315 5369
756d17ee 5370 if (!p)
5371 pid = ftrace_swapper_pid;
5372 else
5373 pid = find_get_pid(p);
df4fc315 5374
756d17ee 5375 if (!pid)
5376 goto out;
df4fc315 5377
756d17ee 5378 ret = 0;
df4fc315 5379
756d17ee 5380 list_for_each_entry(fpid, &ftrace_pids, list)
5381 if (fpid->pid == pid)
5382 goto out_put;
978f3a45 5383
756d17ee 5384 ret = -ENOMEM;
df4fc315 5385
756d17ee 5386 fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
5387 if (!fpid)
5388 goto out_put;
df4fc315 5389
756d17ee 5390 list_add(&fpid->list, &ftrace_pids);
5391 fpid->pid = pid;
0ef8cde5 5392
756d17ee 5393 set_ftrace_pid_task(pid);
978f3a45 5394
756d17ee 5395 ftrace_update_pid_func();
e1effa01
SRRH
5396
5397 ftrace_startup_all(0);
756d17ee 5398
5399 mutex_unlock(&ftrace_lock);
5400 return 0;
5401
5402out_put:
5403 if (pid != ftrace_swapper_pid)
5404 put_pid(pid);
978f3a45 5405
756d17ee 5406out:
5407 mutex_unlock(&ftrace_lock);
5408 return ret;
5409}
5410
5411static void ftrace_pid_reset(void)
5412{
5413 struct ftrace_pid *fpid, *safe;
978f3a45 5414
756d17ee 5415 mutex_lock(&ftrace_lock);
5416 list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
5417 struct pid *pid = fpid->pid;
5418
5419 clear_ftrace_pid_task(pid);
5420
5421 list_del(&fpid->list);
5422 kfree(fpid);
df4fc315
SR
5423 }
5424
df4fc315 5425 ftrace_update_pid_func();
e1effa01 5426 ftrace_startup_all(0);
df4fc315 5427
e6ea44e9 5428 mutex_unlock(&ftrace_lock);
756d17ee 5429}
df4fc315 5430
756d17ee 5431static void *fpid_start(struct seq_file *m, loff_t *pos)
5432{
5433 mutex_lock(&ftrace_lock);
5434
e3eea140 5435 if (!ftrace_pids_enabled() && (!*pos))
756d17ee 5436 return (void *) 1;
5437
5438 return seq_list_start(&ftrace_pids, *pos);
5439}
5440
5441static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
5442{
5443 if (v == (void *)1)
5444 return NULL;
5445
5446 return seq_list_next(v, &ftrace_pids, pos);
5447}
5448
5449static void fpid_stop(struct seq_file *m, void *p)
5450{
5451 mutex_unlock(&ftrace_lock);
5452}
5453
5454static int fpid_show(struct seq_file *m, void *v)
5455{
5456 const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
5457
5458 if (v == (void *)1) {
fa6f0cc7 5459 seq_puts(m, "no pid\n");
756d17ee 5460 return 0;
5461 }
5462
5463 if (fpid->pid == ftrace_swapper_pid)
fa6f0cc7 5464 seq_puts(m, "swapper tasks\n");
756d17ee 5465 else
5466 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
5467
5468 return 0;
5469}
5470
5471static const struct seq_operations ftrace_pid_sops = {
5472 .start = fpid_start,
5473 .next = fpid_next,
5474 .stop = fpid_stop,
5475 .show = fpid_show,
5476};
5477
5478static int
5479ftrace_pid_open(struct inode *inode, struct file *file)
5480{
5481 int ret = 0;
5482
5483 if ((file->f_mode & FMODE_WRITE) &&
5484 (file->f_flags & O_TRUNC))
5485 ftrace_pid_reset();
5486
5487 if (file->f_mode & FMODE_READ)
5488 ret = seq_open(file, &ftrace_pid_sops);
5489
5490 return ret;
5491}
5492
df4fc315
SR
5493static ssize_t
5494ftrace_pid_write(struct file *filp, const char __user *ubuf,
5495 size_t cnt, loff_t *ppos)
5496{
457dc928 5497 char buf[64], *tmp;
df4fc315
SR
5498 long val;
5499 int ret;
5500
5501 if (cnt >= sizeof(buf))
5502 return -EINVAL;
5503
5504 if (copy_from_user(&buf, ubuf, cnt))
5505 return -EFAULT;
5506
5507 buf[cnt] = 0;
5508
756d17ee 5509 /*
5510 * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
5511 * to clean the filter quietly.
5512 */
457dc928
IM
5513 tmp = strstrip(buf);
5514 if (strlen(tmp) == 0)
756d17ee 5515 return 1;
5516
bcd83ea6 5517 ret = kstrtol(tmp, 10, &val);
df4fc315
SR
5518 if (ret < 0)
5519 return ret;
5520
756d17ee 5521 ret = ftrace_pid_add(val);
df4fc315 5522
756d17ee 5523 return ret ? ret : cnt;
5524}
df4fc315 5525
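/*
 * Annotation (not part of the original file): typical shell usage of
 * the set_ftrace_pid file backed by this handler:
 *
 *   echo 1234 > set_ftrace_pid    # trace only pid 1234
 *   echo 0 > set_ftrace_pid       # trace the idle (swapper) tasks
 *   echo > set_ftrace_pid         # clear the pid filter quietly
 */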
756d17ee 5526static int
5527ftrace_pid_release(struct inode *inode, struct file *file)
5528{
5529 if (file->f_mode & FMODE_READ)
5530 seq_release(inode, file);
df4fc315 5531
756d17ee 5532 return 0;
df4fc315
SR
5533}
5534
5e2336a0 5535static const struct file_operations ftrace_pid_fops = {
756d17ee 5536 .open = ftrace_pid_open,
5537 .write = ftrace_pid_write,
5538 .read = seq_read,
098c879e 5539 .llseek = tracing_lseek,
756d17ee 5540 .release = ftrace_pid_release,
df4fc315
SR
5541};
5542
8434dc93 5543static __init int ftrace_init_tracefs(void)
df4fc315
SR
5544{
5545 struct dentry *d_tracer;
df4fc315
SR
5546
5547 d_tracer = tracing_init_dentry();
14a5ae40 5548 if (IS_ERR(d_tracer))
df4fc315
SR
5549 return 0;
5550
8434dc93 5551 ftrace_init_dyn_tracefs(d_tracer);
df4fc315 5552
5452af66
FW
5553 trace_create_file("set_ftrace_pid", 0644, d_tracer,
5554 NULL, &ftrace_pid_fops);
493762fc 5555
8434dc93 5556 ftrace_profile_tracefs(d_tracer);
493762fc 5557
df4fc315
SR
5558 return 0;
5559}
8434dc93 5560fs_initcall(ftrace_init_tracefs);
df4fc315 5561
a2bb6a3d 5562/**
81adbdc0 5563 * ftrace_kill - kill ftrace
a2bb6a3d
SR
5564 *
 5565 * This function should be used by panic code. It stops ftrace
 5566 * but in a not so nice way. If you need to simply stop ftrace
 5567 * from a non-atomic section, use unregister_ftrace_function() instead.
5568 */
81adbdc0 5569void ftrace_kill(void)
a2bb6a3d
SR
5570{
5571 ftrace_disabled = 1;
5572 ftrace_enabled = 0;
a2bb6a3d
SR
5573 clear_ftrace_function();
5574}
5575
e0a413f6
SR
5576/**
 5577 * ftrace_is_dead - Test if ftrace is dead or not.
5578 */
5579int ftrace_is_dead(void)
5580{
5581 return ftrace_disabled;
5582}
5583
16444a8a 5584/**
3d083395
SR
5585 * register_ftrace_function - register a function for profiling
 5586 * @ops: ops structure that holds the function for profiling.
16444a8a 5587 *
3d083395
SR
 5588 * Register a function to be called on entry to every traced
 5589 * function in the kernel.
5590 *
 5591 * Note: @ops->func and all the functions it calls must be labeled
 5592 * with "notrace", otherwise tracing the callback itself will
 5593 * recurse in an endless loop.
16444a8a 5594 */
3d083395 5595int register_ftrace_function(struct ftrace_ops *ops)
16444a8a 5596{
45a4a237 5597 int ret = -1;
4eebcc81 5598
f04f24fb
MH
5599 ftrace_ops_init(ops);
5600
e6ea44e9 5601 mutex_lock(&ftrace_lock);
e7d3737e 5602
8a56d776 5603 ret = ftrace_startup(ops, 0);
b848914c 5604
e6ea44e9 5605 mutex_unlock(&ftrace_lock);
8d240dd8 5606
b0fc494f 5607 return ret;
3d083395 5608}
cdbe61bf 5609EXPORT_SYMBOL_GPL(register_ftrace_function);
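/*
 * A minimal registration sketch (hypothetical module code, not part
 * of this file; my_ops and my_callback are made-up names, and the
 * callback signature follows ftrace_func_t):
 */
#if 0	/* illustration only */
static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
				struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs on every traced function entry: keep it short and notrace. */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* register_ftrace_function(&my_ops); ... unregister_ftrace_function(&my_ops); */
#endif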
3d083395
SR
5610
5611/**
32632920 5612 * unregister_ftrace_function - unregister a function for profiling.
3d083395
SR
 5613 * @ops: ops structure that holds the function to unregister
5614 *
5615 * Unregister a function that was added to be called by ftrace profiling.
5616 */
5617int unregister_ftrace_function(struct ftrace_ops *ops)
5618{
5619 int ret;
5620
e6ea44e9 5621 mutex_lock(&ftrace_lock);
8a56d776 5622 ret = ftrace_shutdown(ops, 0);
e6ea44e9 5623 mutex_unlock(&ftrace_lock);
b0fc494f
SR
5624
5625 return ret;
5626}
cdbe61bf 5627EXPORT_SYMBOL_GPL(unregister_ftrace_function);
b0fc494f 5628
e309b41d 5629int
b0fc494f 5630ftrace_enable_sysctl(struct ctl_table *table, int write,
8d65af78 5631 void __user *buffer, size_t *lenp,
b0fc494f
SR
5632 loff_t *ppos)
5633{
45a4a237 5634 int ret = -ENODEV;
4eebcc81 5635
e6ea44e9 5636 mutex_lock(&ftrace_lock);
b0fc494f 5637
45a4a237
SR
5638 if (unlikely(ftrace_disabled))
5639 goto out;
5640
5641 ret = proc_dointvec(table, write, buffer, lenp, ppos);
b0fc494f 5642
a32c7765 5643 if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
b0fc494f
SR
5644 goto out;
5645
a32c7765 5646 last_ftrace_enabled = !!ftrace_enabled;
b0fc494f
SR
5647
5648 if (ftrace_enabled) {
5649
b0fc494f 5650 /* we are starting ftrace again */
5000c418
JK
5651 if (ftrace_ops_list != &ftrace_list_end)
5652 update_ftrace_function();
b0fc494f 5653
524a3868
SRRH
5654 ftrace_startup_sysctl();
5655
b0fc494f
SR
5656 } else {
5657 /* stopping ftrace calls (just send to ftrace_stub) */
5658 ftrace_trace_function = ftrace_stub;
5659
5660 ftrace_shutdown_sysctl();
5661 }
5662
5663 out:
e6ea44e9 5664 mutex_unlock(&ftrace_lock);
3d083395 5665 return ret;
16444a8a 5666}
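/*
 * This handler backs the kernel.ftrace_enabled sysctl. For example
 * (hypothetical shell session):
 *
 *	# sysctl kernel.ftrace_enabled=0		# send callbacks to ftrace_stub
 *	# echo 1 > /proc/sys/kernel/ftrace_enabled	# hook them back up
 */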
f17845e5 5667
fb52607a 5668#ifdef CONFIG_FUNCTION_GRAPH_TRACER
e7d3737e 5669
5f151b24
SRRH
5670static struct ftrace_ops graph_ops = {
5671 .func = ftrace_stub,
5672 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5673 FTRACE_OPS_FL_INITIALIZED |
e3eea140 5674 FTRACE_OPS_FL_PID |
5f151b24
SRRH
5675 FTRACE_OPS_FL_STUB,
5676#ifdef FTRACE_GRAPH_TRAMP_ADDR
5677 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
aec0be2d 5678 /* trampoline_size is only needed for dynamically allocated tramps */
5f151b24
SRRH
5679#endif
5680 ASSIGN_OPS_HASH(graph_ops, &global_ops.local_hash)
5681};
5682
55577204
SRRH
5683void ftrace_graph_sleep_time_control(bool enable)
5684{
5685 fgraph_sleep_time = enable;
5686}
5687
5688void ftrace_graph_graph_time_control(bool enable)
5689{
5690 fgraph_graph_time = enable;
5691}
5692
e49dc19c
SR
5693int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
5694{
5695 return 0;
5696}
5697
287b6e68
FW
5698/* The callbacks that hook a function */
5699trace_func_graph_ret_t ftrace_graph_return =
5700 (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 5701trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 5702static trace_func_graph_ent_t __ftrace_graph_entry = ftrace_graph_entry_stub;
f201ae23
FW
5703
5704/* Try to assign a return stack array on FTRACE_RETSTACK_ALLOC_SIZE tasks. */
5705static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
5706{
5707 int i;
5708 int ret = 0;
5709 unsigned long flags;
5710 int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
5711 struct task_struct *g, *t;
5712
5713 for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
5714 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
5715 * sizeof(struct ftrace_ret_stack),
5716 GFP_KERNEL);
5717 if (!ret_stack_list[i]) {
5718 start = 0;
5719 end = i;
5720 ret = -ENOMEM;
5721 goto free;
5722 }
5723 }
5724
5725 read_lock_irqsave(&tasklist_lock, flags);
5726 do_each_thread(g, t) {
5727 if (start == end) {
5728 ret = -EAGAIN;
5729 goto unlock;
5730 }
5731
5732 if (t->ret_stack == NULL) {
380c4b14 5733 atomic_set(&t->tracing_graph_pause, 0);
f201ae23 5734 atomic_set(&t->trace_overrun, 0);
26c01624
SR
5735 t->curr_ret_stack = -1;
5736 /* Make sure the tasks see the -1 first: */
5737 smp_wmb();
5738 t->ret_stack = ret_stack_list[start++];
f201ae23
FW
5739 }
5740 } while_each_thread(g, t);
5741
5742unlock:
5743 read_unlock_irqrestore(&tasklist_lock, flags);
5744free:
5745 for (i = start; i < end; i++)
5746 kfree(ret_stack_list[i]);
5747 return ret;
5748}
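/*
 * Note: -EAGAIN above means the task list outgrew the
 * FTRACE_RETSTACK_ALLOC_SIZE stacks allocated in this pass; the
 * caller (start_graph_tracing() below) keeps retrying until a pass
 * finds no task left without a ret_stack.
 */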
5749
8aef2d28 5750static void
c73464b1 5751ftrace_graph_probe_sched_switch(void *ignore, bool preempt,
38516ab5 5752 struct task_struct *prev, struct task_struct *next)
8aef2d28
SR
5753{
5754 unsigned long long timestamp;
5755 int index;
5756
be6f164a
SR
5757 /*
 5758 * Does the user want to count the time a function was asleep?
5759 * If so, do not update the time stamps.
5760 */
55577204 5761 if (fgraph_sleep_time)
be6f164a
SR
5762 return;
5763
8aef2d28
SR
5764 timestamp = trace_clock_local();
5765
5766 prev->ftrace_timestamp = timestamp;
5767
5768 /* only process tasks that we timestamped */
5769 if (!next->ftrace_timestamp)
5770 return;
5771
5772 /*
5773 * Update all the counters in next to make up for the
5774 * time next was sleeping.
5775 */
5776 timestamp -= next->ftrace_timestamp;
5777
5778 for (index = next->curr_ret_stack; index >= 0; index--)
5779 next->ret_stack[index].calltime += timestamp;
5780}
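/*
 * Worked example: if "next" was switched out at t=100us and is
 * switched back in at t=350us, each entry still pending on its
 * return stack gets calltime += 250us, so the durations later
 * computed as rettime - calltime exclude the sleep. When
 * fgraph_sleep_time is set, the probe returns early instead and
 * sleep time is counted as part of the call.
 */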
5781
f201ae23 5782/* Allocate a return stack for each task */
fb52607a 5783static int start_graph_tracing(void)
f201ae23
FW
5784{
5785 struct ftrace_ret_stack **ret_stack_list;
5b058bcd 5786 int ret, cpu;
f201ae23
FW
5787
5788 ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
5789 sizeof(struct ftrace_ret_stack *),
5790 GFP_KERNEL);
5791
5792 if (!ret_stack_list)
5793 return -ENOMEM;
5794
5b058bcd 5796 /* The ret_stack of each CPU's idle task will never be freed */
179c498a
SR
5796 for_each_online_cpu(cpu) {
5797 if (!idle_task(cpu)->ret_stack)
868baf07 5798 ftrace_graph_init_idle_task(idle_task(cpu), cpu);
179c498a 5799 }
5b058bcd 5800
f201ae23
FW
5801 do {
5802 ret = alloc_retstack_tasklist(ret_stack_list);
5803 } while (ret == -EAGAIN);
5804
8aef2d28 5805 if (!ret) {
38516ab5 5806 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
8aef2d28
SR
5807 if (ret)
5808 pr_info("ftrace_graph: Couldn't activate tracepoint"
5809 " probe to kernel_sched_switch\n");
5810 }
5811
f201ae23
FW
5812 kfree(ret_stack_list);
5813 return ret;
5814}
5815
4a2b8dda
FW
5816/*
5817 * Hibernation protection.
 5818 * The state of the current task is too unstable during
 5819 * suspend/restore to disk. We want to protect against that.
5820 */
5821static int
5822ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
5823 void *unused)
5824{
5825 switch (state) {
5826 case PM_HIBERNATION_PREPARE:
5827 pause_graph_tracing();
5828 break;
5829
5830 case PM_POST_HIBERNATION:
5831 unpause_graph_tracing();
5832 break;
5833 }
5834 return NOTIFY_DONE;
5835}
5836
23a8e844
SRRH
5837static int ftrace_graph_entry_test(struct ftrace_graph_ent *trace)
5838{
5839 if (!ftrace_ops_test(&global_ops, trace->func, NULL))
5840 return 0;
5841 return __ftrace_graph_entry(trace);
5842}
5843
5844/*
 5845 * The function graph tracer should only trace the functions defined
 5846 * by set_ftrace_filter and set_ftrace_notrace. If another function
 5847 * tracer ops is registered, the graph tracer must test each
 5848 * function against the global ops, instead of simply tracing any
 5849 * function that any ftrace_ops has registered.
5850 */
5851static void update_function_graph_func(void)
5852{
5f151b24
SRRH
5853 struct ftrace_ops *op;
5854 bool do_test = false;
5855
5856 /*
5857 * The graph and global ops share the same set of functions
5858 * to test. If any other ops is on the list, then
 5859 * the graph tracing needs to test if it is a function
5860 * it should call.
5861 */
5862 do_for_each_ftrace_op(op, ftrace_ops_list) {
5863 if (op != &global_ops && op != &graph_ops &&
5864 op != &ftrace_list_end) {
5865 do_test = true;
5866 /* in double loop, break out with goto */
5867 goto out;
5868 }
5869 } while_for_each_ftrace_op(op);
5870 out:
5871 if (do_test)
23a8e844 5872 ftrace_graph_entry = ftrace_graph_entry_test;
5f151b24
SRRH
5873 else
5874 ftrace_graph_entry = __ftrace_graph_entry;
23a8e844
SRRH
5875}
5876
8275f69f
MK
5877static struct notifier_block ftrace_suspend_notifier = {
5878 .notifier_call = ftrace_suspend_notifier_call,
5879};
5880
287b6e68
FW
5881int register_ftrace_graph(trace_func_graph_ret_t retfunc,
5882 trace_func_graph_ent_t entryfunc)
15e6cb36 5883{
e7d3737e
FW
5884 int ret = 0;
5885
e6ea44e9 5886 mutex_lock(&ftrace_lock);
e7d3737e 5887
05ce5818 5888 /* we currently allow only one tracer registered at a time */
597af815 5889 if (ftrace_graph_active) {
05ce5818
SR
5890 ret = -EBUSY;
5891 goto out;
5892 }
5893
4a2b8dda
FW
5894 register_pm_notifier(&ftrace_suspend_notifier);
5895
597af815 5896 ftrace_graph_active++;
fb52607a 5897 ret = start_graph_tracing();
f201ae23 5898 if (ret) {
597af815 5899 ftrace_graph_active--;
f201ae23
FW
5900 goto out;
5901 }
e53a6319 5902
287b6e68 5903 ftrace_graph_return = retfunc;
23a8e844
SRRH
5904
5905 /*
5906 * Update the indirect function to the entryfunc, and the
5907 * function that gets called to the entry_test first. Then
5908 * call the update fgraph entry function to determine if
5909 * the entryfunc should be called directly or not.
5910 */
5911 __ftrace_graph_entry = entryfunc;
5912 ftrace_graph_entry = ftrace_graph_entry_test;
5913 update_function_graph_func();
e53a6319 5914
5f151b24 5915 ret = ftrace_startup(&graph_ops, FTRACE_START_FUNC_RET);
e7d3737e 5916out:
e6ea44e9 5917 mutex_unlock(&ftrace_lock);
e7d3737e 5918 return ret;
15e6cb36
FW
5919}
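/*
 * A minimal caller sketch (hypothetical names; the callbacks follow
 * the trace_func_graph_ent_t and trace_func_graph_ret_t typedefs
 * used above):
 */
#if 0	/* illustration only */
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	return 1;	/* nonzero: record this function's entry */
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the call's duration */
}

/* Note the argument order: return handler first, then entry handler. */
/* register_ftrace_graph(my_graph_return, my_graph_entry); */
#endif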
5920
fb52607a 5921void unregister_ftrace_graph(void)
15e6cb36 5922{
e6ea44e9 5923 mutex_lock(&ftrace_lock);
e7d3737e 5924
597af815 5925 if (unlikely(!ftrace_graph_active))
2aad1b76
SR
5926 goto out;
5927
597af815 5928 ftrace_graph_active--;
287b6e68 5929 ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
e49dc19c 5930 ftrace_graph_entry = ftrace_graph_entry_stub;
23a8e844 5931 __ftrace_graph_entry = ftrace_graph_entry_stub;
5f151b24 5932 ftrace_shutdown(&graph_ops, FTRACE_STOP_FUNC_RET);
4a2b8dda 5933 unregister_pm_notifier(&ftrace_suspend_notifier);
38516ab5 5934 unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
e7d3737e 5935
f3bea491
SRRH
5936#ifdef CONFIG_DYNAMIC_FTRACE
5937 /*
5938 * Function graph does not allocate the trampoline, but
5939 * other global_ops do. We need to reset the ALLOC_TRAMP flag
5940 * if one was used.
5941 */
5942 global_ops.trampoline = save_global_trampoline;
5943 if (save_global_flags & FTRACE_OPS_FL_ALLOC_TRAMP)
5944 global_ops.flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
5945#endif
5946
2aad1b76 5947 out:
e6ea44e9 5948 mutex_unlock(&ftrace_lock);
15e6cb36 5949}
f201ae23 5950
868baf07
SR
5951static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
5952
5953static void
5954graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
5955{
5956 atomic_set(&t->tracing_graph_pause, 0);
5957 atomic_set(&t->trace_overrun, 0);
5958 t->ftrace_timestamp = 0;
25985edc 5959 /* make curr_ret_stack visible before we add the ret_stack */
868baf07
SR
5960 smp_wmb();
5961 t->ret_stack = ret_stack;
5962}
5963
5964/*
5965 * Allocate a return stack for the idle task. May be the first
5966 * time through, or it may be done by CPU hotplug online.
5967 */
5968void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
5969{
5970 t->curr_ret_stack = -1;
5971 /*
5972 * The idle task has no parent, it either has its own
5973 * stack or no stack at all.
5974 */
5975 if (t->ret_stack)
5976 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
5977
5978 if (ftrace_graph_active) {
5979 struct ftrace_ret_stack *ret_stack;
5980
5981 ret_stack = per_cpu(idle_ret_stack, cpu);
5982 if (!ret_stack) {
5983 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
5984 * sizeof(struct ftrace_ret_stack),
5985 GFP_KERNEL);
5986 if (!ret_stack)
5987 return;
5988 per_cpu(idle_ret_stack, cpu) = ret_stack;
5989 }
5990 graph_init_task(t, ret_stack);
5991 }
5992}
5993
f201ae23 5994/* Allocate a return stack for newly created task */
fb52607a 5995void ftrace_graph_init_task(struct task_struct *t)
f201ae23 5996{
84047e36
SR
5997 /* Make sure we do not use the parent ret_stack */
5998 t->ret_stack = NULL;
ea14eb71 5999 t->curr_ret_stack = -1;
84047e36 6000
597af815 6001 if (ftrace_graph_active) {
82310a32
SR
6002 struct ftrace_ret_stack *ret_stack;
6003
6004 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
f201ae23
FW
6005 * sizeof(struct ftrace_ret_stack),
6006 GFP_KERNEL);
82310a32 6007 if (!ret_stack)
f201ae23 6008 return;
868baf07 6009 graph_init_task(t, ret_stack);
84047e36 6010 }
f201ae23
FW
6011}
6012
fb52607a 6013void ftrace_graph_exit_task(struct task_struct *t)
f201ae23 6014{
eae849ca
FW
6015 struct ftrace_ret_stack *ret_stack = t->ret_stack;
6016
f201ae23 6017 t->ret_stack = NULL;
eae849ca
FW
6018 /* NULL must become visible to IRQs before we free it: */
6019 barrier();
6020
6021 kfree(ret_stack);
f201ae23 6022}
15e6cb36 6023#endif