#include "trace.h"
-int ftrace_enabled;
+/* ftrace_enabled is a method to turn ftrace on or off */
+int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
+/*
+ * ftrace_disabled is set when an anomaly is discovered.
+ * ftrace_disabled is much stronger than ftrace_enabled.
+ */
+static int ftrace_disabled __read_mostly;
+
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
/* mcount is defined per arch in assembly */
EXPORT_SYMBOL(mcount);
-notrace void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
+void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
{
struct ftrace_ops *op = ftrace_list;
ftrace_trace_function = ftrace_stub;
}
-static int notrace __register_ftrace_function(struct ftrace_ops *ops)
+static int __register_ftrace_function(struct ftrace_ops *ops)
{
/* Should never be called by interrupts */
spin_lock(&ftrace_lock);
return 0;
}
-static int notrace __unregister_ftrace_function(struct ftrace_ops *ops)
+static int __unregister_ftrace_function(struct ftrace_ops *ops)
{
struct ftrace_ops **p;
int ret = 0;
static int ftrace_record_suspend;
+static struct dyn_ftrace *ftrace_free_records;
+
static inline int
-notrace ftrace_ip_in_hash(unsigned long ip, unsigned long key)
+ftrace_ip_in_hash(unsigned long ip, unsigned long key)
{
struct dyn_ftrace *p;
struct hlist_node *t;
return found;
}
-static inline void notrace
+static inline void
ftrace_add_hash(struct dyn_ftrace *node, unsigned long key)
{
hlist_add_head(&node->node, &ftrace_hash[key]);
}
-static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
+/*
+ * Return a dyn_ftrace record to the free list.  Freed records are
+ * chained through their ->ip field (the address is no longer needed
+ * once the record is dead), and FTRACE_FL_FREE marks them so the
+ * allocator can sanity-check the list head before reuse.
+ */
+static void ftrace_free_rec(struct dyn_ftrace *rec)
+{
+ /* no locking, only called from kstop_machine */
+
+ rec->ip = (unsigned long)ftrace_free_records;
+ ftrace_free_records = rec;
+ rec->flags |= FTRACE_FL_FREE;
+}
+
+/*
+ * Allocate a dyn_ftrace record for the mcount call site at @ip.
+ * A record from the free list is reused when one is available;
+ * otherwise a slot is taken from the current ftrace_pages page.
+ * Returns NULL when no record can be handed out.
+ */
+static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
{
+ struct dyn_ftrace *rec;
+
+ /* First check for freed records */
+ if (ftrace_free_records) {
+ rec = ftrace_free_records;
+
+ /*
+ * Every record on the free list must carry FTRACE_FL_FREE
+ * (set by ftrace_free_rec()).  If it does not, the list is
+ * corrupted: warn once and shut ftrace down entirely rather
+ * than risk patching code with a bogus record.
+ */
+ if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
+ WARN_ON_ONCE(1);
+ ftrace_free_records = NULL;
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+ return NULL;
+ }
+
+ /* next free record is chained through ->ip */
+ ftrace_free_records = (void *)rec->ip;
+ memset(rec, 0, sizeof(*rec));
+ return rec;
+ }
+
if (ftrace_pages->index == ENTRIES_PER_PAGE) {
if (!ftrace_pages->next)
return NULL;
return &ftrace_pages->records[ftrace_pages->index++];
}
-static void notrace
+static void
ftrace_record_ip(unsigned long ip)
{
struct dyn_ftrace *node;
int resched;
int atomic;
- if (!ftrace_enabled)
+ if (!ftrace_enabled || ftrace_disabled)
return;
resched = need_resched();
preempt_enable_notrace();
}
-#define FTRACE_ADDR ((long)(&ftrace_caller))
-#define MCOUNT_ADDR ((long)(&mcount))
+#define FTRACE_ADDR ((long)(ftrace_caller))
+#define MCOUNT_ADDR ((long)(mcount))
-static void notrace
+static void
__ftrace_replace_code(struct dyn_ftrace *rec,
unsigned char *old, unsigned char *new, int enable)
{
}
failed = ftrace_modify_code(ip, old, new);
- if (failed)
- rec->flags |= FTRACE_FL_FAILED;
+ if (failed) {
+ unsigned long key;
+ /* It is possible that the function hasn't been converted yet */
+ key = hash_long(ip, FTRACE_HASHBITS);
+ if (!ftrace_ip_in_hash(ip, key)) {
+ rec->flags |= FTRACE_FL_FAILED;
+ ftrace_free_rec(rec);
+ }
+
+ }
}
-static void notrace ftrace_replace_code(int enable)
+static void ftrace_replace_code(int enable)
{
unsigned char *new = NULL, *old = NULL;
struct dyn_ftrace *rec;
}
}
-static notrace void ftrace_shutdown_replenish(void)
+static void ftrace_shutdown_replenish(void)
{
if (ftrace_pages->next)
return;
ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
}
-static notrace void
+static void
ftrace_code_disable(struct dyn_ftrace *rec)
{
unsigned long ip;
call = ftrace_call_replace(ip, MCOUNT_ADDR);
failed = ftrace_modify_code(ip, call, nop);
- if (failed)
+ if (failed) {
rec->flags |= FTRACE_FL_FAILED;
+ ftrace_free_rec(rec);
+ }
}
-static int notrace __ftrace_modify_code(void *data)
+static int __ftrace_modify_code(void *data)
{
unsigned long addr;
int *command = data;
return 0;
}
-static void notrace ftrace_run_update_code(int command)
+static void ftrace_run_update_code(int command)
{
stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
}
static ftrace_func_t saved_ftrace_func;
-static void notrace ftrace_startup(void)
+static void ftrace_startup(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend++;
if (ftraced_suspend == 1)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown(void)
+static void ftrace_shutdown(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend--;
if (!ftraced_suspend)
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_startup_sysctl(void)
+static void ftrace_startup_sysctl(void)
{
int command = FTRACE_ENABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* Force update next time */
saved_ftrace_func = NULL;
mutex_unlock(&ftraced_lock);
}
-static void notrace ftrace_shutdown_sysctl(void)
+static void ftrace_shutdown_sysctl(void)
{
int command = FTRACE_DISABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* ftraced_suspend is true if ftrace is running */
if (ftraced_suspend)
static unsigned long ftrace_update_cnt;
unsigned long ftrace_update_tot_cnt;
-static int notrace __ftrace_update_code(void *ignore)
+static int __ftrace_update_code(void *ignore)
{
struct dyn_ftrace *p;
struct hlist_head head;
save_ftrace_enabled = ftrace_enabled;
ftrace_enabled = 0;
- start = now(raw_smp_processor_id());
+ start = ftrace_now(raw_smp_processor_id());
ftrace_update_cnt = 0;
/* No locks needed, the machine is stopped! */
}
- stop = now(raw_smp_processor_id());
+ stop = ftrace_now(raw_smp_processor_id());
ftrace_update_time = stop - start;
ftrace_update_tot_cnt += ftrace_update_cnt;
return 0;
}
-static void notrace ftrace_update_code(void)
+static void ftrace_update_code(void)
{
+ if (unlikely(ftrace_disabled))
+ return;
+
stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
-static int notrace ftraced(void *ignore)
+static int ftraced(void *ignore)
{
unsigned long usecs;
/* check once a second */
schedule_timeout(HZ);
+ if (unlikely(ftrace_disabled))
+ continue;
+
mutex_lock(&ftrace_sysctl_lock);
mutex_lock(&ftraced_lock);
if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
ftrace_update_cnt != 1 ? "s" : "",
ftrace_update_tot_cnt,
usecs, usecs != 1 ? "s" : "");
+ ftrace_disabled = 1;
WARN_ON_ONCE(1);
}
ftraced_trigger = 0;
unsigned filtered;
};
-static void notrace *
+static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
struct ftrace_iterator *iter = m->private;
.show = t_show,
};
-static int notrace
+static int
ftrace_avail_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
return 0;
}
-static void notrace ftrace_filter_reset(void)
+static void ftrace_filter_reset(void)
{
struct ftrace_page *pg;
struct dyn_ftrace *rec;
preempt_enable();
}
-static int notrace
+static int
ftrace_filter_open(struct inode *inode, struct file *file)
{
struct ftrace_iterator *iter;
int ret = 0;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
return ret;
}
-static ssize_t notrace
+static ssize_t
ftrace_filter_read(struct file *file, char __user *ubuf,
size_t cnt, loff_t *ppos)
{
return -EPERM;
}
-static loff_t notrace
+static loff_t
ftrace_filter_lseek(struct file *file, loff_t offset, int origin)
{
loff_t ret;
MATCH_END_ONLY,
};
-static void notrace
+static void
ftrace_match(unsigned char *buff, int len)
{
char str[KSYM_SYMBOL_LEN];
preempt_enable();
}
-static ssize_t notrace
+static ssize_t
ftrace_filter_write(struct file *file, const char __user *ubuf,
size_t cnt, loff_t *ppos)
{
* Filters denote which functions should be enabled when tracing is enabled.
* If @buf is NULL and reset is set, all functions will be enabled for tracing.
*/
-notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
+void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftrace_filter_lock);
if (reset)
ftrace_filter_reset();
mutex_unlock(&ftrace_filter_lock);
}
-static int notrace
+static int
ftrace_filter_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
- if (!ftraced_task)
+ if (unlikely(ftrace_disabled))
return -ENODEV;
mutex_lock(&ftraced_lock);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ftraced_waiters, &wait);
+ if (unlikely(!ftraced_task)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
do {
mutex_unlock(&ftraced_lock);
wake_up_process(ftraced_task);
set_current_state(TASK_INTERRUPTIBLE);
} while (last_counter == ftraced_iteration_counter);
+ out:
mutex_unlock(&ftraced_lock);
remove_wait_queue(&ftraced_waiters, &wait);
set_current_state(TASK_RUNNING);
return ret;
}
+/*
+ * Forcibly stop dynamic ftrace: disable all patched call sites,
+ * restore the trace function, and stop the ftraced kthread.
+ * Called from ftrace_kill() after an anomaly has been detected.
+ */
+static void ftrace_force_shutdown(void)
+{
+ struct task_struct *task;
+ int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
+
+ mutex_lock(&ftraced_lock);
+ task = ftraced_task;
+ ftraced_task = NULL;
+ /* -1 keeps any startup/shutdown accounting from re-enabling us */
+ ftraced_suspend = -1;
+ ftrace_run_update_code(command);
+ mutex_unlock(&ftraced_lock);
+
+ /* stop the kthread outside the lock; it may be blocked on it */
+ if (task)
+ kthread_stop(task);
+}
+
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
fs_initcall(ftrace_init_debugfs);
-static int __init notrace ftrace_dynamic_init(void)
+static int __init ftrace_dynamic_init(void)
{
struct task_struct *p;
unsigned long addr;
int ret;
addr = (unsigned long)ftrace_record_ip;
+
stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
/* ftrace_dyn_arch_init places the return code in addr */
- if (addr)
- return addr;
+ if (addr) {
+ ret = (int)addr;
+ goto failed;
+ }
ret = ftrace_dyn_table_alloc();
if (ret)
- return ret;
+ goto failed;
p = kthread_run(ftraced, NULL, "ftraced");
- if (IS_ERR(p))
- return -1;
+ if (IS_ERR(p)) {
+ ret = -1;
+ goto failed;
+ }
last_ftrace_enabled = ftrace_enabled = 1;
ftraced_task = p;
return 0;
+
+ failed:
+ ftrace_disabled = 1;
+ return ret;
}
core_initcall(ftrace_dynamic_init);
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
+# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
+/**
+ * ftrace_kill - totally shutdown ftrace
+ *
+ * This is a safety measure. If something was detected that seems
+ * wrong, calling this function will keep ftrace from doing any more
+ * modifications and updates. It is used when something has gone
+ * wrong and tracing can no longer be trusted.
+ */
+void ftrace_kill(void)
+{
+ mutex_lock(&ftrace_sysctl_lock);
+ /* ftrace_disabled is stronger than ftrace_enabled and is never cleared */
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+
+ clear_ftrace_function();
+ mutex_unlock(&ftrace_sysctl_lock);
+
+ /* Try to totally disable ftrace */
+ ftrace_force_shutdown();
+}
+
/**
* register_ftrace_function - register a function for profiling
* @ops - ops structure that holds the function for profiling.
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -1;
+
mutex_lock(&ftrace_sysctl_lock);
ret = __register_ftrace_function(ops);
ftrace_startup();
return ret;
}
-notrace int
+int
ftrace_enable_sysctl(struct ctl_table *table, int write,
struct file *file, void __user *buffer, size_t *lenp,
loff_t *ppos)
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
mutex_lock(&ftrace_sysctl_lock);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);