return nsec;
}
-static const int time_sync_freq_max = 128;
-static const cycle_t time_sync_thresh = 100000;
-
-static DEFINE_PER_CPU(cycle_t, time_offset);
-static DEFINE_PER_CPU(cycle_t, prev_cpu_time);
-static DEFINE_PER_CPU(int, time_sync_count);
-static DEFINE_PER_CPU(int, time_sync_freq);
-
-/*
- * Global lock which we take every now and then to synchronize
- * the CPUs time. This method is not warp-safe, but it's good
- * enough to synchronize slowly diverging time sources and thus
- * it's good enough for tracing:
- */
-static DEFINE_SPINLOCK(time_sync_lock);
-static cycle_t prev_global_time;
-
-static notrace cycle_t __ftrace_now_sync(cycles_t time, int cpu)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&time_sync_lock, flags);
-
- /*
- * Update the synchronization frequency:
- */
- if (per_cpu(time_sync_freq, cpu) < time_sync_freq_max)
- per_cpu(time_sync_freq, cpu) *= 2;
- per_cpu(time_sync_count, cpu) = per_cpu(time_sync_freq, cpu);
-
- if (time < prev_global_time) {
- per_cpu(time_offset, cpu) += prev_global_time - time;
- time = prev_global_time;
- } else {
- prev_global_time = time;
- }
-
- spin_unlock_irqrestore(&time_sync_lock, flags);
-
- return time;
-}
-
notrace cycle_t ftrace_now(int cpu)
{
- cycle_t prev_cpu_time, time, delta_time;
-
- prev_cpu_time = per_cpu(prev_cpu_time, cpu);
- time = sched_clock() + per_cpu(time_offset, cpu);
- delta_time = time-prev_cpu_time;
-
- if (unlikely(delta_time > time_sync_thresh ||
- --per_cpu(time_sync_count, cpu) <= 0))
- time = __ftrace_now_sync(time, cpu);
-
- return time;
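+	/* cpu_clock() provides a fast per-CPU clock that stays in sync across CPUs */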
+ return cpu_clock(cpu);
}
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
-static int tracer_enabled;
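+/* tracing is enabled by default */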
+static int tracer_enabled = 1;
static unsigned long trace_nr_entries = 16384UL;
static struct tracer *trace_types __read_mostly;
data->trace_tail_idx = 0;
}
-#ifdef CONFIG_FTRACE
-static notrace void
-function_trace_call(unsigned long ip, unsigned long parent_ip)
-{
- struct trace_array *tr = &global_trace;
- struct trace_array_cpu *data;
- unsigned long flags;
- long disabled;
- int cpu;
-
- if (unlikely(!tracer_enabled))
- return;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- data = tr->data[cpu];
- disabled = atomic_inc_return(&data->disabled);
-
- if (likely(disabled == 1))
- ftrace(tr, data, ip, parent_ip, flags);
-
- atomic_dec(&data->disabled);
- local_irq_restore(flags);
-}
-
-static struct ftrace_ops trace_ops __read_mostly =
-{
- .func = function_trace_call,
-};
-#endif
-
-notrace void tracing_start_function_trace(void)
-{
- register_ftrace_function(&trace_ops);
-}
-
-notrace void tracing_stop_function_trace(void)
-{
- unregister_ftrace_function(&trace_ops);
-}
-
#define SAVED_CMDLINES 128
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
}
notrace void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
- unsigned long ip, unsigned long parent_ip, unsigned long flags)
+__ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags)
{
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
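+	/* the buffer lock can now be taken from IRQ context too, hence irqsave */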
+ spin_lock_irqsave(&data->lock, irq_flags);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_FN;
entry->fn.ip = ip;
entry->fn.parent_ip = parent_ip;
- spin_unlock(&data->lock);
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+}
+
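+/*
+ * Record a function entry, unless tracing is disabled for this buffer:
+ */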
+notrace void
+ftrace(struct trace_array *tr, struct trace_array_cpu *data,
+ unsigned long ip, unsigned long parent_ip, unsigned long flags)
+{
+ if (likely(!atomic_read(&data->disabled)))
+ __ftrace(tr, data, ip, parent_ip, flags);
}
notrace void
unsigned long arg1, unsigned long arg2, unsigned long arg3)
{
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
+ spin_lock_irqsave(&data->lock, irq_flags);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, 0);
entry->type = TRACE_SPECIAL;
entry->special.arg1 = arg1;
entry->special.arg2 = arg2;
entry->special.arg3 = arg3;
- spin_unlock(&data->lock);
+ spin_unlock_irqrestore(&data->lock, irq_flags);
}
notrace void
unsigned long flags)
{
struct trace_entry *entry;
+ unsigned long irq_flags;
- spin_lock(&data->lock);
+ spin_lock_irqsave(&data->lock, irq_flags);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_CTX;
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
- spin_unlock(&data->lock);
+ spin_unlock_irqrestore(&data->lock, irq_flags);
+}
+
+#ifdef CONFIG_FTRACE
+static notrace void
+function_trace_call(unsigned long ip, unsigned long parent_ip)
+{
+ struct trace_array *tr = &global_trace;
+ struct trace_array_cpu *data;
+ unsigned long flags;
+ long disabled;
+ int cpu;
+
+ if (unlikely(!tracer_enabled))
+ return;
+
+ local_irq_save(flags);
+ cpu = raw_smp_processor_id();
+ data = tr->data[cpu];
+ disabled = atomic_inc_return(&data->disabled);
+
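+	/* trace only at the outermost nesting level, to guard against recursion */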
+ if (likely(disabled == 1))
+ __ftrace(tr, data, ip, parent_ip, flags);
+
+ atomic_dec(&data->disabled);
+ local_irq_restore(flags);
}
+
+static struct ftrace_ops trace_ops __read_mostly =
+{
+ .func = function_trace_call,
+};
+
+notrace void tracing_start_function_trace(void)
+{
+ register_ftrace_function(&trace_ops);
+}
+
+notrace void tracing_stop_function_trace(void)
+{
+ unregister_ftrace_function(&trace_ops);
+}
+#endif
+
enum trace_file_type {
TRACE_FILE_LAT_FMT = 1,
};
array = page_address(page);
- /* Still possible to catch up to the tail */
- if (iter->next_idx[cpu] && array == data->trace_tail &&
- iter->next_page_idx[cpu] == data->trace_tail_idx)
- return NULL;
-
WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE);
return &array[iter->next_page_idx[cpu]];
}
-static struct notrace trace_entry *
+static struct trace_entry * notrace
find_next_entry(struct trace_iterator *iter, int *ent_cpu)
{
struct trace_array *tr = iter->tr;
struct trace_iterator *iter = filp->private_data;
struct trace_array_cpu *data;
static cpumask_t mask;
- struct trace_entry *entry;
static int start;
unsigned long flags;
+ int ftrace_save;
int read = 0;
int cpu;
int len;
cpus_clear(mask);
local_irq_save(flags);
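+	/*
+	 * Switch the function tracer off while we work on the buffers,
+	 * so the read path itself cannot recurse into the tracer:
+	 */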
+ ftrace_save = ftrace_enabled;
+ ftrace_enabled = 0;
+ smp_wmb();
for_each_possible_cpu(cpu) {
data = iter->tr->data[cpu];
continue;
atomic_inc(&data->disabled);
- spin_lock(&data->lock);
cpu_set(cpu, mask);
}
- while ((entry = find_next_entry_inc(iter)) != NULL) {
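+	/* every CPU is quiesced via ->disabled; now take each buffer's lock */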
+ for_each_cpu_mask(cpu, mask) {
+ data = iter->tr->data[cpu];
+ spin_lock(&data->lock);
+ }
+
+ while (find_next_entry_inc(iter) != NULL) {
+		int save_len = iter->seq.len;
+
ret = print_trace_line(iter);
- if (!ret)
+ if (!ret) {
+ /* don't print partial lines */
+			iter->seq.len = save_len;
break;
+ }
trace_consume(iter);
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
spin_unlock(&data->lock);
+ }
+
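+	/* all locks dropped; let the per-CPU buffers record entries again */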
+ for_each_cpu_mask(cpu, mask) {
+ data = iter->tr->data[cpu];
atomic_dec(&data->disabled);
}
+ ftrace_enabled = ftrace_save;
local_irq_restore(flags);
/* Now copy what we have to the user */
int ret = -ENOMEM;
int i;
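+	/* keep the exported ctrl flag consistent with the tracer_enabled default */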
+ global_trace.ctrl = tracer_enabled;
+
/* Allocate the first page for all buffers */
for_each_possible_cpu(i) {
data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);