static int tracing_disabled = 1;
+/* Convert nanoseconds to microseconds, rounding to nearest (+500 before /1000). */
-static long
+long
ns2usecs(cycle_t nsec)
{
nsec += 500;
return nsec / 1000;
}
-enum trace_type {
- __TRACE_FIRST_TYPE = 0,
-
- TRACE_FN,
- TRACE_CTX,
- TRACE_WAKE,
- TRACE_STACK,
- TRACE_SPECIAL,
-
- __TRACE_LAST_TYPE
-};
-
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
TRACE_FLAG_NEED_RESCHED = 0x02,
"bin",
"block",
"stacktrace",
+ "sched-tree",
NULL
};
-static DEFINE_SPINLOCK(ftrace_max_lock);
+static raw_spinlock_t ftrace_max_lock =
+ (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
/*
* Copy the new maximum trace into the separate maximum-trace
return page_address(page);
}
-static int
+int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
int len = (PAGE_SIZE - 1) - s->len;
va_end(ap);
/* If we can't write it all, don't bother writing anything */
- if (ret > len)
+ if (ret >= len)
return 0;
s->len += ret;
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
+ __raw_spin_lock(&ftrace_max_lock);
/* clear out all the previous traces */
for_each_possible_cpu(i) {
data = tr->data[i];
}
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
/**
int i;
WARN_ON_ONCE(!irqs_disabled());
- spin_lock(&ftrace_max_lock);
+ __raw_spin_lock(&ftrace_max_lock);
for_each_possible_cpu(i)
tracing_reset(max_tr.data[i]);
tracing_reset(data);
__update_max_tr(tr, tsk, cpu);
- spin_unlock(&ftrace_max_lock);
+ __raw_spin_unlock(&ftrace_max_lock);
}
int register_tracer(struct tracer *type)
pc = preempt_count();
entry->preempt_count = pc & 0xff;
- entry->pid = tsk->pid;
+ entry->pid = (tsk) ? tsk->pid : 0;
entry->t = ftrace_now(raw_smp_processor_id());
entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_FN;
entry->fn.ip = ip;
entry->fn.parent_ip = parent_ip;
- spin_unlock_irqrestore(&data->lock, irq_flags);
-
- trace_wake_up();
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
}
void
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, 0);
entry->type = TRACE_SPECIAL;
entry->special.arg1 = arg1;
entry->special.arg2 = arg2;
entry->special.arg3 = arg3;
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
trace_wake_up();
}
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_CTX;
entry->ctx.prev_state = prev->state;
entry->ctx.next_pid = next->pid;
entry->ctx.next_prio = next->prio;
+ entry->ctx.next_state = next->state;
__trace_stack(tr, data, flags, 4);
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
}
void
struct trace_entry *entry;
unsigned long irq_flags;
- spin_lock_irqsave(&data->lock, irq_flags);
+ raw_local_irq_save(irq_flags);
+ __raw_spin_lock(&data->lock);
entry = tracing_get_trace_entry(tr, data);
tracing_generic_entry_update(entry, flags);
entry->type = TRACE_WAKE;
entry->ctx.prev_state = curr->state;
entry->ctx.next_pid = wakee->pid;
entry->ctx.next_prio = wakee->prio;
+ entry->ctx.next_state = wakee->state;
__trace_stack(tr, data, flags, 5);
- spin_unlock_irqrestore(&data->lock, irq_flags);
+ __raw_spin_unlock(&data->lock);
+ raw_local_irq_restore(irq_flags);
trace_wake_up();
}
mutex_lock(&trace_types_lock);
- if (!current_trace || current_trace != iter->trace)
+ if (!current_trace || current_trace != iter->trace) {
+ mutex_unlock(&trace_types_lock);
return NULL;
+ }
atomic_inc(&trace_record_cmdline_disabled);
unsigned long abs_usecs;
unsigned long rel_usecs;
char *comm;
- int S;
+ int S, T;
int i;
+ unsigned state;
if (!next_entry)
next_entry = entry;
abs_usecs % 1000, rel_usecs/1000,
rel_usecs % 1000);
} else {
- if (entry->type != TRACE_STACK) {
- lat_print_generic(s, entry, cpu);
- lat_print_timestamp(s, abs_usecs, rel_usecs);
- }
+ lat_print_generic(s, entry, cpu);
+ lat_print_timestamp(s, abs_usecs, rel_usecs);
}
switch (entry->type) {
case TRACE_FN:
break;
case TRACE_CTX:
case TRACE_WAKE:
- S = entry->ctx.prev_state < sizeof(state_to_char) ?
- state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+
+ state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0;
+ S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
comm = trace_find_cmdline(entry->ctx.next_pid);
- trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d %s\n",
+ trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S, entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
entry->ctx.next_prio,
- comm);
+ T, comm);
break;
case TRACE_SPECIAL:
- trace_seq_printf(s, " %ld %ld %ld\n",
+ trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
unsigned long secs;
char *comm;
int ret;
- int S;
+ int S, T;
int i;
entry = iter->ent;
usec_rem = do_div(t, 1000000ULL);
secs = (unsigned long)t;
- if (entry->type != TRACE_STACK) {
- ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
- if (!ret)
- return 0;
- ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
- if (!ret)
- return 0;
- }
+ ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "[%02d] ", iter->cpu);
+ if (!ret)
+ return 0;
+ ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
+ if (!ret)
+ return 0;
switch (entry->type) {
case TRACE_FN:
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
- ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d\n",
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
+ ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
entry->type == TRACE_CTX ? "==>" : " +",
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
struct trace_seq *s = &iter->seq;
struct trace_entry *entry;
int ret;
- int S;
+ int S, T;
entry = iter->ent;
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
if (entry->type == TRACE_WAKE)
S = '+';
- ret = trace_seq_printf(s, "%d %d %c %d %d\n",
+ ret = trace_seq_printf(s, "%d %d %c %d %d %c\n",
entry->ctx.prev_pid,
entry->ctx.prev_prio,
S,
entry->ctx.next_pid,
- entry->ctx.next_prio);
+ entry->ctx.next_prio,
+ T);
if (!ret)
return 0;
break;
case TRACE_SPECIAL:
case TRACE_STACK:
- ret = trace_seq_printf(s, " %ld %ld %ld\n",
+ ret = trace_seq_printf(s, "# %ld %ld %ld\n",
entry->special.arg1,
entry->special.arg2,
entry->special.arg3);
struct trace_seq *s = &iter->seq;
unsigned char newline = '\n';
struct trace_entry *entry;
- int S;
+ int S, T;
entry = iter->ent;
case TRACE_WAKE:
S = entry->ctx.prev_state < sizeof(state_to_char) ?
state_to_char[entry->ctx.prev_state] : 'X';
+ T = entry->ctx.next_state < sizeof(state_to_char) ?
+ state_to_char[entry->ctx.next_state] : 'X';
if (entry->type == TRACE_WAKE)
S = '+';
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid);
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid);
SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio);
SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip);
+ SEQ_PUT_HEX_FIELD_RET(s, T);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid);
SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio);
+ SEQ_PUT_FIELD_RET(s, entry->ctx.next_state);
break;
case TRACE_SPECIAL:
case TRACE_STACK:
static int print_trace_line(struct trace_iterator *iter)
{
+ if (iter->trace && iter->trace->print_line)
+ return iter->trace->print_line(iter);
+
if (trace_flags & TRACE_ITER_BIN)
return print_bin_fmt(iter);
};
static struct file_operations show_traces_fops = {
- .open = show_traces_open,
- .read = seq_read,
- .release = seq_release,
+ .open = show_traces_open,
+ .read = seq_read,
+ .release = seq_release,
+};
+
+/*
+ * Only trace on a CPU if the bitmask is set:
+ */
+static cpumask_t tracing_cpumask = CPU_MASK_ALL;
+
+/*
+ * When tracing/tracing_cpu_mask is modified then this holds
+ * the new bitmask we are about to install:
+ */
+static cpumask_t tracing_cpumask_new;
+
+/*
+ * The tracer itself will not take this lock, but still we want
+ * to provide a consistent cpumask to user-space:
+ */
+static DEFINE_MUTEX(tracing_cpumask_update_lock);
+
+/*
+ * Temporary storage for the character representation of the
+ * CPU bitmask (and one more byte for the newline):
+ */
+static char mask_str[NR_CPUS + 1];
+
+/*
+ * debugfs read handler for tracing/tracing_cpumask: format the current
+ * tracing cpumask into mask_str (hex, cpumask_scnprintf format) and copy
+ * it to user-space with a trailing newline.  Serialized against the
+ * write handler by tracing_cpumask_update_lock.
+ */
+static ssize_t
+tracing_cpumask_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+
+ len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
+ /* need room for the '\n' and the terminating NUL */
+ if (count - len < 2) {
+ count = -EINVAL;
+ goto out_err;
+ }
+ len += sprintf(mask_str + len, "\n");
+ /* NOTE(review): 'count' is reused to carry the return value (bytes
+ * copied or -EINVAL); mask_str is sized NR_CPUS+1 at file scope. */
+ count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
+
+out_err:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+}
+
+/*
+ * debugfs write handler for tracing/tracing_cpumask: parse a new cpumask
+ * from user-space and install it.  For every bit that flips, the per-cpu
+ * 'disabled' counter is incremented (tracing turned off for that cpu) or
+ * decremented (turned back on), under ftrace_max_lock with IRQs disabled
+ * so the flip cannot race with the tracer's max-latency bookkeeping.
+ */
+static ssize_t
+tracing_cpumask_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err, cpu;
+
+ mutex_lock(&tracing_cpumask_update_lock);
+ /* parse into the staging mask first; the live mask is untouched on error */
+ err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
+ if (err)
+ goto err_unlock;
+
+ raw_local_irq_disable();
+ __raw_spin_lock(&ftrace_max_lock);
+ for_each_possible_cpu(cpu) {
+ /*
+ * Increase/decrease the disabled counter if we are
+ * about to flip a bit in the cpumask:
+ */
+ if (cpu_isset(cpu, tracing_cpumask) &&
+ !cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_inc(&global_trace.data[cpu]->disabled);
+ }
+ if (!cpu_isset(cpu, tracing_cpumask) &&
+ cpu_isset(cpu, tracing_cpumask_new)) {
+ atomic_dec(&global_trace.data[cpu]->disabled);
+ }
+ }
+ __raw_spin_unlock(&ftrace_max_lock);
+ raw_local_irq_enable();
+
+ /* publish the new mask; readers take tracing_cpumask_update_lock */
+ tracing_cpumask = tracing_cpumask_new;
+
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return count;
+
+err_unlock:
+ mutex_unlock(&tracing_cpumask_update_lock);
+
+ return err;
+}
+
+/* file_operations for the debugfs tracing/tracing_cpumask control file */
+static struct file_operations tracing_cpumask_fops = {
+ .open = tracing_open_generic,
+ .read = tracing_cpumask_read,
+ .write = tracing_cpumask_write,
 };
static ssize_t
r += sprintf(buf + r, "\n");
WARN_ON(r >= len + 2);
- r = simple_read_from_buffer(ubuf, cnt, ppos,
- buf, r);
+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
kfree(buf);
break;
}
}
+ /*
+ * If no option could be set, return an error:
+ */
+ if (!trace_options[i])
+ return -EINVAL;
filp->f_pos += cnt;
}
static struct file_operations tracing_iter_fops = {
- .open = tracing_open_generic,
- .read = tracing_iter_ctrl_read,
- .write = tracing_iter_ctrl_write,
+ .open = tracing_open_generic,
+ .read = tracing_iter_ctrl_read,
+ .write = tracing_iter_ctrl_write,
};
static const char readme_msg[] =
}
static struct file_operations tracing_readme_fops = {
- .open = tracing_open_generic,
- .read = tracing_readme_read,
+ .open = tracing_open_generic,
+ .read = tracing_readme_read,
};
static ssize_t
return -ENOMEM;
iter->tr = &global_trace;
+ iter->trace = current_trace;
filp->private_data = iter;
{
struct trace_iterator *iter = filp->private_data;
struct trace_array_cpu *data;
+ struct trace_array *tr = iter->tr;
+ struct tracer *tracer = iter->trace;
static cpumask_t mask;
static int start;
unsigned long flags;
start = 0;
while (trace_empty(iter)) {
- if (!(trace_flags & TRACE_ITER_BLOCK))
- return -EWOULDBLOCK;
/*
* This is a make-shift waitqueue. The reason we don't use
* an actual wait queue is because:
cnt = PAGE_SIZE - 1;
memset(iter, 0, sizeof(*iter));
- iter->tr = &global_trace;
+ iter->tr = tr;
+ iter->trace = tracer;
iter->pos = -1;
/*
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
- spin_lock(&data->lock);
+ __raw_spin_lock(&data->lock);
}
while (find_next_entry_inc(iter) != NULL) {
for_each_cpu_mask(cpu, mask) {
data = iter->tr->data[cpu];
- spin_unlock(&data->lock);
+ __raw_spin_unlock(&data->lock);
}
for_each_cpu_mask(cpu, mask) {
if (!entry)
pr_warning("Could not create debugfs 'iter_ctrl' entry\n");
+ entry = debugfs_create_file("tracing_cpumask", 0644, d_tracer,
+ NULL, &tracing_cpumask_fops);
+ if (!entry)
+ pr_warning("Could not create debugfs 'tracing_cpumask' entry\n");
+
entry = debugfs_create_file("latency_trace", 0444, d_tracer,
&global_trace, &tracing_lt_fops);
if (!entry)
/* Now that we successfully allocate a page per CPU, add them */
for_each_possible_cpu(i) {
data = global_trace.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
+ data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);
#ifdef CONFIG_TRACER_MAX_TRACE
data = max_tr.data[i];
- spin_lock_init(&data->lock);
- lockdep_set_class(&data->lock, &data->lock_key);
+ data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
page = list_entry(pages.next, struct page, lru);
list_del_init(&page->lru);
list_add_tail(&page->lru, &data->trace_pages);