locking: Rename __RAW_SPIN_LOCK_UNLOCKED to __ARCH_SPIN_LOCK_UNLOCKED
[deliverable/linux.git] / kernel / trace / trace.c
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 874f2893cff0e1d65d97b99217feb30f9900b4f2..63bc1cc3821979dfb6663d3d28af57579174a48a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -86,17 +86,17 @@ static int dummy_set_flag(u32 old_flags, u32 bit, int set)
  */
 static int tracing_disabled = 1;
 
-DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
+DEFINE_PER_CPU(int, ftrace_cpu_disabled);
 
 static inline void ftrace_disable_cpu(void)
 {
 	preempt_disable();
-	local_inc(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_inc(per_cpu_var(ftrace_cpu_disabled));
 }
 
 static inline void ftrace_enable_cpu(void)
 {
-	local_dec(&__get_cpu_var(ftrace_cpu_disabled));
+	__this_cpu_dec(per_cpu_var(ftrace_cpu_disabled));
 	preempt_enable();
 }
 
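Note: the sketch below is illustrative and not part of this diff. It shows the pattern the hunk above converts to: a plain int per-cpu counter bumped with the __this_cpu ops while preemption is disabled, instead of a local_t updated through local_inc()/local_dec(). The my_* names are hypothetical; the per_cpu_var() wrapper matches this kernel's vintage.

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    DEFINE_PER_CPU(int, my_disabled);                 /* hypothetical counter */

    static inline void my_disable(void)
    {
            preempt_disable();                        /* pin to this CPU */
            __this_cpu_inc(per_cpu_var(my_disabled)); /* plain increment */
    }

    static inline void my_enable(void)
    {
            __this_cpu_dec(per_cpu_var(my_disabled));
            preempt_enable();
    }

Since preemption is already off, no atomic read-modify-write is needed; on most architectures __this_cpu_inc() compiles down to a single percpu-addressed increment.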
@@ -203,7 +203,7 @@ cycle_t ftrace_now(int cpu)
  */
 static struct trace_array	max_tr;
 
-static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
+static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);
 
 /* tracer_enabled is used to toggle activation of a tracer */
 static int			tracer_enabled = 1;
@@ -493,15 +493,15 @@ static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
  * protected by per_cpu spinlocks. But the action of the swap
  * needs its own lock.
  *
- * This is defined as a raw_spinlock_t in order to help
+ * This is defined as a arch_spinlock_t in order to help
  * with performance when lockdep debugging is enabled.
  *
  * It is also used in other places outside the update_max_tr
  * so it needs to be defined outside of the
  * CONFIG_TRACER_MAX_TRACE.
  */
-static raw_spinlock_t ftrace_max_lock =
-	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t ftrace_max_lock =
+	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 
 #ifdef CONFIG_TRACER_MAX_TRACE
 unsigned long __read_mostly	tracing_max_latency;
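Note: an illustrative sketch, not part of this diff. The rename signals that arch_spinlock_t is the raw architecture-level lock: it bypasses lockdep, so it stays cheap under lock debugging, but the caller must manage IRQ state itself. Following the pattern used elsewhere in this file (my_lock is a hypothetical name):

    static arch_spinlock_t my_lock =
            (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    unsigned long flags;

    raw_local_irq_save(flags);     /* arch locks do not disable IRQs themselves */
    __raw_spin_lock(&my_lock);
    /* ... critical section ... */
    __raw_spin_unlock(&my_lock);
    raw_local_irq_restore(flags);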
@@ -802,7 +802,7 @@ static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
 static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
 static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
 static int cmdline_idx;
-static raw_spinlock_t trace_cmdline_lock = __RAW_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 
 /* temporary disable recording */
 static atomic_t trace_record_cmdline_disabled __read_mostly;
@@ -1085,7 +1085,7 @@ trace_function(struct trace_array *tr,
 	struct ftrace_entry *entry;
 
 	/* If we are reading the ring buffer, don't trace */
-	if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
+	if (unlikely(__this_cpu_read(per_cpu_var(ftrace_cpu_disabled))))
 		return;
 
 	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -1251,8 +1251,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
  */
 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	static u32 trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_bprint;
@@ -1334,7 +1334,7 @@ int trace_array_printk(struct trace_array *tr,
 int trace_array_vprintk(struct trace_array *tr,
 			unsigned long ip, const char *fmt, va_list args)
 {
-	static raw_spinlock_t trace_buf_lock = __RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t trace_buf_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 	static char trace_buf[TRACE_BUF_SIZE];
 
 	struct ftrace_event_call *call = &event_print;
@@ -1361,11 +1361,7 @@ int trace_array_vprintk(struct trace_array *tr,
 	pause_graph_tracing();
 	raw_local_irq_save(irq_flags);
 	__raw_spin_lock(&trace_buf_lock);
-	if (args == NULL) {
-		strncpy(trace_buf, fmt, TRACE_BUF_SIZE);
-		len = strlen(trace_buf);
-	} else
-		len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
+	len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
 
 	size = sizeof(*entry) + len + 1;
 	buffer = tr->buffer;
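Note, not part of this diff: with the args == NULL crutch gone, fmt here is always interpreted as a real format string. One thing to keep in mind about the remaining call: vsnprintf() writes at most TRACE_BUF_SIZE bytes but returns the length the full expansion would have taken, so code sizing a record from len may want a clamp (hypothetical, shown only for illustration):

    len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
    if (len >= TRACE_BUF_SIZE)          /* expansion was truncated */
            len = TRACE_BUF_SIZE - 1;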
@@ -1516,6 +1512,8 @@ static void *s_next(struct seq_file *m, void *v, loff_t *pos)
 	int i = (int)*pos;
 	void *ent;
 
+	WARN_ON_ONCE(iter->leftover);
+
 	(*pos)++;
 
 	/* can't go backwards */
@@ -1614,8 +1612,16 @@ static void *s_start(struct seq_file *m, loff_t *pos)
 			;
 
 	} else {
-		l = *pos - 1;
-		p = s_next(m, p, &l);
+		/*
+		 * If we overflowed the seq_file before, then we want
+		 * to just reuse the trace_seq buffer again.
+		 */
+		if (iter->leftover)
+			p = iter;
+		else {
+			l = *pos - 1;
+			p = s_next(m, p, &l);
+		}
 	}
 
 	trace_event_read_lock();
@@ -1923,6 +1929,7 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 static int s_show(struct seq_file *m, void *v)
 {
 	struct trace_iterator *iter = v;
+	int ret;
 
 	if (iter->ent == NULL) {
 		if (iter->tr) {
@@ -1942,9 +1949,27 @@ static int s_show(struct seq_file *m, void *v)
 			if (!(trace_flags & TRACE_ITER_VERBOSE))
 				print_func_help_header(m);
 		}
+	} else if (iter->leftover) {
+		/*
+		 * If we filled the seq_file buffer earlier, we
+		 * want to just show it now.
+		 */
+		ret = trace_print_seq(m, &iter->seq);
+
+		/* ret should this time be zero, but you never know */
+		iter->leftover = ret;
+
 	} else {
 		print_trace_line(iter);
-		trace_print_seq(m, &iter->seq);
+		ret = trace_print_seq(m, &iter->seq);
+		/*
+		 * If we overflow the seq_file buffer, then it will
+		 * ask us for this data again at start up.
+		 * Use that instead.
+		 *  ret is 0 if seq_file write succeeded.
+		 *        -1 otherwise.
+		 */
+		iter->leftover = ret;
 	}
 
 	return 0;
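Note: an illustrative walk-through, not part of this diff. The three seq_file hunks (s_next, s_start, s_show) cooperate to replay output when the seq_file buffer overflows; per the comment above, trace_print_seq() returns 0 on success and -1 when the buffer filled:

    /*
     * s_show():  ret = trace_print_seq(m, &iter->seq); // -1: m was full
     *            iter->leftover = ret;                 // remember it
     * seq_file:  drops the partial output, doubles its buffer,
     *            and restarts the read at the same *pos
     * s_start(): if (iter->leftover) p = iter;         // don't advance
     * s_show():  the leftover branch replays iter->seq into the
     *            larger buffer and clears iter->leftover
     */

The WARN_ON_ONCE(iter->leftover) added to s_next() guards the invariant that the iterator never advances while a replay is pending.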
@@ -2898,6 +2923,10 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
 	else
 		cpumask_clear_cpu(iter->cpu_file, tracing_reader_cpumask);
 
+
+	if (iter->trace->pipe_close)
+		iter->trace->pipe_close(iter);
+
 	mutex_unlock(&trace_types_lock);
 
 	free_cpumask_var(iter->started);
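Note: an illustrative sketch, not part of this diff. pipe_close is an optional callback in struct tracer that lets a tracer tear down per-reader state when trace_pipe is closed; a hypothetical tracer would wire it up like so:

    static void my_pipe_close(struct trace_iterator *iter)
    {
            /* free whatever this tracer attached for this reader */
    }

    static struct tracer my_tracer __read_mostly = {
            .name           = "my_tracer",
            .pipe_close     = my_pipe_close, /* called by tracing_release_pipe() */
    };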
@@ -3320,6 +3349,16 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 	return cnt;
 }
 
+static int mark_printk(const char *fmt, ...)
+{
+	int ret;
+	va_list args;
+	va_start(args, fmt);
+	ret = trace_vprintk(0, fmt, args);
+	va_end(args);
+	return ret;
+}
+
 static ssize_t
 tracing_mark_write(struct file *filp, const char __user *ubuf,
 					size_t cnt, loff_t *fpos)
@@ -3346,7 +3385,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 	} else
 		buf[cnt] = '\0';
 
-	cnt = trace_vprintk(0, buf, NULL);
+	cnt = mark_printk("%s", buf);
 	kfree(buf);
 	*fpos += cnt;
 
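Note, not part of this diff: buf arrives from userspace, so it must never be used as the format string itself. Routing it through mark_printk("%s", buf) keeps any '%' sequences in the user's text inert; the old trace_vprintk(0, buf, NULL) call depended on the args == NULL special case removed in the -1361,11 hunk above.

    mark_printk(buf);        /* unsafe: "%s" or "%n" in buf would be interpreted */
    mark_printk("%s", buf);  /* safe: buf is plain data for the one conversion */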
@@ -4268,8 +4307,8 @@ trace_printk_seq(struct trace_seq *s)
 
 static void __ftrace_dump(bool disable_tracing)
 {
-	static raw_spinlock_t ftrace_dump_lock =
-		(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+	static arch_spinlock_t ftrace_dump_lock =
+		(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
 	unsigned int old_userobj;
@@ -4415,7 +4454,7 @@ __init static int tracer_alloc_buffers(void)
 	/* Allocate the first page for all buffers */
 	for_each_tracing_cpu(i) {
 		global_trace.data[i] = &per_cpu(global_trace_cpu, i);
-		max_tr.data[i] = &per_cpu(max_data, i);
+		max_tr.data[i] = &per_cpu(max_tr_data, i);
 	}
 
 	trace_init_cmdlines();