tracing: Remove NR_CPUS array from trace_iterator
author Steven Rostedt <srostedt@redhat.com>
Thu, 28 Jun 2012 00:46:14 +0000 (20:46 -0400)
committer Steven Rostedt <rostedt@goodmis.org>
Thu, 28 Jun 2012 17:52:15 +0000 (13:52 -0400)
Replace the NR_CPUS-sized buffer_iter array embedded in the trace_iterator
with a dynamically allocated array. The array is now sized for the number
of possible CPUs rather than the compile-time maximum.
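
The gist of the conversion, as a rough sketch (the struct member matches
the diff below; the standalone allocation helper and its name are only
illustrative):

	/* Before: fixed-size member, always NR_CPUS entries */
	struct ring_buffer_iter	*buffer_iter[NR_CPUS];

	/* After: bare pointer, sized at open time for possible CPUs only */
	struct ring_buffer_iter	**buffer_iter;

	/*
	 * Hypothetical helper mirroring the kzalloc() done in
	 * __tracing_open(); callers must cope with a NULL array.
	 */
	static struct ring_buffer_iter **alloc_buffer_iters(void)
	{
		return kzalloc(sizeof(struct ring_buffer_iter *) *
			       num_possible_cpus(), GFP_KERNEL);
	}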

The use of NR_CPUS in that array made struct trace_iterator large: on
configurations where NR_CPUS runs into the thousands, the embedded pointer
array alone adds tens of kilobytes, and the resulting higher-order
allocation could fail on machines that were tight on memory. This never
harmed the system itself (no crashes), but it caused unnecessary failures
when opening the trace files for reading.

Add a helper function, trace_buffer_iter(), that returns the per-CPU
buffer_iter entry, or NULL if the entry is not set or the array was never
allocated. Some routines do not need the array at all (tracing_open_pipe()
for one), so callers must handle the NULL case.
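
Condensed from the hunks below, the caller pattern after the conversion
(a sketch; peek_next_entry() in trace.c is the real example):

	struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
	struct ring_buffer_event *event;

	if (buf_iter)
		/* An iterator was allocated for this CPU: peek through it */
		event = ring_buffer_iter_peek(buf_iter, ts);
	else
		/*
		 * No iterator (array absent or entry unset): read the
		 * live ring buffer directly instead.
		 */
		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
					 lost_events);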

Reported-by: Dave Jones <davej@redhat.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
include/linux/ftrace_event.h
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_functions_graph.c

diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 1aff18346c7184e13ef2d8d5e7e15afee8dfb60b..af961d6f7ab14edc6b3a0c8ec813124fba5415da 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -65,7 +65,7 @@ struct trace_iterator {
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
-       struct ring_buffer_iter *buffer_iter[NR_CPUS];
+       struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;
 
        /* trace_seq for __print_flags() and __print_symbolic() etc. */
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 748f6401edf657bb9d09e5d610dbd5ab7f508939..b2af14e94c28c7c9bcea9ac72550fe40398be33c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1710,9 +1710,11 @@ EXPORT_SYMBOL_GPL(trace_vprintk);
 
 static void trace_iterator_increment(struct trace_iterator *iter)
 {
+       struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
+
        iter->idx++;
-       if (iter->buffer_iter[iter->cpu])
-               ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
+       if (buf_iter)
+               ring_buffer_read(buf_iter, NULL);
 }
 
 static struct trace_entry *
@@ -1720,7 +1722,7 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
                unsigned long *lost_events)
 {
        struct ring_buffer_event *event;
-       struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
+       struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
 
        if (buf_iter)
                event = ring_buffer_iter_peek(buf_iter, ts);
@@ -1858,10 +1860,10 @@ void tracing_iter_reset(struct trace_iterator *iter, int cpu)
 
        tr->data[cpu]->skipped_entries = 0;
 
-       if (!iter->buffer_iter[cpu])
+       buf_iter = trace_buffer_iter(iter, cpu);
+       if (!buf_iter)
                return;
 
-       buf_iter = iter->buffer_iter[cpu];
        ring_buffer_iter_reset(buf_iter);
 
        /*
@@ -2207,13 +2209,15 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 
 int trace_empty(struct trace_iterator *iter)
 {
+       struct ring_buffer_iter *buf_iter;
        int cpu;
 
        /* If we are looking at one CPU buffer, only check that one */
        if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
                cpu = iter->cpu_file;
-               if (iter->buffer_iter[cpu]) {
-                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+               buf_iter = trace_buffer_iter(iter, cpu);
+               if (buf_iter) {
+                       if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
                        if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2223,8 +2227,9 @@ int trace_empty(struct trace_iterator *iter)
        }
 
        for_each_tracing_cpu(cpu) {
-               if (iter->buffer_iter[cpu]) {
-                       if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
+               buf_iter = trace_buffer_iter(iter, cpu);
+               if (buf_iter) {
+                       if (!ring_buffer_iter_empty(buf_iter))
                                return 0;
                } else {
                        if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
@@ -2383,6 +2388,8 @@ __tracing_open(struct inode *inode, struct file *file)
        if (!iter)
                return ERR_PTR(-ENOMEM);
 
+       iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
+                                   GFP_KERNEL);
        /*
         * We make a copy of the current tracer to avoid concurrent
         * changes on it while we are reading.
@@ -2443,6 +2450,7 @@ __tracing_open(struct inode *inode, struct file *file)
  fail:
        mutex_unlock(&trace_types_lock);
        kfree(iter->trace);
+       kfree(iter->buffer_iter);
        seq_release_private(inode, file);
        return ERR_PTR(-ENOMEM);
 }
@@ -2483,6 +2491,7 @@ static int tracing_release(struct inode *inode, struct file *file)
        mutex_destroy(&iter->mutex);
        free_cpumask_var(iter->started);
        kfree(iter->trace);
+       kfree(iter->buffer_iter);
        seq_release_private(inode, file);
        return 0;
 }
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 5aec220d2de0de314b4d292eaaeba43c60f34a1a..55e1f7f0db126edf4bbcbe07245a9558b1030bec 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -317,6 +317,14 @@ struct tracer {
 
 #define TRACE_PIPE_ALL_CPU     -1
 
+static inline struct ring_buffer_iter *
+trace_buffer_iter(struct trace_iterator *iter, int cpu)
+{
+       if (iter->buffer_iter && iter->buffer_iter[cpu])
+               return iter->buffer_iter[cpu];
+       return NULL;
+}
+
 int tracer_init(struct tracer *t, struct trace_array *tr);
 int tracing_is_enabled(void);
 void trace_wake_up(void);
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index a7d2a4c653d8d893f51652c8cd599bf780359f45..ce27c8ba8d318ffa6337f231e12a680f6deb7720 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -538,7 +538,7 @@ get_return_for_leaf(struct trace_iterator *iter,
                next = &data->ret;
        } else {
 
-               ring_iter = iter->buffer_iter[iter->cpu];
+               ring_iter = trace_buffer_iter(iter, iter->cpu);
 
                /* First peek to compare current entry and the next one */
                if (ring_iter)