Merge branches 'tracing/kmemtrace2' and 'tracing/ftrace' into tracing/urgent
author Ingo Molnar <mingo@elte.hu>
Tue, 6 Jan 2009 09:18:43 +0000 (10:18 +0100)
committer Ingo Molnar <mingo@elte.hu>
Tue, 6 Jan 2009 09:18:43 +0000 (10:18 +0100)
kernel/trace/Makefile
kernel/trace/trace.c
kernel/trace/trace.h
kernel/trace/trace_boot.c
kernel/trace/trace_functions_graph.c
kernel/trace/trace_hw_branches.c
kernel/trace/trace_power.c

diff --combined kernel/trace/Makefile
index 513dc86b5dfabb3fc4969257804dde99e60edc7f,31cd5fbc0eedfd537891bac6a503256fad1d37d8..05c9182061dece9b818a8574016d58429fb65162
@@@ -19,6 -19,8 +19,8 @@@ obj-$(CONFIG_FUNCTION_TRACER) += libftr
  obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
  
  obj-$(CONFIG_TRACING) += trace.o
+ obj-$(CONFIG_TRACING) += trace_output.o
+ obj-$(CONFIG_TRACING) += trace_stat.o
  obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
  obj-$(CONFIG_SYSPROF_TRACER) += trace_sysprof.o
  obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
@@@ -33,6 -35,5 +35,6 @@@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += 
  obj-$(CONFIG_TRACE_BRANCH_PROFILING) += trace_branch.o
  obj-$(CONFIG_HW_BRANCH_TRACER) += trace_hw_branches.o
  obj-$(CONFIG_POWER_TRACER) += trace_power.o
 +obj-$(CONFIG_KMEMTRACE) += kmemtrace.o
  
  libftrace-y := ftrace.o
diff --combined kernel/trace/trace.c
index c580233add95a0915f4797b84ae05a728aa986c6,b789c010512c57c98fadd09443503b87b6b808a5..0418fc338b5c2d2d36cdc2987e7e7d4666126929
@@@ -30,6 -30,7 +30,6 @@@
  #include <linux/gfp.h>
  #include <linux/fs.h>
  #include <linux/kprobes.h>
 -#include <linux/seq_file.h>
  #include <linux/writeback.h>
  
  #include <linux/stacktrace.h>
@@@ -37,6 -38,7 +37,7 @@@
  #include <linux/irqflags.h>
  
  #include "trace.h"
+ #include "trace_output.h"
  
  #define TRACE_BUFFER_FLAGS    (RB_FL_OVERWRITE)
  
@@@ -89,10 -91,10 +90,10 @@@ static inline void ftrace_enable_cpu(vo
        preempt_enable();
  }
  
 -static cpumask_t __read_mostly                tracing_buffer_mask;
 +static cpumask_var_t __read_mostly    tracing_buffer_mask;
  
  #define for_each_tracing_cpu(cpu)     \
 -      for_each_cpu_mask(cpu, tracing_buffer_mask)
 +      for_each_cpu(cpu, tracing_buffer_mask)
  
  /*
   * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
@@@ -329,132 -331,6 +330,6 @@@ __update_max_tr(struct trace_array *tr
        tracing_record_cmdline(current);
  }
  
- /**
-  * trace_seq_printf - sequence printing of trace information
-  * @s: trace sequence descriptor
-  * @fmt: printf format string
-  *
-  * The tracer may use either sequence operations or its own
-  * copy to user routines. To simplify formating of a trace
-  * trace_seq_printf is used to store strings into a special
-  * buffer (@s). Then the output may be either used by
-  * the sequencer or pulled into another buffer.
-  */
- int
- trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
- {
-       int len = (PAGE_SIZE - 1) - s->len;
-       va_list ap;
-       int ret;
-       if (!len)
-               return 0;
-       va_start(ap, fmt);
-       ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
-       va_end(ap);
-       /* If we can't write it all, don't bother writing anything */
-       if (ret >= len)
-               return 0;
-       s->len += ret;
-       return len;
- }
- /**
-  * trace_seq_puts - trace sequence printing of simple string
-  * @s: trace sequence descriptor
-  * @str: simple string to record
-  *
-  * The tracer may use either the sequence operations or its own
-  * copy to user routines. This function records a simple string
-  * into a special buffer (@s) for later retrieval by a sequencer
-  * or other mechanism.
-  */
- static int
- trace_seq_puts(struct trace_seq *s, const char *str)
- {
-       int len = strlen(str);
-       if (len > ((PAGE_SIZE - 1) - s->len))
-               return 0;
-       memcpy(s->buffer + s->len, str, len);
-       s->len += len;
-       return len;
- }
- static int
- trace_seq_putc(struct trace_seq *s, unsigned char c)
- {
-       if (s->len >= (PAGE_SIZE - 1))
-               return 0;
-       s->buffer[s->len++] = c;
-       return 1;
- }
- static int
- trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
- {
-       if (len > ((PAGE_SIZE - 1) - s->len))
-               return 0;
-       memcpy(s->buffer + s->len, mem, len);
-       s->len += len;
-       return len;
- }
- #define MAX_MEMHEX_BYTES      8
- #define HEX_CHARS             (MAX_MEMHEX_BYTES*2 + 1)
- static int
- trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
- {
-       unsigned char hex[HEX_CHARS];
-       unsigned char *data = mem;
-       int i, j;
- #ifdef __BIG_ENDIAN
-       for (i = 0, j = 0; i < len; i++) {
- #else
-       for (i = len-1, j = 0; i >= 0; i--) {
- #endif
-               hex[j++] = hex_asc_hi(data[i]);
-               hex[j++] = hex_asc_lo(data[i]);
-       }
-       hex[j++] = ' ';
-       return trace_seq_putmem(s, hex, j);
- }
- static int
- trace_seq_path(struct trace_seq *s, struct path *path)
- {
-       unsigned char *p;
-       if (s->len >= (PAGE_SIZE - 1))
-               return 0;
-       p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
-       if (!IS_ERR(p)) {
-               p = mangle_path(s->buffer + s->len, p, "\n");
-               if (p) {
-                       s->len = p - s->buffer;
-                       return 1;
-               }
-       } else {
-               s->buffer[s->len++] = '?';
-               return 1;
-       }
-       return 0;
- }
  static void
  trace_seq_reset(struct trace_seq *s)
  {
@@@ -1309,7 -1185,7 +1184,7 @@@ enum trace_file_type 
        TRACE_FILE_ANNOTATE     = 2,
  };
  
 -static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
 +static void trace_iterator_increment(struct trace_iterator *iter)
  {
        /* Don't allow ftrace to trace into the ring buffers */
        ftrace_disable_cpu();
@@@ -1388,7 -1264,7 +1263,7 @@@ static void *find_next_entry_inc(struc
        iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
  
        if (iter->ent)
 -              trace_iterator_increment(iter, iter->cpu);
 +              trace_iterator_increment(iter);
  
        return iter->ent ? iter : NULL;
  }
@@@ -1472,154 -1348,6 +1347,6 @@@ static void s_stop(struct seq_file *m, 
        mutex_unlock(&trace_types_lock);
  }
  
- #ifdef CONFIG_KRETPROBES
- static inline const char *kretprobed(const char *name)
- {
-       static const char tramp_name[] = "kretprobe_trampoline";
-       int size = sizeof(tramp_name);
-       if (strncmp(tramp_name, name, size) == 0)
-               return "[unknown/kretprobe'd]";
-       return name;
- }
- #else
- static inline const char *kretprobed(const char *name)
- {
-       return name;
- }
- #endif /* CONFIG_KRETPROBES */
- static int
- seq_print_sym_short(struct trace_seq *s, const char *fmt, unsigned long address)
- {
- #ifdef CONFIG_KALLSYMS
-       char str[KSYM_SYMBOL_LEN];
-       const char *name;
-       kallsyms_lookup(address, NULL, NULL, NULL, str);
-       name = kretprobed(str);
-       return trace_seq_printf(s, fmt, name);
- #endif
-       return 1;
- }
- static int
- seq_print_sym_offset(struct trace_seq *s, const char *fmt,
-                    unsigned long address)
- {
- #ifdef CONFIG_KALLSYMS
-       char str[KSYM_SYMBOL_LEN];
-       const char *name;
-       sprint_symbol(str, address);
-       name = kretprobed(str);
-       return trace_seq_printf(s, fmt, name);
- #endif
-       return 1;
- }
- #ifndef CONFIG_64BIT
- # define IP_FMT "%08lx"
- #else
- # define IP_FMT "%016lx"
- #endif
- int
- seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
- {
-       int ret;
-       if (!ip)
-               return trace_seq_printf(s, "0");
-       if (sym_flags & TRACE_ITER_SYM_OFFSET)
-               ret = seq_print_sym_offset(s, "%s", ip);
-       else
-               ret = seq_print_sym_short(s, "%s", ip);
-       if (!ret)
-               return 0;
-       if (sym_flags & TRACE_ITER_SYM_ADDR)
-               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
-       return ret;
- }
- static inline int seq_print_user_ip(struct trace_seq *s, struct mm_struct *mm,
-                                   unsigned long ip, unsigned long sym_flags)
- {
-       struct file *file = NULL;
-       unsigned long vmstart = 0;
-       int ret = 1;
-       if (mm) {
-               const struct vm_area_struct *vma;
-               down_read(&mm->mmap_sem);
-               vma = find_vma(mm, ip);
-               if (vma) {
-                       file = vma->vm_file;
-                       vmstart = vma->vm_start;
-               }
-               if (file) {
-                       ret = trace_seq_path(s, &file->f_path);
-                       if (ret)
-                               ret = trace_seq_printf(s, "[+0x%lx]", ip - vmstart);
-               }
-               up_read(&mm->mmap_sem);
-       }
-       if (ret && ((sym_flags & TRACE_ITER_SYM_ADDR) || !file))
-               ret = trace_seq_printf(s, " <" IP_FMT ">", ip);
-       return ret;
- }
- static int
- seq_print_userip_objs(const struct userstack_entry *entry, struct trace_seq *s,
-                     unsigned long sym_flags)
- {
-       struct mm_struct *mm = NULL;
-       int ret = 1;
-       unsigned int i;
-       if (trace_flags & TRACE_ITER_SYM_USEROBJ) {
-               struct task_struct *task;
-               /*
-                * we do the lookup on the thread group leader,
-                * since individual threads might have already quit!
-                */
-               rcu_read_lock();
-               task = find_task_by_vpid(entry->ent.tgid);
-               if (task)
-                       mm = get_task_mm(task);
-               rcu_read_unlock();
-       }
-       for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-               unsigned long ip = entry->caller[i];
-               if (ip == ULONG_MAX || !ret)
-                       break;
-               if (i && ret)
-                       ret = trace_seq_puts(s, " <- ");
-               if (!ip) {
-                       if (ret)
-                               ret = trace_seq_puts(s, "??");
-                       continue;
-               }
-               if (!ret)
-                       break;
-               if (ret)
-                       ret = seq_print_user_ip(s, mm, ip, sym_flags);
-       }
-       if (mm)
-               mmput(mm);
-       return ret;
- }
  static void print_lat_help_header(struct seq_file *m)
  {
        seq_puts(m, "#                  _------=> CPU#            \n");
@@@ -1755,52 -1483,6 +1482,6 @@@ lat_print_timestamp(struct trace_seq *s
                trace_seq_puts(s, " : ");
  }
  
- static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
- static int task_state_char(unsigned long state)
- {
-       int bit = state ? __ffs(state) + 1 : 0;
-       return bit < sizeof(state_to_char) - 1 ? state_to_char[bit] : '?';
- }
- /*
-  * The message is supposed to contain an ending newline.
-  * If the printing stops prematurely, try to add a newline of our own.
-  */
- void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
- {
-       struct trace_entry *ent;
-       struct trace_field_cont *cont;
-       bool ok = true;
-       ent = peek_next_entry(iter, iter->cpu, NULL);
-       if (!ent || ent->type != TRACE_CONT) {
-               trace_seq_putc(s, '\n');
-               return;
-       }
-       do {
-               cont = (struct trace_field_cont *)ent;
-               if (ok)
-                       ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
-               ftrace_disable_cpu();
-               if (iter->buffer_iter[iter->cpu])
-                       ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
-               else
-                       ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
-               ftrace_enable_cpu();
-               ent = peek_next_entry(iter, iter->cpu, NULL);
-       } while (ent && ent->type == TRACE_CONT);
-       if (!ok)
-               trace_seq_putc(s, '\n');
- }
  static void test_cpu_buff_start(struct trace_iterator *iter)
  {
        struct trace_seq *s = &iter->seq;
        if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
                return;
  
 -      if (cpu_isset(iter->cpu, iter->started))
 +      if (cpumask_test_cpu(iter->cpu, iter->started))
                return;
  
 -      cpu_set(iter->cpu, iter->started);
 +      cpumask_set_cpu(iter->cpu, iter->started);
        trace_seq_printf(s, "##### CPU %u buffer started ####\n", iter->cpu);
  }
  
@@@ -1824,17 -1506,14 +1505,14 @@@ print_lat_fmt(struct trace_iterator *it
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *next_entry;
+       struct trace_event *event;
        unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
        struct trace_entry *entry = iter->ent;
        unsigned long abs_usecs;
        unsigned long rel_usecs;
        u64 next_ts;
        char *comm;
-       int S, T;
-       int i;
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
+       int ret;
  
        test_cpu_buff_start(iter);
  
                lat_print_generic(s, entry, cpu);
                lat_print_timestamp(s, abs_usecs, rel_usecs);
        }
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-               trace_assign_type(field, entry);
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_puts(s, " (");
-               seq_print_ip_sym(s, field->parent_ip, sym_flags);
-               trace_seq_puts(s, ")\n");
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-               trace_assign_type(field, entry);
-               T = task_state_char(field->next_state);
-               S = task_state_char(field->prev_state);
-               comm = trace_find_cmdline(field->next_pid);
-               trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
-                                field->prev_pid,
-                                field->prev_prio,
-                                S, entry->type == TRACE_CTX ? "==>" : "  +",
-                                field->next_cpu,
-                                field->next_pid,
-                                field->next_prio,
-                                T, comm);
-               break;
-       }
-       case TRACE_SPECIAL: {
-               struct special_entry *field;
-               trace_assign_type(field, entry);
-               trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               break;
-       }
-       case TRACE_STACK: {
-               struct stack_entry *field;
-               trace_assign_type(field, entry);
-               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-                       if (i)
-                               trace_seq_puts(s, " <= ");
-                       seq_print_ip_sym(s, field->caller[i], sym_flags);
-               }
-               trace_seq_puts(s, "\n");
-               break;
-       }
-       case TRACE_PRINT: {
-               struct print_entry *field;
-               trace_assign_type(field, entry);
  
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_printf(s, ": %s", field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       case TRACE_BRANCH: {
-               struct trace_branch *field;
-               trace_assign_type(field, entry);
-               trace_seq_printf(s, "[%s] %s:%s:%d\n",
-                                field->correct ? "  ok  " : " MISS ",
-                                field->func,
-                                field->file,
-                                field->line);
-               break;
+       event = ftrace_find_event(entry->type);
+       if (event && event->latency_trace) {
+               ret = event->latency_trace(s, entry, sym_flags);
+               if (ret)
+                       return ret;
+               return TRACE_TYPE_HANDLED;
        }
-       case TRACE_USER_STACK: {
-               struct userstack_entry *field;
-               trace_assign_type(field, entry);
  
-               seq_print_userip_objs(field, s, sym_flags);
-               trace_seq_putc(s, '\n');
-               break;
-       }
-       default:
-               trace_seq_printf(s, "Unknown type %d\n", entry->type);
-       }
+       trace_seq_printf(s, "Unknown type %d\n", entry->type);
        return TRACE_TYPE_HANDLED;
  }
  
@@@ -1957,19 -1556,15 +1555,15 @@@ static enum print_line_t print_trace_fm
        struct trace_seq *s = &iter->seq;
        unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
        struct trace_entry *entry;
+       struct trace_event *event;
        unsigned long usec_rem;
        unsigned long long t;
        unsigned long secs;
        char *comm;
        int ret;
-       int S, T;
-       int i;
  
        entry = iter->ent;
  
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
        test_cpu_buff_start(iter);
  
        comm = trace_find_cmdline(iter->ent->pid);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
  
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-               trace_assign_type(field, entry);
-               ret = seq_print_ip_sym(s, field->ip, sym_flags);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
-                                               field->parent_ip) {
-                       ret = trace_seq_printf(s, " <-");
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-                       ret = seq_print_ip_sym(s,
-                                              field->parent_ip,
-                                              sym_flags);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
-               ret = trace_seq_printf(s, "\n");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-               trace_assign_type(field, entry);
-               T = task_state_char(field->next_state);
-               S = task_state_char(field->prev_state);
-               ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
-                                      field->prev_pid,
-                                      field->prev_prio,
-                                      S,
-                                      entry->type == TRACE_CTX ? "==>" : "  +",
-                                      field->next_cpu,
-                                      field->next_pid,
-                                      field->next_prio,
-                                      T);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_SPECIAL: {
-               struct special_entry *field;
-               trace_assign_type(field, entry);
-               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_STACK: {
-               struct stack_entry *field;
-               trace_assign_type(field, entry);
-               for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
-                       if (i) {
-                               ret = trace_seq_puts(s, " <= ");
-                               if (!ret)
-                                       return TRACE_TYPE_PARTIAL_LINE;
-                       }
-                       ret = seq_print_ip_sym(s, field->caller[i],
-                                              sym_flags);
-                       if (!ret)
-                               return TRACE_TYPE_PARTIAL_LINE;
-               }
-               ret = trace_seq_puts(s, "\n");
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_PRINT: {
-               struct print_entry *field;
-               trace_assign_type(field, entry);
-               seq_print_ip_sym(s, field->ip, sym_flags);
-               trace_seq_printf(s, ": %s", field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       case TRACE_GRAPH_RET: {
-               return print_graph_function(iter);
-       }
-       case TRACE_GRAPH_ENT: {
-               return print_graph_function(iter);
-       }
-       case TRACE_BRANCH: {
-               struct trace_branch *field;
-               trace_assign_type(field, entry);
-               trace_seq_printf(s, "[%s] %s:%s:%d\n",
-                                field->correct ? "  ok  " : " MISS ",
-                                field->func,
-                                field->file,
-                                field->line);
-               break;
+       event = ftrace_find_event(entry->type);
+       if (event && event->trace) {
+               ret = event->trace(s, entry, sym_flags);
+               if (ret)
+                       return ret;
+               return TRACE_TYPE_HANDLED;
        }
-       case TRACE_USER_STACK: {
-               struct userstack_entry *field;
-               trace_assign_type(field, entry);
+       ret = trace_seq_printf(s, "Unknown type %d\n", entry->type);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
  
-               ret = seq_print_userip_objs(field, s, sym_flags);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               ret = trace_seq_putc(s, '\n');
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       }
        return TRACE_TYPE_HANDLED;
  }
  
@@@ -2118,152 -1601,47 +1600,47 @@@ static enum print_line_t print_raw_fmt(
  {
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
+       struct trace_event *event;
        int ret;
-       int S, T;
  
        entry = iter->ent;
  
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
        ret = trace_seq_printf(s, "%d %d %llu ",
                entry->pid, iter->cpu, iter->ts);
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
  
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-               trace_assign_type(field, entry);
-               ret = trace_seq_printf(s, "%x %x\n",
-                                       field->ip,
-                                       field->parent_ip);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-               trace_assign_type(field, entry);
-               T = task_state_char(field->next_state);
-               S = entry->type == TRACE_WAKE ? '+' :
-                       task_state_char(field->prev_state);
-               ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
-                                      field->prev_pid,
-                                      field->prev_prio,
-                                      S,
-                                      field->next_cpu,
-                                      field->next_pid,
-                                      field->next_prio,
-                                      T);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
-       }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
-               trace_assign_type(field, entry);
-               ret = trace_seq_printf(s, "# %ld %ld %ld\n",
-                                field->arg1,
-                                field->arg2,
-                                field->arg3);
-               if (!ret)
-                       return TRACE_TYPE_PARTIAL_LINE;
-               break;
+       event = ftrace_find_event(entry->type);
+       if (event && event->raw) {
+               ret = event->raw(s, entry, 0);
+               if (ret)
+                       return ret;
+               return TRACE_TYPE_HANDLED;
        }
-       case TRACE_PRINT: {
-               struct print_entry *field;
-               trace_assign_type(field, entry);
+       ret = trace_seq_printf(s, "%d ?\n", entry->type);
+       if (!ret)
+               return TRACE_TYPE_PARTIAL_LINE;
  
-               trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
-               if (entry->flags & TRACE_FLAG_CONT)
-                       trace_seq_print_cont(s, iter);
-               break;
-       }
-       }
        return TRACE_TYPE_HANDLED;
  }
  
- #define SEQ_PUT_FIELD_RET(s, x)                               \
- do {                                                  \
-       if (!trace_seq_putmem(s, &(x), sizeof(x)))      \
-               return 0;                               \
- } while (0)
- #define SEQ_PUT_HEX_FIELD_RET(s, x)                   \
- do {                                                  \
-       BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES);     \
-       if (!trace_seq_putmem_hex(s, &(x), sizeof(x)))  \
-               return 0;                               \
- } while (0)
  static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
  {
        struct trace_seq *s = &iter->seq;
        unsigned char newline = '\n';
        struct trace_entry *entry;
-       int S, T;
+       struct trace_event *event;
  
        entry = iter->ent;
  
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
        SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
        SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
        SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
  
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
+       event = ftrace_find_event(entry->type);
+       if (event && event->hex)
+               event->hex(s, entry, 0);
  
-               trace_assign_type(field, entry);
-               SEQ_PUT_HEX_FIELD_RET(s, field->ip);
-               SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
-               break;
-       }
-       case TRACE_CTX:
-       case TRACE_WAKE: {
-               struct ctx_switch_entry *field;
-               trace_assign_type(field, entry);
-               T = task_state_char(field->next_state);
-               S = entry->type == TRACE_WAKE ? '+' :
-                       task_state_char(field->prev_state);
-               SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
-               SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
-               SEQ_PUT_HEX_FIELD_RET(s, S);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
-               SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
-               SEQ_PUT_HEX_FIELD_RET(s, T);
-               break;
-       }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
-               trace_assign_type(field, entry);
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
-               SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
-               break;
-       }
-       }
        SEQ_PUT_FIELD_RET(s, newline);
  
        return TRACE_TYPE_HANDLED;
@@@ -2282,9 -1660,6 +1659,6 @@@ static enum print_line_t print_printk_m
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
  
-       if (entry->flags & TRACE_FLAG_CONT)
-               trace_seq_print_cont(s, iter);
        return TRACE_TYPE_HANDLED;
  }
  
@@@ -2292,53 -1667,19 +1666,19 @@@ static enum print_line_t print_bin_fmt(
  {
        struct trace_seq *s = &iter->seq;
        struct trace_entry *entry;
+       struct trace_event *event;
  
        entry = iter->ent;
  
-       if (entry->type == TRACE_CONT)
-               return TRACE_TYPE_HANDLED;
        SEQ_PUT_FIELD_RET(s, entry->pid);
        SEQ_PUT_FIELD_RET(s, entry->cpu);
        SEQ_PUT_FIELD_RET(s, iter->ts);
  
-       switch (entry->type) {
-       case TRACE_FN: {
-               struct ftrace_entry *field;
-               trace_assign_type(field, entry);
-               SEQ_PUT_FIELD_RET(s, field->ip);
-               SEQ_PUT_FIELD_RET(s, field->parent_ip);
-               break;
-       }
-       case TRACE_CTX: {
-               struct ctx_switch_entry *field;
-               trace_assign_type(field, entry);
-               SEQ_PUT_FIELD_RET(s, field->prev_pid);
-               SEQ_PUT_FIELD_RET(s, field->prev_prio);
-               SEQ_PUT_FIELD_RET(s, field->prev_state);
-               SEQ_PUT_FIELD_RET(s, field->next_pid);
-               SEQ_PUT_FIELD_RET(s, field->next_prio);
-               SEQ_PUT_FIELD_RET(s, field->next_state);
-               break;
-       }
-       case TRACE_SPECIAL:
-       case TRACE_USER_STACK:
-       case TRACE_STACK: {
-               struct special_entry *field;
-               trace_assign_type(field, entry);
+       event = ftrace_find_event(entry->type);
+       if (event && event->binary)
+               event->binary(s, entry, 0);
  
-               SEQ_PUT_FIELD_RET(s, field->arg1);
-               SEQ_PUT_FIELD_RET(s, field->arg2);
-               SEQ_PUT_FIELD_RET(s, field->arg3);
-               break;
-       }
-       }
-       return 1;
+       return TRACE_TYPE_HANDLED;
  }
  
  static int trace_empty(struct trace_iterator *iter)
@@@ -2646,7 -1987,13 +1986,7 @@@ static struct file_operations show_trac
  /*
   * Only trace on a CPU if the bitmask is set:
   */
 -static cpumask_t tracing_cpumask = CPU_MASK_ALL;
 -
 -/*
 - * When tracing/tracing_cpu_mask is modified then this holds
 - * the new bitmask we are about to install:
 - */
 -static cpumask_t tracing_cpumask_new;
 +static cpumask_var_t tracing_cpumask;
  
  /*
   * The tracer itself will not take this lock, but still we want
@@@ -2687,10 -2034,6 +2027,10 @@@ tracing_cpumask_write(struct file *filp
                      size_t count, loff_t *ppos)
  {
        int err, cpu;
 +      cpumask_var_t tracing_cpumask_new;
 +
 +      if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
 +              return -ENOMEM;
  
        mutex_lock(&tracing_cpumask_update_lock);
        err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
                 * Increase/decrease the disabled counter if we are
                 * about to flip a bit in the cpumask:
                 */
 -              if (cpu_isset(cpu, tracing_cpumask) &&
 -                              !cpu_isset(cpu, tracing_cpumask_new)) {
 +              if (cpumask_test_cpu(cpu, tracing_cpumask) &&
 +                              !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_inc(&global_trace.data[cpu]->disabled);
                }
 -              if (!cpu_isset(cpu, tracing_cpumask) &&
 -                              cpu_isset(cpu, tracing_cpumask_new)) {
 +              if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
 +                              cpumask_test_cpu(cpu, tracing_cpumask_new)) {
                        atomic_dec(&global_trace.data[cpu]->disabled);
                }
        }
        __raw_spin_unlock(&ftrace_max_lock);
        local_irq_enable();
  
 -      tracing_cpumask = tracing_cpumask_new;
 +      cpumask_copy(tracing_cpumask, tracing_cpumask_new);
  
        mutex_unlock(&tracing_cpumask_update_lock);
 +      free_cpumask_var(tracing_cpumask_new);
  
        return count;
  
  err_unlock:
        mutex_unlock(&tracing_cpumask_update_lock);
 +      free_cpumask_var(tracing_cpumask);
  
        return err;
  }
@@@ -3013,6 -2354,7 +2353,7 @@@ static int tracing_set_tracer(char *buf
                if (ret)
                        goto out;
        }
+       init_tracer_stat(t);
  
        trace_branch_enable(tr);
   out:
@@@ -3114,15 -2456,10 +2455,15 @@@ static int tracing_open_pipe(struct ino
        if (!iter)
                return -ENOMEM;
  
 +      if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
 +              kfree(iter);
 +              return -ENOMEM;
 +      }
 +
        mutex_lock(&trace_types_lock);
  
        /* trace pipe does not show start of buffer */
 -      cpus_setall(iter->started);
 +      cpumask_setall(iter->started);
  
        iter->tr = &global_trace;
        iter->trace = current_trace;
@@@ -3139,7 -2476,6 +2480,7 @@@ static int tracing_release_pipe(struct 
  {
        struct trace_iterator *iter = file->private_data;
  
 +      free_cpumask_var(iter->started);
        kfree(iter);
        atomic_dec(&tracing_reader);
  
@@@ -3758,6 -3094,7 +3099,6 @@@ void ftrace_dump(void
        static DEFINE_SPINLOCK(ftrace_dump_lock);
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
 -      static cpumask_t mask;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;
         * and then release the locks again.
         */
  
 -      cpus_clear(mask);
 -
        while (!trace_empty(&iter)) {
  
                if (!cnt)
@@@ -3826,28 -3165,19 +3167,28 @@@ __init static int tracer_alloc_buffers(
  {
        struct trace_array_cpu *data;
        int i;
 +      int ret = -ENOMEM;
  
 -      /* TODO: make the number of buffers hot pluggable with CPUS */
 -      tracing_buffer_mask = cpu_possible_map;
 +      if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
 +              goto out;
 +
 +      if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
 +              goto out_free_buffer_mask;
 +
 +      cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
 +      cpumask_copy(tracing_cpumask, cpu_all_mask);
  
 +      /* TODO: make the number of buffers hot pluggable with CPUS */
        global_trace.buffer = ring_buffer_alloc(trace_buf_size,
                                                   TRACE_BUFFER_FLAGS);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
 -              return 0;
 +              goto out_free_cpumask;
        }
        global_trace.entries = ring_buffer_size(global_trace.buffer);
  
 +
  #ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(trace_buf_size,
                                             TRACE_BUFFER_FLAGS);
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
 -              return 0;
 +              goto out_free_cpumask;
        }
        max_tr.entries = ring_buffer_size(max_tr.buffer);
        WARN_ON(max_tr.entries != global_trace.entries);
  #else
        current_trace = &nop_trace;
  #endif
+       init_tracer_stat(current_trace);
        /* All seems OK, enable tracing */
        tracing_disabled = 0;
  
                                       &trace_panic_notifier);
  
        register_die_notifier(&trace_die_notifier);
 +      ret = 0;
  
 -      return 0;
 +out_free_cpumask:
 +      free_cpumask_var(tracing_cpumask);
 +out_free_buffer_mask:
 +      free_cpumask_var(tracing_buffer_mask);
 +out:
 +      return ret;
  }
  early_initcall(tracer_alloc_buffers);
  fs_initcall(tracer_init_debugfs);
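
The trace.c hunks above drop the hard-coded per-type switch statements from print_lat_fmt(), print_trace_fmt(), print_raw_fmt(), print_hex_fmt() and print_bin_fmt() and instead look the entry type up with ftrace_find_event(), dispatching to per-event latency_trace/trace/raw/hex/binary callbacks behind the new trace_output.h. A minimal sketch of one such handler follows; the exact struct trace_event layout and the register_ftrace_event() registration helper are assumptions taken from that header, which is not part of this diff.

	/*
	 * Sketch only: a per-type output handler in the style the new
	 * dispatch above expects.  The struct trace_event fields and
	 * register_ftrace_event() are assumed from trace_output.h.
	 */
	#include "trace.h"
	#include "trace_output.h"

	static int special_trace_output(struct trace_seq *s,
					struct trace_entry *entry, int flags)
	{
		struct special_entry *field;

		trace_assign_type(field, entry);
		if (!trace_seq_printf(s, "# %ld %ld %ld\n",
				      field->arg1, field->arg2, field->arg3))
			return TRACE_TYPE_PARTIAL_LINE;

		return TRACE_TYPE_HANDLED;
	}

	static struct trace_event trace_special_event = {
		.type	= TRACE_SPECIAL,
		.trace	= special_trace_output,
	};

	/* at tracer init time (assumed helper from trace_output.h): */
	/* register_ftrace_event(&trace_special_event); */
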
diff --combined kernel/trace/trace.h
index 742fe134927692649b81eaa8c087e8f14a2dd575,a8b624ccd4d63b5380f12534ad9c161210cfbb5b..94ed45e93a80774c0f07f1d46c2df90ce9ef7671
@@@ -9,7 -9,6 +9,7 @@@
  #include <linux/mmiotrace.h>
  #include <linux/ftrace.h>
  #include <trace/boot.h>
 +#include <trace/kmemtrace.h>
  
  enum trace_type {
        __TRACE_FIRST_TYPE = 0,
@@@ -17,7 -16,6 +17,6 @@@
        TRACE_FN,
        TRACE_CTX,
        TRACE_WAKE,
-       TRACE_CONT,
        TRACE_STACK,
        TRACE_PRINT,
        TRACE_SPECIAL,
        TRACE_GRAPH_ENT,
        TRACE_USER_STACK,
        TRACE_HW_BRANCHES,
 +      TRACE_KMEM_ALLOC,
 +      TRACE_KMEM_FREE,
        TRACE_POWER,
  
-       __TRACE_LAST_TYPE
+       __TRACE_LAST_TYPE,
  };
  
  /*
@@@ -173,24 -169,6 +172,24 @@@ struct trace_power 
        struct power_trace      state_data;
  };
  
 +struct kmemtrace_alloc_entry {
 +      struct trace_entry      ent;
 +      enum kmemtrace_type_id type_id;
 +      unsigned long call_site;
 +      const void *ptr;
 +      size_t bytes_req;
 +      size_t bytes_alloc;
 +      gfp_t gfp_flags;
 +      int node;
 +};
 +
 +struct kmemtrace_free_entry {
 +      struct trace_entry      ent;
 +      enum kmemtrace_type_id type_id;
 +      unsigned long call_site;
 +      const void *ptr;
 +};
 +
  /*
   * trace_flag_type is an enumeration that holds different
   * states when a trace occurs. These are:
   *  NEED_RESCED               - reschedule is requested
   *  HARDIRQ           - inside an interrupt handler
   *  SOFTIRQ           - inside a softirq handler
-  *  CONT              - multiple entries hold the trace item
   */
  enum trace_flag_type {
        TRACE_FLAG_IRQS_OFF             = 0x01,
        TRACE_FLAG_NEED_RESCHED         = 0x04,
        TRACE_FLAG_HARDIRQ              = 0x08,
        TRACE_FLAG_SOFTIRQ              = 0x10,
-       TRACE_FLAG_CONT                 = 0x20,
  };
  
  #define TRACE_BUF_SIZE                1024
@@@ -283,7 -259,6 +280,6 @@@ extern void __ftrace_bad_type(void)
        do {                                                            \
                IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN);     \
                IF_ASSIGN(var, ent, struct ctx_switch_entry, 0);        \
-               IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
                IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK);   \
                IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
                IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT);   \
                          TRACE_GRAPH_RET);             \
                IF_ASSIGN(var, ent, struct hw_branch_entry, TRACE_HW_BRANCHES);\
                IF_ASSIGN(var, ent, struct trace_power, TRACE_POWER); \
 +              IF_ASSIGN(var, ent, struct kmemtrace_alloc_entry,       \
 +                        TRACE_KMEM_ALLOC);    \
 +              IF_ASSIGN(var, ent, struct kmemtrace_free_entry,        \
 +                        TRACE_KMEM_FREE);     \
                __ftrace_bad_type();                                    \
        } while (0)
  
@@@ -365,6 -336,21 +361,21 @@@ struct tracer 
        struct tracer           *next;
        int                     print_max;
        struct tracer_flags     *flags;
+       /*
+        * If you change one of the following on tracing runtime, recall
+        * init_tracer_stat()
+        */
+       /* Iteration over statistic entries */
+       void                    *(*stat_start)(void);
+       void                    *(*stat_next)(void *prev, int idx);
+       /* Compare two entries for sorting (optional) for stats */
+       int                     (*stat_cmp)(void *p1, void *p2);
+       /* Print a stat entry */
+       int                     (*stat_show)(struct seq_file *s, void *p);
+       /* Print the headers of your stat entries */
+       int                     (*stat_headers)(struct seq_file *s);
  };
  
  struct trace_seq {
@@@ -393,7 -379,7 +404,7 @@@ struct trace_iterator 
        loff_t                  pos;
        long                    idx;
  
 -      cpumask_t               started;
 +      cpumask_var_t           started;
  };
  
  int tracing_is_enabled(void);
@@@ -450,6 -436,8 +461,8 @@@ void tracing_start_sched_switch_record(
  int register_tracer(struct tracer *type);
  void unregister_tracer(struct tracer *type);
  
+ void init_tracer_stat(struct tracer *trace);
  extern unsigned long nsecs_to_usecs(unsigned long nsecs);
  
  extern unsigned long tracing_max_latency;
@@@ -481,10 -469,10 +494,10 @@@ struct tracer_switch_ops 
        void                            *private;
        struct tracer_switch_ops        *next;
  };
- char *trace_find_cmdline(int pid);
  #endif /* CONFIG_CONTEXT_SWITCH_TRACER */
  
+ extern char *trace_find_cmdline(int pid);
  #ifdef CONFIG_DYNAMIC_FTRACE
  extern unsigned long ftrace_update_tot_cnt;
  #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
@@@ -513,15 -501,6 +526,6 @@@ extern int trace_selftest_startup_branc
  #endif /* CONFIG_FTRACE_STARTUP_TEST */
  
  extern void *head_page(struct trace_array_cpu *data);
- extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
- extern void trace_seq_print_cont(struct trace_seq *s,
-                                struct trace_iterator *iter);
- extern int
- seq_print_ip_sym(struct trace_seq *s, unsigned long ip,
-               unsigned long sym_flags);
- extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
-                                size_t cnt);
  extern long ns2usecs(cycle_t nsec);
  extern int
  trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args);
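
The trace.h hunk above also teaches struct tracer a set of optional statistics callbacks (stat_start, stat_next, stat_cmp, stat_show, stat_headers) that the new init_tracer_stat()/trace_stat.o machinery consumes. As a rough illustration only, a tracer could wire them up as below; the tracer name, the example_stat table and the interpretation of the idx argument are guesses, not taken from this diff.

	/*
	 * Illustration only: expose a small fixed table through the new
	 * stat_* hooks.  Everything named "example" here is hypothetical.
	 */
	#include <linux/kernel.h>
	#include <linux/seq_file.h>
	#include "trace.h"

	struct example_stat {
		const char	*name;
		unsigned long	hits;
	};

	static struct example_stat example_stats[4];

	static void *example_stat_start(void)
	{
		return &example_stats[0];
	}

	static void *example_stat_next(void *prev, int idx)
	{
		struct example_stat *stat = prev;

		/* assumed contract: return the entry after @prev, NULL at the end */
		if (++stat >= example_stats + ARRAY_SIZE(example_stats))
			return NULL;
		return stat;
	}

	static int example_stat_show(struct seq_file *s, void *p)
	{
		struct example_stat *stat = p;

		seq_printf(s, "%-16s %lu\n", stat->name, stat->hits);
		return 0;
	}

	static int example_stat_headers(struct seq_file *s)
	{
		seq_printf(s, "# name             hits\n");
		return 0;
	}

	static struct tracer example_tracer __read_mostly = {
		.name		= "example",
		.stat_start	= example_stat_start,
		.stat_next	= example_stat_next,
		.stat_show	= example_stat_show,
		.stat_headers	= example_stat_headers,
	};
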
diff --combined kernel/trace/trace_boot.c
index 366c8c333e136e77d79a4579b4e70f24242b1404,cb2ff3e297b1f8bfc6aae0a2bf25c6abfb1e4ea0..0e94b3d091f70229d4ff521ea18aa3823c6b5ed7
@@@ -11,6 -11,7 +11,7 @@@
  #include <linux/kallsyms.h>
  
  #include "trace.h"
+ #include "trace_output.h"
  
  static struct trace_array *boot_trace;
  static bool pre_initcalls_finished;
@@@ -42,7 -43,7 +43,7 @@@ static int boot_trace_init(struct trace
        int cpu;
        boot_trace = tr;
  
 -      for_each_cpu_mask(cpu, cpu_possible_map)
 +      for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);
  
        tracing_sched_switch_assign_trace(tr);
diff --combined kernel/trace/trace_functions_graph.c
index 8516e4f09e1b4559c8b450183f70c0cb39614950,f8ac5417afc83a040d5fc9cfb93d72a60c983c80..3c545984816f35654b5fb8a72735bec75e9950f8
@@@ -12,6 -12,7 +12,7 @@@
  #include <linux/fs.h>
  
  #include "trace.h"
+ #include "trace_output.h"
  
  #define TRACE_GRAPH_INDENT    2
  
@@@ -79,7 -80,7 +80,7 @@@ print_graph_cpu(struct trace_seq *s, in
        int i;
        int ret;
        int log10_this = log10_cpu(cpu);
 -      int log10_all = log10_cpu(cpus_weight_nr(cpu_online_map));
 +      int log10_all = log10_cpu(cpumask_weight(cpu_online_mask));
  
  
        /*
@@@ -589,9 -590,6 +590,6 @@@ print_graph_comment(struct print_entry 
        if (!ret)
                return TRACE_TYPE_PARTIAL_LINE;
  
-       if (ent->flags & TRACE_FLAG_CONT)
-               trace_seq_print_cont(s, iter);
        /* Strip ending newline */
        if (s->buffer[s->len - 1] == '\n') {
                s->buffer[s->len - 1] = '\0';
diff --combined kernel/trace/trace_hw_branches.c
index 649df22d435fc3507d05aecbb244ccf5cbcfffa1,879752b006b3fecbf11b688646502af26cf38aec..df21c1e72b95f865ab5ea2a21f3787463117dcf3
@@@ -14,6 -14,7 +14,7 @@@
  #include <asm/ds.h>
  
  #include "trace.h"
+ #include "trace_output.h"
  
  
  #define SIZEOF_BTS (1 << 13)
@@@ -46,7 -47,7 +47,7 @@@ static void bts_trace_start(struct trac
  
        tracing_reset_online_cpus(tr);
  
 -      for_each_cpu_mask(cpu, cpu_possible_map)
 +      for_each_cpu(cpu, cpu_possible_mask)
                smp_call_function_single(cpu, bts_trace_start_cpu, NULL, 1);
  }
  
@@@ -62,7 -63,7 +63,7 @@@ static void bts_trace_stop(struct trace
  {
        int cpu;
  
 -      for_each_cpu_mask(cpu, cpu_possible_map)
 +      for_each_cpu(cpu, cpu_possible_mask)
                smp_call_function_single(cpu, bts_trace_stop_cpu, NULL, 1);
  }
  
@@@ -172,7 -173,7 +173,7 @@@ static void trace_bts_prepare(struct tr
  {
        int cpu;
  
 -      for_each_cpu_mask(cpu, cpu_possible_map)
 +      for_each_cpu(cpu, cpu_possible_mask)
                smp_call_function_single(cpu, trace_bts_cpu, iter->tr, 1);
  }
  
diff --combined kernel/trace/trace_power.c
index 7bda248daf5557a04ff1f178764b80777a33dff5,b9b13c39b4bbc9a4de0b91b6ce402474f827fd20..faa6ab7a1f5c589b92a9b4ea7e23f20168ea6263
@@@ -16,6 -16,7 +16,7 @@@
  #include <linux/module.h>
  
  #include "trace.h"
+ #include "trace_output.h"
  
  static struct trace_array *power_trace;
  static int __read_mostly trace_power_enabled;
@@@ -39,7 -40,7 +40,7 @@@ static int power_trace_init(struct trac
  
        trace_power_enabled = 1;
  
 -      for_each_cpu_mask(cpu, cpu_possible_map)
 +      for_each_cpu(cpu, cpu_possible_mask)
                tracing_reset(tr, cpu);
        return 0;
  }
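
The remaining hunks in trace_boot.c, trace_functions_graph.c, trace_hw_branches.c and trace_power.c all follow the same cpumask API conversion seen in trace.c: fixed-size cpumask_t variables and the old for_each_cpu_mask()/cpu_isset()/cpus_setall() helpers become dynamically sized cpumask_var_t objects driven by for_each_cpu(), cpumask_test_cpu() and cpumask_setall(). A condensed sketch of that idiom, using a placeholder mask name that does not appear in the diff:

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static cpumask_var_t my_mask;	/* placeholder name */

	static int my_mask_init(void)
	{
		int cpu;

		/* with CONFIG_CPUMASK_OFFSTACK the storage is heap allocated,
		 * so the allocation can fail and must be checked */
		if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(my_mask, cpu_possible_mask);

		for_each_cpu(cpu, my_mask) {
			if (!cpumask_test_cpu(cpu, cpu_online_mask))
				continue;
			/* per-cpu work goes here */
		}

		return 0;
	}

	static void my_mask_exit(void)
	{
		free_cpumask_var(my_mask);
	}

The point of the conversion is that on configurations with a large NR_CPUS a cpumask_t in static data or on the stack gets expensive, whereas cpumask_var_t only pays for the bits the running system actually needs.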