perf session: There is no need for a per session hists instance
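
The gist of the change: struct perf_session no longer embeds a struct hists just to carry event statistics. The counters move into a struct events_stats member accessed as session->stats, and events_stats__inc()/events_stats__fprintf() take over from the hists helpers. Below is a minimal, self-contained sketch of that pattern; the struct layouts, the RECORD_* enum and the field set are simplified stand-ins for illustration, not the actual perf headers.

    #include <stdio.h>
    #include <inttypes.h>

    /* Simplified stand-ins for the perf structures this patch touches. */
    enum { RECORD_SAMPLE, RECORD_LOST, RECORD_MAX };

    struct events_stats {
            uint64_t total_lost;
            uint32_t nr_unknown_events;
            uint32_t nr_events[RECORD_MAX];
    };

    /* Before: the session carried a whole struct hists just for hists.stats.
     * After:  the stats block is embedded in the session directly. */
    struct session {                      /* stand-in for struct perf_session */
            struct events_stats stats;
    };

    static void events_stats__inc(struct events_stats *stats, unsigned int type)
    {
            ++stats->nr_events[type];
    }

    static size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
    {
            return fprintf(fp, "LOST events: %" PRIu32 "\n",
                           stats->nr_events[RECORD_LOST]);
    }

    int main(void)
    {
            struct session s = { .stats = { 0 } };

            /* was: hists__inc_nr_events(&s.hists, RECORD_LOST) */
            events_stats__inc(&s.stats, RECORD_LOST);
            /* was: hists__fprintf_nr_events(&s.hists, stdout) */
            events_stats__fprintf(&s.stats, stdout);
            return 0;
    }
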
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index ce6f5116238670c0b5f737533cc65ef3f924d220..b0bcc328d1fb53c2bd11ef1381ea6344c8aa0357 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -16,7 +16,6 @@
 #include "cpumap.h"
 #include "event-parse.h"
 #include "perf_regs.h"
-#include "unwind.h"
 #include "vdso.h"
 
 static int perf_session__open(struct perf_session *self, bool force)
@@ -128,22 +127,12 @@ struct perf_session *perf_session__new(const char *filename, int mode,
                goto out;
 
        memcpy(self->filename, filename, len);
-       /*
-        * On 64bit we can mmap the data file in one go. No need for tiny mmap
-        * slices. On 32bit we use 32MB.
-        */
-#if BITS_PER_LONG == 64
-       self->mmap_window = ULLONG_MAX;
-#else
-       self->mmap_window = 32 * 1024 * 1024ULL;
-#endif
        self->machines = RB_ROOT;
        self->repipe = repipe;
        INIT_LIST_HEAD(&self->ordered_samples.samples);
        INIT_LIST_HEAD(&self->ordered_samples.sample_cache);
        INIT_LIST_HEAD(&self->ordered_samples.to_free);
        machine__init(&self->host_machine, "", HOST_KERNEL_ID);
-       hists__init(&self->hists);
 
        if (mode == O_RDONLY) {
                if (perf_session__open(self, force) < 0)
@@ -171,37 +160,30 @@ out_delete:
        return NULL;
 }
 
-static void machine__delete_dead_threads(struct machine *machine)
-{
-       struct thread *n, *t;
-
-       list_for_each_entry_safe(t, n, &machine->dead_threads, node) {
-               list_del(&t->node);
-               thread__delete(t);
-       }
-}
-
 static void perf_session__delete_dead_threads(struct perf_session *session)
 {
        machine__delete_dead_threads(&session->host_machine);
 }
 
-static void machine__delete_threads(struct machine *self)
+static void perf_session__delete_threads(struct perf_session *session)
 {
-       struct rb_node *nd = rb_first(&self->threads);
-
-       while (nd) {
-               struct thread *t = rb_entry(nd, struct thread, rb_node);
-
-               rb_erase(&t->rb_node, &self->threads);
-               nd = rb_next(nd);
-               thread__delete(t);
-       }
+       machine__delete_threads(&session->host_machine);
 }
 
-static void perf_session__delete_threads(struct perf_session *session)
+static void perf_session_env__delete(struct perf_session_env *env)
 {
-       machine__delete_threads(&session->host_machine);
+       free(env->hostname);
+       free(env->os_release);
+       free(env->version);
+       free(env->arch);
+       free(env->cpu_desc);
+       free(env->cpuid);
+
+       free(env->cmdline);
+       free(env->sibling_cores);
+       free(env->sibling_threads);
+       free(env->numa_nodes);
+       free(env->pmu_mappings);
 }
 
 void perf_session__delete(struct perf_session *self)
@@ -209,198 +191,13 @@ void perf_session__delete(struct perf_session *self)
        perf_session__destroy_kernel_maps(self);
        perf_session__delete_dead_threads(self);
        perf_session__delete_threads(self);
+       perf_session_env__delete(&self->header.env);
        machine__exit(&self->host_machine);
        close(self->fd);
        free(self);
        vdso__exit();
 }
 
-void machine__remove_thread(struct machine *self, struct thread *th)
-{
-       self->last_match = NULL;
-       rb_erase(&th->rb_node, &self->threads);
-       /*
-        * We may have references to this thread, for instance in some hist_entry
-        * instances, so just move them to a separate list.
-        */
-       list_add_tail(&th->node, &self->dead_threads);
-}
-
-static bool symbol__match_parent_regex(struct symbol *sym)
-{
-       if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
-               return 1;
-
-       return 0;
-}
-
-static const u8 cpumodes[] = {
-       PERF_RECORD_MISC_USER,
-       PERF_RECORD_MISC_KERNEL,
-       PERF_RECORD_MISC_GUEST_USER,
-       PERF_RECORD_MISC_GUEST_KERNEL
-};
-#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
-
-static void ip__resolve_ams(struct machine *self, struct thread *thread,
-                           struct addr_map_symbol *ams,
-                           u64 ip)
-{
-       struct addr_location al;
-       size_t i;
-       u8 m;
-
-       memset(&al, 0, sizeof(al));
-
-       for (i = 0; i < NCPUMODES; i++) {
-               m = cpumodes[i];
-               /*
-                * We cannot use the header.misc hint to determine whether a
-                * branch stack address is user, kernel, guest, hypervisor.
-                * Branches may straddle the kernel/user/hypervisor boundaries.
-                * Thus, we have to try consecutively until we find a match
-                * or else, the symbol is unknown
-                */
-               thread__find_addr_location(thread, self, m, MAP__FUNCTION,
-                               ip, &al, NULL);
-               if (al.sym)
-                       goto found;
-       }
-found:
-       ams->addr = ip;
-       ams->al_addr = al.addr;
-       ams->sym = al.sym;
-       ams->map = al.map;
-}
-
-struct branch_info *machine__resolve_bstack(struct machine *self,
-                                           struct thread *thr,
-                                           struct branch_stack *bs)
-{
-       struct branch_info *bi;
-       unsigned int i;
-
-       bi = calloc(bs->nr, sizeof(struct branch_info));
-       if (!bi)
-               return NULL;
-
-       for (i = 0; i < bs->nr; i++) {
-               ip__resolve_ams(self, thr, &bi[i].to, bs->entries[i].to);
-               ip__resolve_ams(self, thr, &bi[i].from, bs->entries[i].from);
-               bi[i].flags = bs->entries[i].flags;
-       }
-       return bi;
-}
-
-static int machine__resolve_callchain_sample(struct machine *machine,
-                                            struct thread *thread,
-                                            struct ip_callchain *chain,
-                                            struct symbol **parent)
-
-{
-       u8 cpumode = PERF_RECORD_MISC_USER;
-       unsigned int i;
-       int err;
-
-       callchain_cursor_reset(&callchain_cursor);
-
-       if (chain->nr > PERF_MAX_STACK_DEPTH) {
-               pr_warning("corrupted callchain. skipping...\n");
-               return 0;
-       }
-
-       for (i = 0; i < chain->nr; i++) {
-               u64 ip;
-               struct addr_location al;
-
-               if (callchain_param.order == ORDER_CALLEE)
-                       ip = chain->ips[i];
-               else
-                       ip = chain->ips[chain->nr - i - 1];
-
-               if (ip >= PERF_CONTEXT_MAX) {
-                       switch (ip) {
-                       case PERF_CONTEXT_HV:
-                               cpumode = PERF_RECORD_MISC_HYPERVISOR;
-                               break;
-                       case PERF_CONTEXT_KERNEL:
-                               cpumode = PERF_RECORD_MISC_KERNEL;
-                               break;
-                       case PERF_CONTEXT_USER:
-                               cpumode = PERF_RECORD_MISC_USER;
-                               break;
-                       default:
-                               pr_debug("invalid callchain context: "
-                                        "%"PRId64"\n", (s64) ip);
-                               /*
-                                * It seems the callchain is corrupted.
-                                * Discard all.
-                                */
-                               callchain_cursor_reset(&callchain_cursor);
-                               return 0;
-                       }
-                       continue;
-               }
-
-               al.filtered = false;
-               thread__find_addr_location(thread, machine, cpumode,
-                                          MAP__FUNCTION, ip, &al, NULL);
-               if (al.sym != NULL) {
-                       if (sort__has_parent && !*parent &&
-                           symbol__match_parent_regex(al.sym))
-                               *parent = al.sym;
-                       if (!symbol_conf.use_callchain)
-                               break;
-               }
-
-               err = callchain_cursor_append(&callchain_cursor,
-                                             ip, al.map, al.sym);
-               if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int unwind_entry(struct unwind_entry *entry, void *arg)
-{
-       struct callchain_cursor *cursor = arg;
-       return callchain_cursor_append(cursor, entry->ip,
-                                      entry->map, entry->sym);
-}
-
-int machine__resolve_callchain(struct machine *machine,
-                              struct perf_evsel *evsel,
-                              struct thread *thread,
-                              struct perf_sample *sample,
-                              struct symbol **parent)
-
-{
-       int ret;
-
-       callchain_cursor_reset(&callchain_cursor);
-
-       ret = machine__resolve_callchain_sample(machine, thread,
-                                               sample->callchain, parent);
-       if (ret)
-               return ret;
-
-       /* Can we do dwarf post unwind? */
-       if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
-             (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
-               return 0;
-
-       /* Bail out if nothing was captured. */
-       if ((!sample->user_regs.regs) ||
-           (!sample->user_stack.size))
-               return 0;
-
-       return unwind__get_entries(unwind_entry, &callchain_cursor, machine,
-                                  thread, evsel->attr.sample_regs_user,
-                                  sample);
-
-}
-
 static int process_event_synth_tracing_data_stub(union perf_event *event
                                                 __maybe_unused,
                                                 struct perf_session *session
@@ -1065,11 +862,11 @@ static int perf_session_deliver_event(struct perf_session *session,
        case PERF_RECORD_SAMPLE:
                dump_sample(evsel, event, sample);
                if (evsel == NULL) {
-                       ++session->hists.stats.nr_unknown_id;
+                       ++session->stats.nr_unknown_id;
                        return 0;
                }
                if (machine == NULL) {
-                       ++session->hists.stats.nr_unprocessable_samples;
+                       ++session->stats.nr_unprocessable_samples;
                        return 0;
                }
                return tool->sample(tool, event, sample, evsel, machine);
@@ -1083,7 +880,7 @@ static int perf_session_deliver_event(struct perf_session *session,
                return tool->exit(tool, event, sample, machine);
        case PERF_RECORD_LOST:
                if (tool->lost == perf_event__process_lost)
-                       session->hists.stats.total_lost += event->lost.lost;
+                       session->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
@@ -1092,7 +889,7 @@ static int perf_session_deliver_event(struct perf_session *session,
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
        default:
-               ++session->hists.stats.nr_unknown_events;
+               ++session->stats.nr_unknown_events;
                return -1;
        }
 }
@@ -1106,8 +903,8 @@ static int perf_session__preprocess_sample(struct perf_session *session,
 
        if (!ip_callchain__valid(sample->callchain, event)) {
                pr_debug("call-chain problem with event, skipping it.\n");
-               ++session->hists.stats.nr_invalid_chains;
-               session->hists.stats.total_invalid_chains += sample->period;
+               ++session->stats.nr_invalid_chains;
+               session->stats.total_invalid_chains += sample->period;
                return -EINVAL;
        }
        return 0;
@@ -1165,7 +962,7 @@ static int perf_session__process_event(struct perf_session *session,
        if (event->header.type >= PERF_RECORD_HEADER_MAX)
                return -EINVAL;
 
-       hists__inc_nr_events(&session->hists, event->header.type);
+       events_stats__inc(&session->stats, event->header.type);
 
        if (event->header.type >= PERF_RECORD_USER_TYPE_START)
                return perf_session__process_user_event(session, event, tool, file_offset);
@@ -1220,39 +1017,39 @@ static void perf_session__warn_about_errors(const struct perf_session *session,
                                            const struct perf_tool *tool)
 {
        if (tool->lost == perf_event__process_lost &&
-           session->hists.stats.nr_events[PERF_RECORD_LOST] != 0) {
+           session->stats.nr_events[PERF_RECORD_LOST] != 0) {
                ui__warning("Processed %d events and lost %d chunks!\n\n"
                            "Check IO/CPU overload!\n\n",
-                           session->hists.stats.nr_events[0],
-                           session->hists.stats.nr_events[PERF_RECORD_LOST]);
+                           session->stats.nr_events[0],
+                           session->stats.nr_events[PERF_RECORD_LOST]);
        }
 
-       if (session->hists.stats.nr_unknown_events != 0) {
+       if (session->stats.nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
                            "file generated by a more recent tool?\n\n"
                            "If that is not the case, consider "
                            "reporting to linux-kernel@vger.kernel.org.\n\n",
-                           session->hists.stats.nr_unknown_events);
+                           session->stats.nr_unknown_events);
        }
 
-       if (session->hists.stats.nr_unknown_id != 0) {
+       if (session->stats.nr_unknown_id != 0) {
                ui__warning("%u samples with id not present in the header\n",
-                           session->hists.stats.nr_unknown_id);
+                           session->stats.nr_unknown_id);
        }
 
-       if (session->hists.stats.nr_invalid_chains != 0) {
+       if (session->stats.nr_invalid_chains != 0) {
                ui__warning("Found invalid callchains!\n\n"
                            "%u out of %u events were discarded for this reason.\n\n"
                            "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
-                           session->hists.stats.nr_invalid_chains,
-                           session->hists.stats.nr_events[PERF_RECORD_SAMPLE]);
+                           session->stats.nr_invalid_chains,
+                           session->stats.nr_events[PERF_RECORD_SAMPLE]);
        }
 
-       if (session->hists.stats.nr_unprocessable_samples != 0) {
+       if (session->stats.nr_unprocessable_samples != 0) {
                ui__warning("%u unprocessable samples recorded.\n"
                            "Do you have a KVM guest running and not using 'perf kvm'?\n",
-                           session->hists.stats.nr_unprocessable_samples);
+                           session->stats.nr_unprocessable_samples);
        }
 }
 
@@ -1369,6 +1166,18 @@ fetch_mmaped_event(struct perf_session *session,
        return event;
 }
 
+/*
+ * On 64bit we can mmap the data file in one go. No need for tiny mmap
+ * slices. On 32bit we use 32MB.
+ */
+#if BITS_PER_LONG == 64
+#define MMAP_SIZE ULLONG_MAX
+#define NUM_MMAPS 1
+#else
+#define MMAP_SIZE (32 * 1024 * 1024ULL)
+#define NUM_MMAPS 128
+#endif
+
 int __perf_session__process_events(struct perf_session *session,
                                   u64 data_offset, u64 data_size,
                                   u64 file_size, struct perf_tool *tool)
@@ -1376,7 +1185,7 @@ int __perf_session__process_events(struct perf_session *session,
        u64 head, page_offset, file_offset, file_pos, progress_next;
        int err, mmap_prot, mmap_flags, map_idx = 0;
        size_t  mmap_size;
-       char *buf, *mmaps[8];
+       char *buf, *mmaps[NUM_MMAPS];
        union perf_event *event;
        uint32_t size;
 
@@ -1391,7 +1200,7 @@ int __perf_session__process_events(struct perf_session *session,
 
        progress_next = file_size / 16;
 
-       mmap_size = session->mmap_window;
+       mmap_size = MMAP_SIZE;
        if (mmap_size > file_size)
                mmap_size = file_size;
 
@@ -1532,10 +1341,10 @@ size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
 }
 
 size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
-                                         bool with_hits)
+                                         bool (skip)(struct dso *dso, int parm), int parm)
 {
-       size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
-       return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
+       size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, skip, parm);
+       return ret + machines__fprintf_dsos_buildid(&self->machines, fp, skip, parm);
 }
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
@@ -1543,11 +1352,11 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
        struct perf_evsel *pos;
        size_t ret = fprintf(fp, "Aggregated stats:\n");
 
-       ret += hists__fprintf_nr_events(&session->hists, fp);
+       ret += events_stats__fprintf(&session->stats, fp);
 
        list_for_each_entry(pos, &session->evlist->entries, node) {
                ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
-               ret += hists__fprintf_nr_events(&pos->hists, fp);
+               ret += events_stats__fprintf(&pos->hists.stats, fp);
        }
 
        return ret;
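
A side note on the perf_session__fprintf_dsos_buildid() hunk above: the hard-coded "with_hits" flag is replaced by a caller-supplied skip() predicate plus an opaque parm, so different callers can decide which DSOs to leave out. The following is a self-contained sketch of that callback shape, with hypothetical names (print_dsos, skip_unhit) and a simplified struct dso standing in for the real one.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-in for struct dso; only what the example needs. */
    struct dso {
            const char *name;
            int hits;
    };

    /* The printer takes a skip() predicate and an opaque parm instead of a
     * fixed boolean, mirroring the new fprintf_dsos_buildid() signature. */
    static size_t print_dsos(struct dso *dsos, size_t nr, FILE *fp,
                             bool (skip)(struct dso *dso, int parm), int parm)
    {
            size_t printed = 0, i;

            for (i = 0; i < nr; i++) {
                    if (skip && skip(&dsos[i], parm))
                            continue;
                    printed += fprintf(fp, "%s\n", dsos[i].name);
            }
            return printed;
    }

    /* Example predicate reproducing the old "with_hits" behaviour. */
    static bool skip_unhit(struct dso *dso, int parm)
    {
            (void)parm;     /* unused in this example */
            return dso->hits == 0;
    }

    int main(void)
    {
            struct dso dsos[] = { { "libc.so", 3 }, { "libfoo.so", 0 } };

            print_dsos(dsos, 2, stdout, skip_unhit, 0);
            return 0;
    }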