perf machine: Protect the machine->threads with a rwlock
author    Arnaldo Carvalho de Melo <acme@redhat.com>
          Mon, 6 Apr 2015 23:43:22 +0000 (20:43 -0300)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
          Fri, 8 May 2015 19:19:27 +0000 (16:19 -0300)
In addition to using refcounts for struct thread lifetime management, we
need to protect the machine->threads rb_tree against concurrent access.

That happens in 'perf top', where one thread processes events, inserting
and deleting entries from that rb_tree, while another thread decays
hist_entries, which ends up dropping references and ultimately deleting
threads from the rb_tree and releasing their resources once no hist_entry
(or other data structure, like in 'perf sched') references them any
longer.

So the rule is the same as for refcounts + protected trees in the kernel:
take the tree lock, find the object, bump its refcount, drop the tree
lock and return; use the object, then drop the refcount when it is no
longer needed, or keep it if the object is stored in some other data
structure and drop it when that data structure is released.

I.e. pair "t = machine__find(new)_thread()" with a "thread__put(t)", and
"perf_event__preprocess_sample(&al)" with "addr_location__put(&al)".

The addr_location__put() pairing is there because we return references to
several data structures in the addr_location; as more of those become
reference counted, their references will also be dropped at
addr_location__put() time.
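
As a rough sketch, a tool sample handler then ends up looking like this
(modelled on the builtin-annotate.c change below, with an illustrative
handler body):

  static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                  union perf_event *event,
                                  struct perf_sample *sample,
                                  struct perf_evsel *evsel __maybe_unused,
                                  struct machine *machine)
  {
          struct addr_location al;
          int ret = 0;

          /* takes a reference on al.thread via machine__findnew_thread() */
          if (perf_event__preprocess_sample(event, machine, &al, sample) < 0)
                  return -1;

          /* ... use al.thread, al.map, al.sym ... */

          addr_location__put(&al); /* drops the al.thread reference */
          return ret;
  }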

Acked-by: David Ahern <dsahern@gmail.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Don Zickus <dzickus@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lkml.kernel.org/n/tip-bs9rt4n0jw3hi9f3zxyy3xln@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
29 files changed:
tools/perf/builtin-annotate.c
tools/perf/builtin-diff.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-lock.c
tools/perf/builtin-mem.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/tests/code-reading.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/hists_common.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_link.c
tools/perf/tests/hists_output.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/tests/thread-mg-share.c
tools/perf/util/build-id.c
tools/perf/util/db-export.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/thread.c

diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 71bf7451c0cad1bf1f43b946631bf949eed96c3c..b57a027fb200dc77ba64becd351b02d26b811f56 100644
@@ -84,6 +84,7 @@ static int process_sample_event(struct perf_tool *tool,
 {
        struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
        struct addr_location al;
+       int ret = 0;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
@@ -92,15 +93,16 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
-               return 0;
+               goto out_put;
 
        if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
                pr_warning("problem incrementing symbol count, "
                           "skipping event\n");
-               return -1;
+               ret = -1;
        }
-
-       return 0;
+out_put:
+       addr_location__put(&al);
+       return ret;
 }
 
 static int hist_entry__tty_annotate(struct hist_entry *he,
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index df6307b4050aaa2ab7e51403d22563e0a1866279..daaa7dca9c3ba81e1e2a7c671dc7660fe34ba4a5 100644
@@ -328,6 +328,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
 {
        struct addr_location al;
        struct hists *hists = evsel__hists(evsel);
+       int ret = -1;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
@@ -338,7 +339,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
        if (hists__add_entry(hists, &al, sample->period,
                             sample->weight, sample->transaction)) {
                pr_warning("problem incrementing symbol period, skipping event\n");
-               return -1;
+               goto out_put;
        }
 
        /*
@@ -350,8 +351,10 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
        hists->stats.total_period += sample->period;
        if (!al.filtered)
                hists->stats.total_non_filtered_period += sample->period;
-
-       return 0;
+       ret = 0;
+out_put:
+       addr_location__put(&al);
+       return ret;
 }
 
 static struct perf_tool tool = {
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index d6a47e854b2bd6643882d0089287a016d3af32c8..52ec66b236076c46c1bd0666967d1d204d2c95c3 100644
@@ -365,6 +365,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
                }
        }
 
+       thread__put(thread);
 repipe:
        perf_event__repipe(tool, event, sample, machine);
        return 0;
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index e628bf1a0c24b4baa58fada1c7c4b7faf1b6791f..fe3fcb75f20a8be2319877482bcf916035be07f7 100644
@@ -906,6 +906,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);
 
@@ -919,10 +920,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
-               return f(evsel, sample);
+               err = f(evsel, sample);
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 static struct perf_tool perf_kmem = {
diff --git a/tools/perf/builtin-kvm.c b/tools/perf/builtin-kvm.c
index 1f9338f6109cdbe79f8f08e510c00e4a171d1f82..15fecd3dc5d8c49f1fcb22e2a83649a16f808fde 100644
@@ -651,6 +651,7 @@ static int process_sample_event(struct perf_tool *tool,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread;
        struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
                                                 tool);
@@ -666,9 +667,10 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (!handle_kvm_event(kvm, thread, evsel, sample))
-               return -1;
+               err = -1;
 
-       return 0;
+       thread__put(thread);
+       return err;
 }
 
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index d49c2ab85fc2dd1e3c6560391b71f7e3269cf31e..de16aaed516e6016b2a8d887f87727a8179acf19 100644
@@ -769,6 +769,7 @@ static void dump_threads(void)
                t = perf_session__findnew(session, st->tid);
                pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
                node = rb_next(node);
+               thread__put(t);
        };
 }
 
@@ -810,6 +811,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);
 
@@ -821,10 +823,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
-               return f(evsel, sample);
+               err = f(evsel, sample);
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 static void sort_result(void)
diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c
index 675216e08bfcd04baf2336ece7da328e914e21fd..da2ec06f0742dc6acf98c1c9b74d7cf45ff0fcb2 100644
@@ -74,7 +74,7 @@ dump_raw_samples(struct perf_tool *tool,
        }
 
        if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
-               return 0;
+               goto out_put;
 
        if (al.map != NULL)
                al.map->dso->hit = 1;
@@ -103,7 +103,8 @@ dump_raw_samples(struct perf_tool *tool,
                symbol_conf.field_sep,
                al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
                al.sym ? al.sym->name : "???");
-
+out_put:
+       addr_location__put(&al);
        return 0;
 }
 
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 18cb0ff39b4ed0bf9e88fe1396fbab73594b659a..8d5118f5ceddec40a5c911dfd5563d4df3a10a73 100644
@@ -142,7 +142,7 @@ static int process_sample_event(struct perf_tool *tool,
                .hide_unresolved = rep->hide_unresolved,
                .add_entry_cb = hist_iter__report_callback,
        };
-       int ret;
+       int ret = 0;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_debug("problem processing %d event, skipping it.\n",
@@ -151,10 +151,10 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (rep->hide_unresolved && al.sym == NULL)
-               return 0;
+               goto out_put;
 
        if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
-               return 0;
+               goto out_put;
 
        if (sort__mode == SORT_MODE__BRANCH)
                iter.ops = &hist_iter_branch;
@@ -172,7 +172,8 @@ static int process_sample_event(struct perf_tool *tool,
                                   rep);
        if (ret < 0)
                pr_debug("problem adding hist entry, skipping event\n");
-
+out_put:
+       addr_location__put(&al);
        return ret;
 }
 
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index 5275bab703138cbeb9c40f1ff22174ac52ca2d13..79273ecf92eba4ab262ecd01c5f5f2d8e0dfff87 100644
@@ -770,7 +770,7 @@ static int replay_fork_event(struct perf_sched *sched,
        if (child == NULL || parent == NULL) {
                pr_debug("thread does not exist on fork event: child %p, parent %p\n",
                                 child, parent);
-               return 0;
+               goto out_put;
        }
 
        if (verbose) {
@@ -781,6 +781,9 @@ static int replay_fork_event(struct perf_sched *sched,
 
        register_pid(sched, parent->tid, thread__comm_str(parent));
        register_pid(sched, child->tid, thread__comm_str(child));
+out_put:
+       thread__put(child);
+       thread__put(parent);
        return 0;
 }
 
@@ -957,7 +960,7 @@ static int latency_switch_event(struct perf_sched *sched,
        struct work_atoms *out_events, *in_events;
        struct thread *sched_out, *sched_in;
        u64 timestamp0, timestamp = sample->time;
-       int cpu = sample->cpu;
+       int cpu = sample->cpu, err = -1;
        s64 delta;
 
        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -976,15 +979,17 @@ static int latency_switch_event(struct perf_sched *sched,
 
        sched_out = machine__findnew_thread(machine, -1, prev_pid);
        sched_in = machine__findnew_thread(machine, -1, next_pid);
+       if (sched_out == NULL || sched_in == NULL)
+               goto out_put;
 
        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
                if (thread_atoms_insert(sched, sched_out))
-                       return -1;
+                       goto out_put;
                out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
                if (!out_events) {
                        pr_err("out-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
        }
        if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
@@ -993,22 +998,25 @@ static int latency_switch_event(struct perf_sched *sched,
        in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
        if (!in_events) {
                if (thread_atoms_insert(sched, sched_in))
-                       return -1;
+                       goto out_put;
                in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
                if (!in_events) {
                        pr_err("in-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                /*
                 * Take came in we have not heard about yet,
                 * add in an initial atom in runnable state:
                 */
                if (add_sched_out_event(in_events, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
        add_sched_in_event(in_events, timestamp);
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(sched_out);
+       thread__put(sched_in);
+       return err;
 }
 
 static int latency_runtime_event(struct perf_sched *sched,
@@ -1021,23 +1029,29 @@ static int latency_runtime_event(struct perf_sched *sched,
        struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
-       int cpu = sample->cpu;
+       int cpu = sample->cpu, err = -1;
+
+       if (thread == NULL)
+               return -1;
 
        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
        if (!atoms) {
                if (thread_atoms_insert(sched, thread))
-                       return -1;
+                       goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("in-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        add_runtime_event(atoms, runtime, timestamp);
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int latency_wakeup_event(struct perf_sched *sched,
@@ -1050,19 +1064,22 @@ static int latency_wakeup_event(struct perf_sched *sched,
        struct work_atom *atom;
        struct thread *wakee;
        u64 timestamp = sample->time;
+       int err = -1;
 
        wakee = machine__findnew_thread(machine, -1, pid);
+       if (wakee == NULL)
+               return -1;
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
-                       return -1;
+                       goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("wakeup-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'S', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        BUG_ON(list_empty(&atoms->work_list));
@@ -1081,17 +1098,21 @@ static int latency_wakeup_event(struct perf_sched *sched,
         * skip in this case.
         */
        if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
-               return 0;
+               goto out_ok;
 
        sched->nr_timestamps++;
        if (atom->sched_out_time > timestamp) {
                sched->nr_unordered_timestamps++;
-               return 0;
+               goto out_ok;
        }
 
        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
-       return 0;
+out_ok:
+       err = 0;
+out_put:
+       thread__put(wakee);
+       return err;
 }
 
 static int latency_migrate_task_event(struct perf_sched *sched,
@@ -1104,6 +1125,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *migrant;
+       int err = -1;
 
        /*
         * Only need to worry about migration when profiling one CPU.
@@ -1112,18 +1134,20 @@ static int latency_migrate_task_event(struct perf_sched *sched,
                return 0;
 
        migrant = machine__findnew_thread(machine, -1, pid);
+       if (migrant == NULL)
+               return -1;
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
-                       return -1;
+                       goto out_put;
                register_pid(sched, migrant->tid, thread__comm_str(migrant));
                atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("migration-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        BUG_ON(list_empty(&atoms->work_list));
@@ -1135,8 +1159,10 @@ static int latency_migrate_task_event(struct perf_sched *sched,
 
        if (atom->sched_out_time > timestamp)
                sched->nr_unordered_timestamps++;
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(migrant);
+       return err;
 }
 
 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
@@ -1330,8 +1356,10 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
        }
 
        sched_in = machine__findnew_thread(machine, -1, next_pid);
+       if (sched_in == NULL)
+               return -1;
 
-       sched->curr_thread[this_cpu] = sched_in;
+       sched->curr_thread[this_cpu] = thread__get(sched_in);
 
        printf("  ");
 
@@ -1381,6 +1409,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                printf("\n");
        }
 
+       thread__put(sched_in);
+
        return 0;
 }
 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 6805098e3751db40544193dd0eaa530705fe0281..24809787369f5a1303451de5264798d0e31792b6 100644
@@ -607,13 +607,14 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
        }
 
        if (al.filtered)
-               return 0;
+               goto out_put;
 
        if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
-               return 0;
+               goto out_put;
 
        scripting_ops->process_event(event, sample, evsel, &al);
-
+out_put:
+       addr_location__put(&al);
        return 0;
 }
 
@@ -681,8 +682,8 @@ static int process_comm_event(struct perf_tool *tool,
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
        ret = 0;
-
 out:
+       thread__put(thread);
        return ret;
 }
 
@@ -713,6 +714,7 @@ static int process_fork_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
+       thread__put(thread);
 
        return 0;
 }
@@ -721,6 +723,7 @@ static int process_exit_event(struct perf_tool *tool,
                              struct perf_sample *sample,
                              struct machine *machine)
 {
+       int err = 0;
        struct thread *thread;
        struct perf_script *script = container_of(tool, struct perf_script, tool);
        struct perf_session *session = script->session;
@@ -742,9 +745,10 @@ static int process_exit_event(struct perf_tool *tool,
        perf_event__fprintf(event, stdout);
 
        if (perf_event__process_exit(tool, event, sample, machine) < 0)
-               return -1;
+               err = -1;
 
-       return 0;
+       thread__put(thread);
+       return err;
 }
 
 static int process_mmap_event(struct perf_tool *tool,
@@ -774,7 +778,7 @@ static int process_mmap_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
-
+       thread__put(thread);
        return 0;
 }
 
@@ -805,7 +809,7 @@ static int process_mmap2_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
-
+       thread__put(thread);
        return 0;
 }
 
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index e50fe1187b0ba2ca808f6c0a0d3effd8156b4982..3b884e37ab8ba3e553781c43529d2ac630d16c3f 100644
@@ -523,7 +523,7 @@ static const char *cat_backtrace(union perf_event *event,
                                 * Discard all.
                                 */
                                zfree(&p);
-                               goto exit;
+                               goto exit_put;
                        }
                        continue;
                }
@@ -538,7 +538,8 @@ static const char *cat_backtrace(union perf_event *event,
                else
                        fprintf(f, "..... %016" PRIx64 "\n", ip);
        }
-
+exit_put:
+       addr_location__put(&al);
 exit:
        fclose(f);
 
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1cb3436276d1599ea0f1ffc80d36e655aad690be..232658337f18a5e9c02b89e098b92d896d615d42 100644
@@ -793,7 +793,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
                pthread_mutex_unlock(&hists->lock);
        }
 
-       return;
+       addr_location__put(&al);
 }
 
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index d1139b6bd534e65a33858b8dbe727cefa8ae2e86..bb05e44f8f0c00102932f872f37b6ba53b82733a 100644
@@ -1712,7 +1712,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
        void *args;
        size_t printed = 0;
        struct thread *thread;
-       int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+       int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
        struct syscall *sc = trace__syscall_info(trace, evsel, id);
        struct thread_trace *ttrace;
 
@@ -1725,14 +1725,14 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 
        if (ttrace->entry_str == NULL) {
                ttrace->entry_str = malloc(1024);
                if (!ttrace->entry_str)
-                       return -1;
+                       goto out_put;
        }
 
        if (!trace->summary_only)
@@ -1757,8 +1757,10 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                thread__put(trace->current);
                trace->current = thread__get(thread);
        }
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
@@ -1768,7 +1770,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
        long ret;
        u64 duration = 0;
        struct thread *thread;
-       int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+       int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
        struct syscall *sc = trace__syscall_info(trace, evsel, id);
        struct thread_trace *ttrace;
 
@@ -1781,7 +1783,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        if (trace->summary)
                thread__update_stats(ttrace, id, sample);
@@ -1835,8 +1837,10 @@ signed_print:
        fputc('\n', trace->output);
 out:
        ttrace->entry_pending = false;
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
@@ -1863,6 +1867,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
 
        ttrace->runtime_ms += runtime_ms;
        trace->runtime_ms += runtime_ms;
+       thread__put(thread);
        return 0;
 
 out_dump:
@@ -1872,6 +1877,7 @@ out_dump:
               (pid_t)perf_evsel__intval(evsel, sample, "pid"),
               runtime,
               perf_evsel__intval(evsel, sample, "vruntime"));
+       thread__put(thread);
        return 0;
 }
 
@@ -1924,11 +1930,12 @@ static int trace__pgfault(struct trace *trace,
        struct addr_location al;
        char map_type = 'd';
        struct thread_trace *ttrace;
+       int err = -1;
 
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
                ttrace->pfmaj++;
@@ -1936,7 +1943,7 @@ static int trace__pgfault(struct trace *trace,
                ttrace->pfmin++;
 
        if (trace->summary_only)
-               return 0;
+               goto out;
 
        thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
                              sample->ip, &al);
@@ -1967,8 +1974,11 @@ static int trace__pgfault(struct trace *trace,
        print_location(trace->output, sample, &al, true, false);
 
        fprintf(trace->output, " (%c%c)\n", map_type, al.level);
-
-       return 0;
+out:
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
diff --git a/tools/perf/tests/code-reading.c b/tools/perf/tests/code-reading.c
index ca0e480e741baed24f59db5aac05d191b4ff0c9d..e2a432b67d52082e96e018d563742fcefa151d50 100644
@@ -248,6 +248,7 @@ static int process_sample_event(struct machine *machine,
        struct perf_sample sample;
        struct thread *thread;
        u8 cpumode;
+       int ret;
 
        if (perf_evlist__parse_sample(evlist, event, &sample)) {
                pr_debug("perf_evlist__parse_sample failed\n");
@@ -262,7 +263,9 @@ static int process_sample_event(struct machine *machine,
 
        cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-       return read_object_code(sample.ip, READLEN, cpumode, thread, state);
+       ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
+       thread__put(thread);
+       return ret;
 }
 
 static int process_event(struct machine *machine, struct perf_evlist *evlist,
@@ -457,13 +460,13 @@ static int do_test_code_reading(bool try_kcore)
        thread = machine__findnew_thread(machine, pid, pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        cpus = cpu_map__new(NULL);
        if (!cpus) {
                pr_debug("cpu_map__new failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        while (1) {
@@ -472,7 +475,7 @@ static int do_test_code_reading(bool try_kcore)
                evlist = perf_evlist__new();
                if (!evlist) {
                        pr_debug("perf_evlist__new failed\n");
-                       goto out_err;
+                       goto out_put;
                }
 
                perf_evlist__set_maps(evlist, cpus, threads);
@@ -485,7 +488,7 @@ static int do_test_code_reading(bool try_kcore)
                ret = parse_events(evlist, str, NULL);
                if (ret < 0) {
                        pr_debug("parse_events failed\n");
-                       goto out_err;
+                       goto out_put;
                }
 
                perf_evlist__config(evlist, &opts);
@@ -506,7 +509,7 @@ static int do_test_code_reading(bool try_kcore)
                                continue;
                        }
                        pr_debug("perf_evlist__open failed\n");
-                       goto out_err;
+                       goto out_put;
                }
                break;
        }
@@ -514,7 +517,7 @@ static int do_test_code_reading(bool try_kcore)
        ret = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (ret < 0) {
                pr_debug("perf_evlist__mmap failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        perf_evlist__enable(evlist);
@@ -525,7 +528,7 @@ static int do_test_code_reading(bool try_kcore)
 
        ret = process_events(machine, evlist, &state);
        if (ret < 0)
-               goto out_err;
+               goto out_put;
 
        if (!have_vmlinux && !have_kcore && !try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
@@ -535,7 +538,10 @@ static int do_test_code_reading(bool try_kcore)
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
+out_put:
+       thread__put(thread);
 out_err:
+
        if (evlist) {
                perf_evlist__delete(evlist);
        } else {
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index 0bf06bec68c7e9786668990ad399b578326726c5..9b748e1ad46ed2848b7ed4771aca3cdf15f4e908 100644
@@ -170,6 +170,7 @@ int test__dwarf_unwind(void)
        }
 
        err = krava_1(thread);
+       thread__put(thread);
 
  out:
        machine__delete_threads(machine);
diff --git a/tools/perf/tests/hists_common.c b/tools/perf/tests/hists_common.c
index a62c091345163f70ea72ad2cb9dbc2297ae2da1a..456f884eb27b6c3a523a2ae06b28b104e13855c8 100644
@@ -96,6 +96,7 @@ struct machine *setup_fake_machine(struct machines *machines)
                        goto out;
 
                thread__set_comm(thread, fake_threads[i].comm, 0);
+               thread__put(thread);
        }
 
        for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index b08a95a5ca1aa53f4faed0aa8b27168b59f5e330..620f626e5b3517326d0550e6198f4384945a392d 100644
@@ -105,8 +105,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                        goto out;
 
                if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                        PERF_MAX_STACK_DEPTH, NULL) < 0)
+                                        PERF_MAX_STACK_DEPTH, NULL) < 0) {
+                       addr_location__put(&al);
                        goto out;
+               }
 
                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 108488cd71fa39c2f56cce1b9d817dba7b9444a0..82e1ee52e024559c16b443ebfc28f9cb5dbdb4cc 100644
@@ -82,8 +82,10 @@ static int add_hist_entries(struct perf_evlist *evlist,
                                goto out;
 
                        if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                                PERF_MAX_STACK_DEPTH, NULL) < 0)
+                                                PERF_MAX_STACK_DEPTH, NULL) < 0) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_samples[i].thread = al.thread;
                        fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/hists_link.c b/tools/perf/tests/hists_link.c
index 34c61e4d3352fbfd6984d5b30c05e6d32bf2d64c..8c102b0114249708e4ae2059c81732ab23793c4b 100644
@@ -91,8 +91,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
 
                        he = __hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, 1, 1, 0, true);
-                       if (he == NULL)
+                       if (he == NULL) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_common_samples[k].thread = al.thread;
                        fake_common_samples[k].map = al.map;
@@ -115,8 +117,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
 
                        he = __hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, 1, 1, 0, true);
-                       if (he == NULL)
+                       if (he == NULL) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_samples[i][k].thread = al.thread;
                        fake_samples[i][k].map = al.map;
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index d8a23db80094f3cb8a9a4d9efc56b6cf1c5639f5..fd7ec4f9aeb4abfedd2b508a135b7baa8c4a717c 100644
@@ -71,8 +71,10 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                        goto out;
 
                if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                        PERF_MAX_STACK_DEPTH, NULL) < 0)
+                                        PERF_MAX_STACK_DEPTH, NULL) < 0) {
+                       addr_location__put(&al);
                        goto out;
+               }
 
                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
diff --git a/tools/perf/tests/mmap-thread-lookup.c b/tools/perf/tests/mmap-thread-lookup.c
index 2113f1c8611fb569b0fb6bc7f676477e7de0a666..264e215c0d36a9c2e2c2e775e57d7148f1223cec 100644
@@ -191,6 +191,8 @@ static int mmap_events(synth_cb synth)
                                      PERF_RECORD_MISC_USER, MAP__FUNCTION,
                                      (unsigned long) (td->map + 1), &al);
 
+               thread__put(thread);
+
                if (!al.map) {
                        pr_debug("failed, couldn't find map\n");
                        err = -1;
diff --git a/tools/perf/tests/thread-mg-share.c b/tools/perf/tests/thread-mg-share.c
index b028499dd3cf0f5ee7530a6c869aaa40ef802521..dc05bd62b4a3345baacff5c51acf27f52eddea03 100644
@@ -64,22 +64,22 @@ int test__thread_mg_share(void)
        TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
 
        /* release thread group */
-       thread__delete(leader);
+       thread__put(leader);
        TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);
 
-       thread__delete(t1);
+       thread__put(t1);
        TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);
 
-       thread__delete(t2);
+       thread__put(t2);
        TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);
 
-       thread__delete(t3);
+       thread__put(t3);
 
        /* release other group  */
-       thread__delete(other_leader);
+       thread__put(other_leader);
        TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);
 
-       thread__delete(other);
+       thread__put(other);
 
        /*
         * Cannot call machine__delete_threads(machine) now,
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c
index 61867dff5d5aa6dea4079d060514f1bd57a63630..ad8cfcbaa25debdcdb2f4c63b154bdcb027ac7a9 100644
@@ -43,6 +43,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
        if (al.map != NULL)
                al.map->dso->hit = 1;
 
+       thread__put(thread);
        return 0;
 }
 
@@ -59,8 +60,10 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
        dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
                    event->fork.ppid, event->fork.ptid);
 
-       if (thread)
+       if (thread) {
                machine__remove_thread(machine, thread);
+               thread__put(thread);
+       }
 
        return 0;
 }
diff --git a/tools/perf/util/db-export.c b/tools/perf/util/db-export.c
index bb39a3ffc70b3951f88f9260f01836cc4e959fc9..eb7a2acb973b02a8dd512c223eeea69dcb505802 100644
@@ -122,6 +122,7 @@ int db_export__machine(struct db_export *dbe, struct machine *machine)
 int db_export__thread(struct db_export *dbe, struct thread *thread,
                      struct machine *machine, struct comm *comm)
 {
+       struct thread *main_thread;
        u64 main_thread_db_id = 0;
        int err;
 
@@ -131,8 +132,6 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
        thread->db_id = ++dbe->thread_last_db_id;
 
        if (thread->pid_ != -1) {
-               struct thread *main_thread;
-
                if (thread->pid_ == thread->tid) {
                        main_thread = thread;
                } else {
@@ -144,14 +143,16 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
                        err = db_export__thread(dbe, main_thread, machine,
                                                comm);
                        if (err)
-                               return err;
+                               goto out_put;
                        if (comm) {
                                err = db_export__comm_thread(dbe, comm, thread);
                                if (err)
-                                       return err;
+                                       goto out_put;
                        }
                }
                main_thread_db_id = main_thread->db_id;
+               if (main_thread != thread)
+                       thread__put(main_thread);
        }
 
        if (dbe->export_thread)
@@ -159,6 +160,10 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
                                          machine);
 
        return 0;
+
+out_put:
+       thread__put(main_thread);
+       return err;
 }
 
 int db_export__comm(struct db_export *dbe, struct comm *comm,
@@ -303,6 +308,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
        if (err)
                return err;
 
+       /* FIXME: check refcounting for get_main_thread, that calls machine__find_thread... */
        main_thread = get_main_thread(al->machine, thread);
        if (main_thread)
                comm = machine__thread_exec_comm(al->machine, main_thread);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index db526091f58099d54a2181175f147339ccfbe83f..a513a51f733095d6eb5887579f164a3d8c551f57 100644
@@ -919,6 +919,10 @@ void thread__find_addr_location(struct thread *thread,
                al->sym = NULL;
 }
 
+/*
+ * Callers need to drop the reference to al->thread, obtained in
+ * machine__findnew_thread()
+ */
 int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
@@ -979,6 +983,17 @@ int perf_event__preprocess_sample(const union perf_event *event,
        return 0;
 }
 
+/*
+ * The preprocess_sample method will return with reference counts for the
+ * data structures it puts in it; when done using them (and perhaps getting
+ * ref counts to keep a pointer to one of those entries) it must be paired
+ * with addr_location__put(), so that the refcounts can be decremented.
+ */
+void addr_location__put(struct addr_location *al)
+{
+       thread__zput(al->thread);
+}
+
 bool is_bts_event(struct perf_event_attr *attr)
 {
        return attr->type == PERF_TYPE_HARDWARE &&
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index 7eecd5e23d7752ea29f0bac749ba3af53f2423cc..97179abc80a12332087f0f79f95a0567014cf10a 100644
@@ -426,6 +426,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
                                  struct addr_location *al,
                                  struct perf_sample *sample);
 
+void addr_location__put(struct addr_location *al);
+
 struct thread;
 
 bool is_bts_event(struct perf_event_attr *attr);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 2f471105efb1f98e5fe855f976220e832feb466d..8b0b307d91f48447996a50dfb6ecc3b9234b9e1b 100644
@@ -14,6 +14,8 @@
 #include "unwind.h"
 #include "linux/hash.h"
 
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+
 static void dsos__init(struct dsos *dsos)
 {
        INIT_LIST_HEAD(&dsos->head);
@@ -28,6 +30,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
        dsos__init(&machine->kernel_dsos);
 
        machine->threads = RB_ROOT;
+       pthread_rwlock_init(&machine->threads_lock, NULL);
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;
 
@@ -54,6 +57,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 
                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm, 0);
+               thread__put(thread);
        }
 
        machine->current_tid = NULL;
@@ -91,14 +95,17 @@ static void dsos__delete(struct dsos *dsos)
 
 void machine__delete_threads(struct machine *machine)
 {
-       struct rb_node *nd = rb_first(&machine->threads);
+       struct rb_node *nd;
 
+       pthread_rwlock_wrlock(&machine->threads_lock);
+       nd = rb_first(&machine->threads);
        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);
 
                nd = rb_next(nd);
-               machine__remove_thread(machine, t);
+               __machine__remove_thread(machine, t, false);
        }
+       pthread_rwlock_unlock(&machine->threads_lock);
 }
 
 void machine__exit(struct machine *machine)
@@ -109,6 +116,7 @@ void machine__exit(struct machine *machine)
        vdso__exit(machine);
        zfree(&machine->root_dir);
        zfree(&machine->current_tid);
+       pthread_rwlock_destroy(&machine->threads_lock);
 }
 
 void machine__delete(struct machine *machine)
@@ -303,7 +311,7 @@ static void machine__update_thread_pid(struct machine *machine,
        if (th->pid_ == th->tid)
                return;
 
-       leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+       leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
        if (!leader)
                goto out_err;
 
@@ -336,9 +344,9 @@ out_err:
        pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
 }
 
-static struct thread *__machine__findnew_thread(struct machine *machine,
-                                               pid_t pid, pid_t tid,
-                                               bool create)
+static struct thread *____machine__findnew_thread(struct machine *machine,
+                                                 pid_t pid, pid_t tid,
+                                                 bool create)
 {
        struct rb_node **p = &machine->threads.rb_node;
        struct rb_node *parent = NULL;
@@ -393,6 +401,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 */
                if (thread__init_map_groups(th, machine)) {
                        rb_erase(&th->rb_node, &machine->threads);
+                       RB_CLEAR_NODE(&th->rb_node);
                        thread__delete(th);
                        return NULL;
                }
@@ -406,16 +415,30 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
        return th;
 }
 
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
+{
+       return ____machine__findnew_thread(machine, pid, tid, true);
+}
+
 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
                                       pid_t tid)
 {
-       return __machine__findnew_thread(machine, pid, tid, true);
+       struct thread *th;
+
+       pthread_rwlock_wrlock(&machine->threads_lock);
+       th = thread__get(__machine__findnew_thread(machine, pid, tid));
+       pthread_rwlock_unlock(&machine->threads_lock);
+       return th;
 }
 
 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
                                    pid_t tid)
 {
-       return __machine__findnew_thread(machine, pid, tid, false);
+       struct thread *th;
+       pthread_rwlock_rdlock(&machine->threads_lock);
+       th =  thread__get(____machine__findnew_thread(machine, pid, tid, false));
+       pthread_rwlock_unlock(&machine->threads_lock);
+       return th;
 }
 
 struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -434,6 +457,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
                                                        event->comm.pid,
                                                        event->comm.tid);
        bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
+       int err = 0;
 
        if (exec)
                machine->comm_exec = true;
@@ -444,10 +468,12 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
        if (thread == NULL ||
            __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
-               return -1;
+               err = -1;
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 int machine__process_lost_event(struct machine *machine __maybe_unused,
@@ -591,12 +617,16 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
        size_t ret = 0;
        struct rb_node *nd;
 
+       pthread_rwlock_rdlock(&machine->threads_lock);
+
        for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
                ret += thread__fprintf(pos, fp);
        }
 
+       pthread_rwlock_unlock(&machine->threads_lock);
+
        return ret;
 }
 
@@ -1213,11 +1243,14 @@ int machine__process_mmap2_event(struct machine *machine,
                        event->mmap2.filename, type, thread);
 
        if (map == NULL)
-               goto out_problem;
+               goto out_problem_map;
 
        thread__insert_map(thread, map);
+       thread__put(thread);
        return 0;
 
+out_problem_map:
+       thread__put(thread);
 out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
        return 0;
@@ -1260,31 +1293,45 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
                        type, thread);
 
        if (map == NULL)
-               goto out_problem;
+               goto out_problem_map;
 
        thread__insert_map(thread, map);
+       thread__put(thread);
        return 0;
 
+out_problem_map:
+       thread__put(thread);
 out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
 }
 
-void machine__remove_thread(struct machine *machine, struct thread *th)
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
 {
        if (machine->last_match == th)
                thread__zput(machine->last_match);
 
+       BUG_ON(th->refcnt.counter == 0);
+       if (lock)
+               pthread_rwlock_wrlock(&machine->threads_lock);
        rb_erase(&th->rb_node, &machine->threads);
+       RB_CLEAR_NODE(&th->rb_node);
        /*
         * Move it first to the dead_threads list, then drop the reference,
         * if this is the last reference, then the thread__delete destructor
         * will be called and we will remove it from the dead_threads list.
         */
        list_add_tail(&th->node, &machine->dead_threads);
+       if (lock)
+               pthread_rwlock_unlock(&machine->threads_lock);
        thread__put(th);
 }
 
+void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+       return __machine__remove_thread(machine, th, true);
+}
+
 int machine__process_fork_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample)
 {
@@ -1294,10 +1341,13 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
        struct thread *parent = machine__findnew_thread(machine,
                                                        event->fork.ppid,
                                                        event->fork.ptid);
+       int err = 0;
 
        /* if a thread currently exists for the thread id remove it */
-       if (thread != NULL)
+       if (thread != NULL) {
                machine__remove_thread(machine, thread);
+               thread__put(thread);
+       }
 
        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
@@ -1307,10 +1357,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent, sample->time) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
-               return -1;
+               err = -1;
        }
+       thread__put(thread);
+       thread__put(parent);
 
-       return 0;
+       return err;
 }
 
 int machine__process_exit_event(struct machine *machine, union perf_event *event,
@@ -1323,8 +1375,10 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
        if (dump_trace)
                perf_event__fprintf_task(event, stdout);
 
-       if (thread != NULL)
+       if (thread != NULL) {
                thread__exited(thread);
+               thread__put(thread);
+       }
 
        return 0;
 }
@@ -1841,6 +1895,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                return -ENOMEM;
 
        thread->cpu = cpu;
+       thread__put(thread);
 
        return 0;
 }
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 1d992961d5d1195797cbfc022bcea800736146dc..c7963c63c4746eb220241a0796f5033bd31a92d2 100644
@@ -30,6 +30,7 @@ struct machine {
        bool              comm_exec;
        char              *root_dir;
        struct rb_root    threads;
+       pthread_rwlock_t  threads_lock;
        struct list_head  dead_threads;
        struct thread     *last_match;
        struct vdso_info  *vdso_info;
@@ -151,8 +152,8 @@ static inline bool machine__is_host(struct machine *machine)
        return machine ? machine->pid == HOST_KERNEL_ID : false;
 }
 
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
-                                      pid_t tid);
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
 
 size_t machine__fprintf(struct machine *machine, FILE *fp);
 
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 1b265521836c464df8a50cc942673390f0cd5dcc..16c28a37a9e4e0fbf4bb4f631f9d8212071c80ad 100644
@@ -18,7 +18,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
        if (pid == thread->tid || pid == -1) {
                thread->mg = map_groups__new(machine);
        } else {
-               leader = machine__findnew_thread(machine, pid, pid);
+               leader = __machine__findnew_thread(machine, pid, pid);
                if (leader)
                        thread->mg = map_groups__get(leader->mg);
        }
@@ -54,6 +54,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
 
                list_add(&comm->list, &thread->comm_list);
                atomic_set(&thread->refcnt, 0);
+               INIT_LIST_HEAD(&thread->node);
+               RB_CLEAR_NODE(&thread->rb_node);
        }
 
        return thread;
@@ -67,6 +69,9 @@ void thread__delete(struct thread *thread)
 {
        struct comm *comm, *tmp;
 
+       BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
+       BUG_ON(!list_empty(&thread->node));
+
        thread_stack__free(thread);
 
        if (thread->mg) {
@@ -84,7 +89,8 @@ void thread__delete(struct thread *thread)
 
 struct thread *thread__get(struct thread *thread)
 {
-       atomic_inc(&thread->refcnt);
+       if (thread)
+               atomic_inc(&thread->refcnt);
        return thread;
 }
 