Implement list.h wrapper for hlist in older kernels
[deliverable/lttng-modules.git] / lttng-tracker-pid.c
CommitLineData
e0130fab
MD
1/*
2 * lttng-tracker-pid.c
3 *
 * LTTng Process ID tracking.
5 *
6 * Copyright (C) 2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23#include <linux/module.h>
24#include <linux/slab.h>
25#include <linux/err.h>
26#include <linux/seq_file.h>
27#include <linux/stringify.h>
e0130fab
MD
28#include <linux/hash.h>
29#include <linux/rcupdate.h>
30
31#include "wrapper/tracepoint.h"
f934e302 32#include "wrapper/list.h"
e0130fab
MD
33#include "lttng-events.h"
34
35/*
36 * Hash table is allocated and freed when there are no possible
37 * concurrent lookups (ensured by the alloc/free caller). However,
38 * there can be concurrent RCU lookups vs add/del operations.
39 *
40 * Concurrent updates of the PID hash table are forbidden: the caller
41 * must ensure mutual exclusion. This is currently done by holding the
42 * sessions_mutex across calls to create, destroy, add, and del
43 * functions of this API.
44 */
/*
 * Accessor returning the PID stored in a tracker hash table node.
 * Exposed so other compilation units can read the PID without knowing
 * the layout of struct lttng_pid_hash_node.
 */
int lttng_pid_tracker_get_node_pid(const struct lttng_pid_hash_node *node)
{
	return node->pid;
}
49
50/*
51 * Lookup performed from RCU read-side critical section (RCU sched),
52 * protected by preemption off at the tracepoint call site.
53 * Return 1 if found, 0 if not found.
54 */
55bool lttng_pid_tracker_lookup(struct lttng_pid_tracker *lpf, int pid)
56{
57 struct hlist_head *head;
58 struct lttng_pid_hash_node *e;
59 uint32_t hash = hash_32(pid, 32);
60
61 head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
f934e302 62 lttng_hlist_for_each_entry_rcu_notrace(e, head, hlist) {
e0130fab
MD
63 if (pid == e->pid)
64 return 1; /* Found */
65 }
66 return 0;
67}
68EXPORT_SYMBOL_GPL(lttng_pid_tracker_lookup);
69
70/*
71 * Tracker add and del operations support concurrent RCU lookups.
72 */
73int lttng_pid_tracker_add(struct lttng_pid_tracker *lpf, int pid)
74{
75 struct hlist_head *head;
76 struct lttng_pid_hash_node *e;
77 uint32_t hash = hash_32(pid, 32);
78
79 head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
f934e302 80 lttng_hlist_for_each_entry(e, head, hlist) {
e0130fab
MD
81 if (pid == e->pid)
82 return -EEXIST;
83 }
84 e = kmalloc(sizeof(struct lttng_pid_hash_node), GFP_KERNEL);
85 if (!e)
86 return -ENOMEM;
87 e->pid = pid;
88 hlist_add_head_rcu(&e->hlist, head);
89 return 0;
90}
91
/*
 * Unlink a node from its hash chain and free it, waiting for a grace
 * period in between so concurrent RCU lookups never see freed memory.
 * Caller must hold sessions_mutex. The ordering below (unlink, wait,
 * free) is mandatory and must not be changed.
 */
static
void pid_tracker_del_node_rcu(struct lttng_pid_hash_node *e)
{
	hlist_del_rcu(&e->hlist);
	/*
	 * We choose to use a heavyweight synchronize on removal here,
	 * since removal of a PID from the tracker mask is a rare
	 * operation, and we don't want to use more cache lines than
	 * what we really need when doing the PID lookups, so we don't
	 * want to afford adding a rcu_head field to those pid hash
	 * node.
	 */
	synchronize_trace();
	kfree(e);
}
107
108/*
109 * This removal is only used on destroy, so it does not need to support
110 * concurrent RCU lookups.
111 */
112static
113void pid_tracker_del_node(struct lttng_pid_hash_node *e)
114{
115 hlist_del(&e->hlist);
116 kfree(e);
117}
118
119int lttng_pid_tracker_del(struct lttng_pid_tracker *lpf, int pid)
120{
121 struct hlist_head *head;
122 struct lttng_pid_hash_node *e;
123 uint32_t hash = hash_32(pid, 32);
124
125 head = &lpf->pid_hash[hash & (LTTNG_PID_TABLE_SIZE - 1)];
126 /*
127 * No need of _safe iteration, because we stop traversal as soon
128 * as we remove the entry.
129 */
f934e302 130 lttng_hlist_for_each_entry(e, head, hlist) {
e0130fab
MD
131 if (pid == e->pid) {
132 pid_tracker_del_node_rcu(e);
133 return 0;
134 }
135 }
136 return -ENOENT; /* Not found */
137}
138
139struct lttng_pid_tracker *lttng_pid_tracker_create(void)
140{
141 return kzalloc(sizeof(struct lttng_pid_tracker), GFP_KERNEL);
142}
143
144void lttng_pid_tracker_destroy(struct lttng_pid_tracker *lpf)
145{
146 int i;
147
148 for (i = 0; i < LTTNG_PID_TABLE_SIZE; i++) {
149 struct hlist_head *head = &lpf->pid_hash[i];
150 struct lttng_pid_hash_node *e;
151 struct hlist_node *tmp;
152
f934e302 153 lttng_hlist_for_each_entry_safe(e, tmp, head, hlist)
e0130fab
MD
154 pid_tracker_del_node(e);
155 }
156 kfree(lpf);
157}
This page took 0.047395 seconds and 5 git commands to generate.