/*
 * (C) Copyright 2008 - Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng channel management.
 *
 * Author:
 * Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * Dual LGPL v2.1/GPL v2 license.
 */
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/slab.h>
17 #include <linux/vmalloc.h>
18 #include "ltt-channels.h"
21 * ltt_channel_mutex may be nested inside the LTT trace mutex.
22 * ltt_channel_mutex mutex may be nested inside markers mutex.
24 static DEFINE_MUTEX(ltt_channel_mutex
);
25 static LIST_HEAD(ltt_channels
);
27 * Index of next channel in array. Makes sure that as long as a trace channel is
28 * allocated, no array index will be re-used when a channel is freed and then
29 * another channel is allocated. This index is cleared and the array indexeds
30 * get reassigned when the index_kref goes back to 0, which indicates that no
31 * more trace channels are allocated.
33 static unsigned int free_index
;
34 /* index_kref is protected by both ltt_channel_mutex and lock_markers */
35 static struct kref index_kref
; /* Keeps track of allocated trace channels */
37 static struct ltt_channel_setting
*lookup_channel(const char *name
)
39 struct ltt_channel_setting
*iter
;
41 list_for_each_entry(iter
, <t_channels
, list
)
42 if (strcmp(name
, iter
->name
) == 0)
48 * Must be called when channel refcount falls to 0 _and_ also when the last
49 * trace is freed. This function is responsible for compacting the channel and
50 * event IDs when no users are active.
52 * Called with lock_markers() and channels mutex held.
54 static void release_channel_setting(struct kref
*kref
)
56 struct ltt_channel_setting
*setting
= container_of(kref
,
57 struct ltt_channel_setting
, kref
);
58 struct ltt_channel_setting
*iter
;
60 if (atomic_read(&index_kref
.refcount
) == 0
61 && atomic_read(&setting
->kref
.refcount
) == 0) {
62 list_del(&setting
->list
);
66 list_for_each_entry(iter
, <t_channels
, list
) {
67 iter
->index
= free_index
++;
68 iter
->free_event_id
= 0;
74 * Perform channel index compaction when the last trace channel is freed.
76 * Called with lock_markers() and channels mutex held.
78 static void release_trace_channel(struct kref
*kref
)
80 struct ltt_channel_setting
*iter
, *n
;
82 list_for_each_entry_safe(iter
, n
, <t_channels
, list
)
83 release_channel_setting(&iter
->kref
);
84 if (atomic_read(&index_kref
.refcount
) == 0)
85 markers_compact_event_ids();
89 * ltt_channel_trace_ref : Is there an existing trace session ?
91 * Must be called with lock_markers() held.
93 int ltt_channels_trace_ref(void)
95 return !!atomic_read(&index_kref
.refcount
);
97 EXPORT_SYMBOL_GPL(ltt_channels_trace_ref
);
100 * ltt_channels_register - Register a trace channel.
101 * @name: channel name
105 int ltt_channels_register(const char *name
)
107 struct ltt_channel_setting
*setting
;
110 mutex_lock(<t_channel_mutex
);
111 setting
= lookup_channel(name
);
113 if (atomic_read(&setting
->kref
.refcount
) == 0)
116 kref_get(&setting
->kref
);
120 setting
= kzalloc(sizeof(*setting
), GFP_KERNEL
);
125 list_add(&setting
->list
, <t_channels
);
126 strncpy(setting
->name
, name
, PATH_MAX
-1);
127 setting
->index
= free_index
++;
129 kref_init(&setting
->kref
);
131 mutex_unlock(<t_channel_mutex
);
134 EXPORT_SYMBOL_GPL(ltt_channels_register
);
137 * ltt_channels_unregister - Unregister a trace channel.
138 * @name: channel name
139 * @compacting: performing compaction
141 * Must be called with markers mutex held.
143 int ltt_channels_unregister(const char *name
, int compacting
)
145 struct ltt_channel_setting
*setting
;
149 mutex_lock(<t_channel_mutex
);
150 setting
= lookup_channel(name
);
151 if (!setting
|| atomic_read(&setting
->kref
.refcount
) == 0) {
155 kref_put(&setting
->kref
, release_channel_setting
);
156 if (!compacting
&& atomic_read(&index_kref
.refcount
) == 0)
157 markers_compact_event_ids();
160 mutex_unlock(<t_channel_mutex
);
163 EXPORT_SYMBOL_GPL(ltt_channels_unregister
);
166 * ltt_channels_set_default - Set channel default behavior.
167 * @name: default channel name
168 * @sb_size: size of the subbuffers
169 * @n_sb: number of subbuffers
171 int ltt_channels_set_default(const char *name
,
172 unsigned int sb_size
,
175 struct ltt_channel_setting
*setting
;
178 mutex_lock(<t_channel_mutex
);
179 setting
= lookup_channel(name
);
180 if (!setting
|| atomic_read(&setting
->kref
.refcount
) == 0) {
184 setting
->sb_size
= sb_size
;
185 setting
->n_sb
= n_sb
;
187 mutex_unlock(<t_channel_mutex
);
190 EXPORT_SYMBOL_GPL(ltt_channels_set_default
);
193 * ltt_channels_get_name_from_index - get channel name from channel index
194 * @index: channel index
196 * Allows to lookup the channel name given its index. Done to keep the name
197 * information outside of each trace channel instance.
199 const char *ltt_channels_get_name_from_index(unsigned int index
)
201 struct ltt_channel_setting
*iter
;
203 list_for_each_entry(iter
, <t_channels
, list
)
204 if (iter
->index
== index
&& atomic_read(&iter
->kref
.refcount
))
208 EXPORT_SYMBOL_GPL(ltt_channels_get_name_from_index
);
210 static struct ltt_channel_setting
*
211 ltt_channels_get_setting_from_name(const char *name
)
213 struct ltt_channel_setting
*iter
;
215 list_for_each_entry(iter
, <t_channels
, list
)
216 if (!strcmp(iter
->name
, name
)
217 && atomic_read(&iter
->kref
.refcount
))
223 * ltt_channels_get_index_from_name - get channel index from channel name
224 * @name: channel name
226 * Allows to lookup the channel index given its name. Done to keep the name
227 * information outside of each trace channel instance.
228 * Returns -1 if not found.
230 int ltt_channels_get_index_from_name(const char *name
)
232 struct ltt_channel_setting
*setting
;
234 setting
= ltt_channels_get_setting_from_name(name
);
236 return setting
->index
;
240 EXPORT_SYMBOL_GPL(ltt_channels_get_index_from_name
);
243 * ltt_channels_trace_alloc - Allocate channel structures for a trace
245 * Use the current channel list to allocate the channels for a trace.
246 * Called with trace lock held. Does not perform the trace buffer allocation,
247 * because we must let the user overwrite specific channel sizes.
249 int ltt_channels_trace_alloc(struct ltt_trace
*trace
, int overwrite
)
251 struct channel
**chan
= NULL
;
252 struct ltt_channel_setting
*chans
, *iter
;
256 mutex_lock(<t_channel_mutex
);
259 if (!atomic_read(&index_kref
.refcount
))
260 kref_init(&index_kref
);
262 kref_get(&index_kref
);
263 trace
->nr_channels
= free_index
;
264 chan
= kzalloc(sizeof(struct channel
*) * free_index
, GFP_KERNEL
);
267 chans
= kzalloc(sizeof(struct ltt_channel_setting
) * free_index
,
271 list_for_each_entry(iter
, <t_channels
, list
) {
272 if (!atomic_read(&iter
->kref
.refcount
))
274 chans
[iter
->index
].sb_size
= iter
->sb_size
;
275 chans
[iter
->index
].n_sb
= iter
->n_sb
;
276 chans
[iter
->index
].overwrite
= overwrite
;
277 strncpy(chans
[iter
->index
].filename
, iter
->name
,
279 chans
[iter
->index
].switch_timer_interval
= 0;
280 chans
[iter
->index
].read_timer_interval
= LTT_READ_TIMER_INTERVAL
;
282 trace
->channels
= chan
;
283 trace
->settings
= chans
;
285 mutex_unlock(<t_channel_mutex
);
294 EXPORT_SYMBOL_GPL(ltt_channels_trace_alloc
);
297 * ltt_channels_trace_free - Free one trace's channels
298 * @channels: channels to free
300 * Called with trace lock held. The actual channel buffers must be freed before
301 * this function is called.
303 void ltt_channels_trace_free(struct ltt_trace
*trace
)
306 mutex_lock(<t_channel_mutex
);
307 kfree(trace
->settings
);
308 kfree(trace
->channels
);
309 kref_put(&index_kref
, release_trace_channel
);
310 mutex_unlock(<t_channel_mutex
);
312 marker_update_probes();
314 EXPORT_SYMBOL_GPL(ltt_channels_trace_free
);
317 * ltt_channels_trace_set_timer - set switch timer
319 * @interval: interval of timer interrupt, in jiffies. 0 inhibits timer.
322 void ltt_channels_trace_set_timer(struct ltt_chan
*chan
,
323 unsigned long interval
)
325 chan
->switch_timer_interval
= interval
;
327 EXPORT_SYMBOL_GPL(ltt_channels_trace_set_timer
);
330 * _ltt_channels_get_event_id - get next event ID for a marker
331 * @channel: channel name
334 * Returns a unique event ID (for this channel) or < 0 on error.
335 * Must be called with channels mutex held.
337 int _ltt_channels_get_event_id(const char *channel
, const char *name
)
339 struct ltt_channel_setting
*setting
;
342 setting
= ltt_channels_get_setting_from_name(channel
);
347 if (strcmp(channel
, "metadata") == 0) {
348 if (strcmp(name
, "core_marker_id") == 0)
350 else if (strcmp(name
, "core_marker_format") == 0)
356 if (setting
->free_event_id
== EVENTS_PER_CHANNEL
- 1) {
360 ret
= setting
->free_event_id
++;
366 * ltt_channels_get_event_id - get next event ID for a marker
367 * @channel: channel name
370 * Returns a unique event ID (for this channel) or < 0 on error.
372 int ltt_channels_get_event_id(const char *channel
, const char *name
)
376 mutex_lock(<t_channel_mutex
);
377 ret
= _ltt_channels_get_event_id(channel
, name
);
378 mutex_unlock(<t_channel_mutex
);
383 * ltt_channels_reset_event_ids - reset event IDs at compaction
385 * Called with lock marker and channel mutex held.
387 void _ltt_channels_reset_event_ids(void)
389 struct ltt_channel_setting
*iter
;
391 list_for_each_entry(iter
, <t_channels
, list
)
392 iter
->free_event_id
= 0;
395 MODULE_LICENSE("GPL and additional rights");
396 MODULE_AUTHOR("Mathieu Desnoyers");
397 MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Channel Management");