/*
 * Infrastructure for statistic tracing (histogram output).
 *
 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Based on the code from trace_branch.c which is
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 *
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include "trace.h"

/* List of stat entries from a tracer */
struct trace_stat_list {
	struct list_head list;
	void *stat;
};
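
/*
 * Illustration only, not part of this file: a minimal sketch of the
 * stat callbacks a tracer could provide to feed this infrastructure.
 * The names my_stat, my_stats, MY_NR_STATS and the my_* functions are
 * hypothetical; only the callback signatures are taken from the way
 * current_tracer.stat_* is used below.
 *
 *	#define MY_NR_STATS 16
 *
 *	struct my_stat {
 *		const char	*name;
 *		unsigned long	hits;
 *	};
 *
 *	static struct my_stat my_stats[MY_NR_STATS];
 *
 *	static void *my_stat_start(void)
 *	{
 *		return &my_stats[0];
 *	}
 *
 *	static void *my_stat_next(void *prev, int idx)
 *	{
 *		if (idx >= MY_NR_STATS)
 *			return NULL;
 *		return &my_stats[idx];
 *	}
 *
 *	static int my_stat_show(struct seq_file *s, void *p)
 *	{
 *		struct my_stat *stat = p;
 *
 *		seq_printf(s, "%-30s %12lu\n", stat->name, stat->hits);
 *		return 0;
 *	}
 *
 * Such a tracer would set .stat_start, .stat_next and .stat_show (and
 * optionally .stat_cmp and .stat_headers) in its struct tracer.
 */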

static struct trace_stat_list stat_list;

/*
 * Local copy of the current tracer, so that the stat output cannot
 * race with (or crash on) a switch of the current tracer.
 */
static struct tracer current_tracer;

/*
 * Protect both the current tracer and the global
 * stat list.
 */
static DEFINE_MUTEX(stat_list_mutex);

/*
 * Free every entry of the stat snapshot; the stat_list head itself is
 * static and stays in place.
 */
static void reset_stat_list(void)
{
	struct trace_stat_list *node, *next;

	list_for_each_entry_safe(node, next, &stat_list.list, list) {
		list_del(&node->list);
		kfree(node);
	}
}

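/*
 * Expected to be called whenever the current tracer changes, so that
 * the snapshot built on the next trace_stat open uses the new
 * tracer's stat callbacks.
 */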
void init_tracer_stat(struct tracer *trace)
{
	mutex_lock(&stat_list_mutex);
	current_tracer = *trace;
	mutex_unlock(&stat_list_mutex);
}

/*
 * Default for tracers that don't provide a stat_cmp callback: report
 * every new entry as the smallest one seen so far, so it is appended
 * at the tail of the list and the order in which the tracer hands out
 * its entries is preserved.
 */
static int dummy_cmp(void *p1, void *p2)
{
	return -1;
}
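
/*
 * Illustration only: a tracer that wants its entries sorted by hit
 * count (largest first, matching the descending insertion done in
 * stat_seq_init() below) could provide something like this, reusing
 * the hypothetical struct my_stat sketched above:
 *
 *	static int my_stat_cmp(void *p1, void *p2)
 *	{
 *		struct my_stat *a = p1, *b = p2;
 *
 *		if (a->hits > b->hits)
 *			return 1;
 *		if (a->hits < b->hits)
 *			return -1;
 *		return 0;
 *	}
 */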

/*
 * Initialize the stat list at each trace_stat file opening.
 * All of this copying and sorting has to be redone on every open,
 * since the stats could have changed between two file sessions.
 */
static int stat_seq_init(void)
{
	struct trace_stat_list *iter_entry, *new_entry;
	void *prev_stat;
	int ret = 0;
	int i;

	mutex_lock(&stat_list_mutex);
	reset_stat_list();

	if (!current_tracer.stat_start || !current_tracer.stat_next ||
	    !current_tracer.stat_show)
		goto exit;

	if (!current_tracer.stat_cmp)
		current_tracer.stat_cmp = dummy_cmp;

	/*
	 * The first real entry (the stat_list head itself carries no
	 * data).
	 */
	new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
	if (!new_entry) {
		ret = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&new_entry->list);
	new_entry->stat = current_tracer.stat_start();
	/* The tracer may have nothing to report: leave the list empty */
	if (!new_entry->stat) {
		kfree(new_entry);
		goto exit;
	}
	list_add(&new_entry->list, &stat_list.list);

	prev_stat = new_entry->stat;

	/*
	 * Iterate over the tracer stat entries and store them in a
	 * sorted list.
	 */
	for (i = 1; ; i++) {
		new_entry = kmalloc(sizeof(struct trace_stat_list), GFP_KERNEL);
		if (!new_entry) {
			ret = -ENOMEM;
			goto exit_free_list;
		}

		INIT_LIST_HEAD(&new_entry->list);
		new_entry->stat = current_tracer.stat_next(prev_stat, i);

		/* No more entries: don't leak the unused node */
		if (!new_entry->stat) {
			kfree(new_entry);
			break;
		}

		list_for_each_entry(iter_entry, &stat_list.list, list) {
			/* Insert in descending order */
			if (current_tracer.stat_cmp(new_entry->stat,
						    iter_entry->stat) > 0) {

				list_add_tail(&new_entry->list,
					      &iter_entry->list);
				break;

			/* Reached the tail: new entry is the smallest, append it */
			} else if (list_is_last(&iter_entry->list,
						&stat_list.list)) {
				list_add(&new_entry->list, &iter_entry->list);
				break;
			}
		}

		prev_stat = new_entry->stat;
	}
exit:
	mutex_unlock(&stat_list_mutex);
	return ret;

exit_free_list:
	reset_stat_list();
	mutex_unlock(&stat_list_mutex);
	return ret;
}
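
/*
 * Note: the loop above is a plain insertion sort.  Each new entry is
 * compared against the entries already collected, so building the
 * snapshot costs O(n^2) comparisons in the worst case.  stat_next()
 * is called with an increasing index, starting at 1 for the entry
 * that follows the one returned by stat_start().
 */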

static void *stat_seq_start(struct seq_file *s, loff_t *pos)
{
	struct trace_stat_list *l = (struct trace_stat_list *)s->private;

	/* Prevent a tracer switch or a stat_list modification under us */
	mutex_lock(&stat_list_mutex);

	/* If we are at the beginning of the file, print the headers */
	if (!*pos && current_tracer.stat_headers)
		current_tracer.stat_headers(s);

	return seq_list_start(&l->list, *pos);
}

static void *stat_seq_next(struct seq_file *s, void *p, loff_t *pos)
{
	struct trace_stat_list *l = (struct trace_stat_list *)s->private;

	return seq_list_next(p, &l->list, pos);
}

static void stat_seq_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&stat_list_mutex);
}

static int stat_seq_show(struct seq_file *s, void *v)
{
	struct trace_stat_list *l = list_entry(v, struct trace_stat_list, list);

	return current_tracer.stat_show(s, l->stat);
}

static const struct seq_operations trace_stat_seq_ops = {
	.start = stat_seq_start,
	.next = stat_seq_next,
	.stop = stat_seq_stop,
	.show = stat_seq_show
};
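
/*
 * seq_read() drives the iterator above: stat_seq_start() is called
 * first (taking stat_list_mutex and printing the headers at offset 0),
 * stat_seq_show() and stat_seq_next() are then called for each entry,
 * and stat_seq_stop() drops the mutex once the pass is over.
 */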

static int tracing_stat_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = seq_open(file, &trace_stat_seq_ops);
	if (!ret) {
		struct seq_file *m = file->private_data;

		m->private = &stat_list;
		ret = stat_seq_init();
		/* Don't leak the seq_file state if the snapshot failed */
		if (ret)
			seq_release(inode, file);
	}

	return ret;
}

/*
 * Avoid consuming memory with our now useless list,
 * and release the seq_file state allocated on open.
 */
static int tracing_stat_release(struct inode *i, struct file *f)
{
	mutex_lock(&stat_list_mutex);
	reset_stat_list();
	mutex_unlock(&stat_list_mutex);
	return seq_release(i, f);
}

static const struct file_operations tracing_stat_fops = {
	.open = tracing_stat_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = tracing_stat_release
};
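
/*
 * Usage sketch, assuming debugfs is mounted at /sys/kernel/debug and
 * the current tracer provides stat callbacks:
 *
 *	# cat /sys/kernel/debug/tracing/trace_stat
 *
 * The snapshot is rebuilt at every open, so each read shows the stats
 * as they were when the file was opened.
 */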

static int __init tracing_stat_init(void)
{
	struct dentry *d_tracing;
	struct dentry *entry;

	INIT_LIST_HEAD(&stat_list.list);
	d_tracing = tracing_init_dentry();

	entry = debugfs_create_file("trace_stat", 0444, d_tracing,
				    NULL,
				    &tracing_stat_fops);
	if (!entry)
		pr_warning("Could not create debugfs 'trace_stat' entry\n");

	return 0;
}
fs_initcall(tracing_stat_init);