markers: use rcu_*_sched_notrace and notrace
kernel/marker.c
/*
 * Copyright (C) 2007 Mathieu Desnoyers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/marker.h>
#include <linux/err.h>
#include <linux/slab.h>

extern struct marker __start___markers[];
extern struct marker __stop___markers[];

/* Set to 1 to enable marker debug output */
static const int marker_debug;

/*
 * markers_mutex nests inside module_mutex. The markers mutex protects the
 * builtin and module markers and the hash table.
 */
static DEFINE_MUTEX(markers_mutex);

/*
 * Marker hash table, containing the active markers.
 * Protected by markers_mutex.
 */
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
static struct hlist_head marker_table[MARKER_TABLE_SIZE];

/*
 * Note about RCU :
 * It is used to make sure every handler has finished using its private data
 * between two consecutive operations (add or remove) on a given marker. It is
 * also used to delay freeing of the multiple-probes array until a quiescent
 * state is reached.
 * Marker entry modifications are protected by markers_mutex.
 */
struct marker_entry {
	struct hlist_node hlist;
	char *format;
	/* Probe wrapper */
	void (*call)(const struct marker *mdata, void *call_private, ...);
	struct marker_probe_closure single;
	struct marker_probe_closure *multi;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	struct rcu_head rcu;
	void *oldptr;
	int rcu_pending;
	unsigned char ptype:1;
	unsigned char format_allocated:1;
	char name[0];	/* Contains name'\0'format'\0' */
};

/**
 * __mark_empty_function - Empty probe callback
 * @probe_private: probe private data
 * @call_private: call site private data
 * @fmt: format string
 * @...: variable argument list
 *
 * Empty callback provided as a probe to the markers. By providing this to a
 * disabled marker, we make sure the execution flow is always valid even
 * though changing the function pointer and enabling the marker are two
 * distinct operations that modify the execution flow of preemptible code.
 */
notrace void __mark_empty_function(void *probe_private, void *call_private,
		const char *fmt, va_list *args)
{
}
EXPORT_SYMBOL_GPL(__mark_empty_function);

/*
 * marker_probe_cb - Callback that prepares the variable argument list for
 * probes.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Since we do not use "typical" pointer-based RCU in the single-probe case,
 * we need to put a full smp_rmb() in this branch. This is why we do not use
 * rcu_dereference() for the pointer read.
 */
notrace void marker_probe_cb(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;
	char ptype;

	/*
	 * rcu_read_lock_sched does two things: it disables preemption to make
	 * sure the teardown of the callbacks can be done correctly when they
	 * are in modules, and it ensures RCU read coherency.
	 */
	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		va_start(args, call_private);
		func(mdata->single.probe_private, call_private, mdata->format,
			&args);
		va_end(args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must ensure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++) {
			va_start(args, call_private);
			multi[i].func(multi[i].probe_private, call_private,
				mdata->format, &args);
			va_end(args);
		}
	}
	rcu_read_unlock_sched_notrace();
}
EXPORT_SYMBOL_GPL(marker_probe_cb);

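/*
 * Example (illustrative sketch, not part of the original file): a probe
 * with the signature marker_probe_cb expects. It pulls the marker
 * arguments out of the va_list with va_arg, in the order given by the
 * marker's format string. "my_probe" and the "value %d" format are
 * made-up names for the example.
 *
 *	static void my_probe(void *probe_private, void *call_private,
 *			const char *fmt, va_list *args)
 *	{
 *		int value;
 *
 *		value = va_arg(*args, int);
 *		printk(KERN_DEBUG "marker hit: value %d\n", value);
 *	}
 */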
/*
 * marker_probe_cb_noarg - Callback that does not prepare the variable
 * argument list.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Should be connected to markers "MARK_NOARGS".
 */
static notrace void marker_probe_cb_noarg(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;	/* not initialized */
	char ptype;

	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependent, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func(mdata->single.probe_private, call_private, mdata->format,
			&args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must ensure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++)
			multi[i].func(multi[i].probe_private, call_private,
				mdata->format, &args);
	}
	rcu_read_unlock_sched_notrace();
}

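/*
 * Example (illustrative sketch): pairing a MARK_NOARGS marker with a
 * probe. "subsys_noarg" and "my_noarg_probe" are made-up names. Note
 * that the args pointer passed by marker_probe_cb_noarg is uninitialized,
 * so a MARK_NOARGS probe must never touch it.
 *
 *	trace_mark(subsys_noarg, MARK_NOARGS);
 *
 *	static void my_noarg_probe(void *probe_private, void *call_private,
 *			const char *fmt, va_list *args)
 *	{
 *	}
 *
 *	marker_probe_register("subsys_noarg", MARK_NOARGS,
 *			my_noarg_probe, NULL);
 */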
static void free_old_closure(struct rcu_head *head)
{
	struct marker_entry *entry = container_of(head,
		struct marker_entry, rcu);
	kfree(entry->oldptr);
	/* Make sure we free the data before setting the pending flag to 0 */
	smp_wmb();
	entry->rcu_pending = 0;
}

static void debug_print_probes(struct marker_entry *entry)
{
	int i;

	if (!marker_debug)
		return;

	if (!entry->ptype) {
		printk(KERN_DEBUG "Single probe : %p %p\n",
			entry->single.func,
			entry->single.probe_private);
	} else {
		for (i = 0; entry->multi[i].func; i++)
			printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
				entry->multi[i].func,
				entry->multi[i].probe_private);
	}
}

static struct marker_probe_closure *
marker_entry_add_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0;
	struct marker_probe_closure *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->multi;
	if (!entry->ptype) {
		if (entry->single.func == probe &&
				entry->single.probe_private == probe_private)
			return ERR_PTR(-EBUSY);
		if (entry->single.func == __mark_empty_function) {
			/* 0 -> 1 probes */
			entry->single.func = probe;
			entry->single.probe_private = probe_private;
			entry->refcount = 1;
			entry->ptype = 0;
			debug_print_probes(entry);
			return NULL;
		} else {
			/* 1 -> 2 probes */
			nr_probes = 1;
			old = NULL;
		}
	} else {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe
					&& old[nr_probes].probe_private
					== probe_private)
				return ERR_PTR(-EBUSY);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
			GFP_KERNEL);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (!old)
		new[0] = entry->single;
	else
		memcpy(new, old,
			nr_probes * sizeof(struct marker_probe_closure));
	new[nr_probes].func = probe;
	new[nr_probes].probe_private = probe_private;
	entry->refcount = nr_probes + 1;
	entry->multi = new;
	entry->ptype = 1;
	debug_print_probes(entry);
	return old;
}

static struct marker_probe_closure *
marker_entry_remove_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0, nr_del = 0, i;
	struct marker_probe_closure *old, *new;

	old = entry->multi;

	debug_print_probes(entry);
	if (!entry->ptype) {
		/* 0 -> N is an error */
		WARN_ON(entry->single.func == __mark_empty_function);
		/* 1 -> 0 probes */
		WARN_ON(probe && entry->single.func != probe);
		WARN_ON(entry->single.probe_private != probe_private);
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
		debug_print_probes(entry);
		return NULL;
	} else {
		/* (N -> M), (N > 1, M >= 0) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((!probe || old[nr_probes].func == probe)
					&& old[nr_probes].probe_private
					== probe_private)
				nr_del++;
		}
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
	} else if (nr_probes - nr_del == 1) {
		/* N -> 1, (N > 1) */
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				entry->single = old[i];
		entry->refcount = 1;
		entry->ptype = 0;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 1) */
		/* + 1 for NULL */
		new = kzalloc((nr_probes - nr_del + 1)
			* sizeof(struct marker_probe_closure), GFP_KERNEL);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				new[j++] = old[i];
		entry->refcount = nr_probes - nr_del;
		entry->ptype = 1;
		entry->multi = new;
	}
	debug_print_probes(entry);
	return old;
}

/*
 * Get marker if the marker is present in the marker hash table.
 * Must be called with markers_mutex held.
 * Returns NULL if not present.
 */
static struct marker_entry *get_marker(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the marker to the marker hash table. Must be called with markers_mutex
 * held.
 */
static struct marker_entry *add_marker(const char *name, const char *format)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	size_t name_len = strlen(name) + 1;
	size_t format_len = 0;
	u32 hash = jhash(name, name_len-1, 0);

	if (format)
		format_len = strlen(format) + 1;
	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			printk(KERN_NOTICE
				"Marker %s busy\n", name);
			return ERR_PTR(-EBUSY);	/* Already there */
		}
	}
	/*
	 * Using kmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = kmalloc(sizeof(struct marker_entry) + name_len + format_len,
			GFP_KERNEL);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	if (format) {
		e->format = &e->name[name_len];
		memcpy(e->format, format, format_len);
		if (strcmp(e->format, MARK_NOARGS) == 0)
			e->call = marker_probe_cb_noarg;
		else
			e->call = marker_probe_cb;
		trace_mark(core_marker_format, "name %s format %s",
				e->name, e->format);
	} else {
		e->format = NULL;
		e->call = marker_probe_cb;
	}
	e->single.func = __mark_empty_function;
	e->single.probe_private = NULL;
	e->multi = NULL;
	e->ptype = 0;
	e->format_allocated = 0;
	e->refcount = 0;
	e->rcu_pending = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}

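/*
 * Layout of the variable-length allocation made by add_marker() above
 * (illustrative):
 *
 *	[ struct marker_entry ][ name '\0' ][ format '\0' ]
 *	                        ^ e->name    ^ e->format (when format != NULL)
 */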
/*
 * Remove the marker from the marker hash table. Must be called with
 * markers_mutex held.
 */
static int remove_marker(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	int found = 0;
	size_t len = strlen(name) + 1;
	u32 hash = jhash(name, len-1, 0);

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			found = 1;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	if (e->single.func != __mark_empty_function)
		return -EBUSY;
	hlist_del(&e->hlist);
	if (e->format_allocated)
		kfree(e->format);
	/* Make sure the call_rcu has been executed */
	if (e->rcu_pending)
		rcu_barrier_sched();
	kfree(e);
	return 0;
}

/*
 * Set the marker_entry format to the given format string.
 */
static int marker_set_format(struct marker_entry *entry, const char *format)
{
	entry->format = kstrdup(format, GFP_KERNEL);
	if (!entry->format)
		return -ENOMEM;
	entry->format_allocated = 1;

	trace_mark(core_marker_format, "name %s format %s",
			entry->name, entry->format);
	return 0;
}

/*
 * Sets the probe callback corresponding to one marker.
 */
static int set_marker(struct marker_entry *entry, struct marker *elem,
		int active)
{
	int ret;
	WARN_ON(strcmp(entry->name, elem->name) != 0);

	if (entry->format) {
		if (strcmp(entry->format, elem->format) != 0) {
			printk(KERN_NOTICE
				"Format mismatch for probe %s "
				"(%s), marker (%s)\n",
				entry->name,
				entry->format,
				elem->format);
			return -EPERM;
		}
	} else {
		ret = marker_set_format(entry, elem->format);
		if (ret)
			return ret;
	}

	/*
	 * probe_cb setup (statically known) is done here. It is
	 * asynchronous with the rest of execution, therefore we only
	 * pass from a "safe" callback (which sets up the arguments) to an
	 * "unsafe" callback (which does not set up arguments).
	 */
	elem->call = entry->call;
	/*
	 * Sanity check :
	 * We only update the single probe private data when the ptr is
	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
	 */
	WARN_ON(elem->single.func != __mark_empty_function
		&& elem->single.probe_private != entry->single.probe_private
		&& !elem->ptype);
	elem->single.probe_private = entry->single.probe_private;
	/*
	 * Make sure the private data is valid when we update the
	 * single probe ptr.
	 */
	smp_wmb();
	elem->single.func = entry->single.func;
	/*
	 * We also make sure that the new probe callbacks array is consistent
	 * before setting a pointer to it.
	 */
	rcu_assign_pointer(elem->multi, entry->multi);
	/*
	 * Update the function or multi probe array pointer before setting the
	 * ptype.
	 */
	smp_wmb();
	elem->ptype = entry->ptype;
	elem->state = active;

	return 0;
}

/*
 * Disable a marker and its probe callback.
 * Note: only waiting an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by the rcu_read_lock_sched around the call site.
 */
static void disable_marker(struct marker *elem)
{
	/* leave "call" as is. It is known statically. */
	elem->state = 0;
	elem->single.func = __mark_empty_function;
	/* Update the function before setting the ptype */
	smp_wmb();
	elem->ptype = 0;	/* single probe */
	/*
	 * Leave the private data and id there, because removal is racy and
	 * should be done only after an RCU period. These are never used until
	 * the next initialization anyway.
	 */
}

/**
 * marker_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of markers.
 */
void marker_update_probe_range(struct marker *begin,
		struct marker *end)
{
	struct marker *iter;
	struct marker_entry *mark_entry;

	mutex_lock(&markers_mutex);
	for (iter = begin; iter < end; iter++) {
		mark_entry = get_marker(iter->name);
		if (mark_entry) {
			set_marker(mark_entry, iter, !!mark_entry->refcount);
			/*
			 * ignore error, continue
			 */
		} else {
			disable_marker(iter);
		}
	}
	mutex_unlock(&markers_mutex);
}

/*
 * Update probes, removing the faulty probes.
 *
 * Internal callback only changed before the first probe is connected to it.
 * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
 * transitions. All other transitions will leave the old private data valid.
 * This makes the non-atomicity of the callback/private data updates valid.
 *
 * "special case" updates :
 * 0 -> 1 callback
 * 1 -> 0 callback
 * 1 -> 2 callbacks
 * 2 -> 1 callbacks
 * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
 * Side effect : marker_set_format may delete the marker entry (creating a
 * replacement).
 */
static void marker_update_probes(void)
{
	/* Core kernel markers */
	marker_update_probe_range(__start___markers, __stop___markers);
	/* Markers in modules. */
	module_update_markers();
}

/**
 * marker_probe_register - Connect a probe to a marker
 * @name: marker name
 * @format: format string
 * @probe: probe handler
 * @probe_private: probe private data
 *
 * private data must be a valid allocated memory address, or NULL.
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int marker_probe_register(const char *name, const char *format,
		marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0;
	struct marker_probe_closure *old;

	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry) {
		entry = add_marker(name, format);
		if (IS_ERR(entry))
			ret = PTR_ERR(entry);
	} else if (format) {
		if (!entry->format)
			ret = marker_set_format(entry, format);
		else if (strcmp(entry->format, format))
			ret = -EPERM;
	}
	if (ret)
		goto end;

	/*
	 * If we detect that a call_rcu is pending for this marker,
	 * make sure it's executed now.
	 */
	if (entry->rcu_pending)
		rcu_barrier_sched();
	old = marker_entry_add_probe(entry, probe, probe_private);
	if (IS_ERR(old)) {
		ret = PTR_ERR(old);
		goto end;
	}
	mutex_unlock(&markers_mutex);
	marker_update_probes();
	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
	call_rcu_sched(&entry->rcu, free_old_closure);
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_register);

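/*
 * Example (illustrative sketch, not part of the original file): connecting
 * a probe from a module init function. The marker name "subsys_event", the
 * "value %d" format and "my_probe" (see the probe sketch after
 * marker_probe_cb above) are assumptions made up for the example.
 *
 *	static int __init my_probe_init(void)
 *	{
 *		int ret;
 *
 *		ret = marker_probe_register("subsys_event", "value %d",
 *				my_probe, NULL);
 *		if (ret)
 *			printk(KERN_INFO "Unable to register probe\n");
 *		return ret;
 *	}
 */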
/**
 * marker_probe_unregister - Disconnect a probe from a marker
 * @name: marker name
 * @probe: probe function pointer
 * @probe_private: probe private data
 *
 * Returns 0 on success, or an error value.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int marker_probe_unregister(const char *name,
		marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	struct marker_probe_closure *old;
	int ret = -ENOENT;

	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	old = marker_entry_remove_probe(entry, probe, probe_private);
	mutex_unlock(&markers_mutex);
	marker_update_probes();
	mutex_lock(&markers_mutex);
	entry = get_marker(name);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
	call_rcu_sched(&entry->rcu, free_old_closure);
	remove_marker(name);	/* Ignore busy error message */
	ret = 0;
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister);

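/*
 * Example (illustrative sketch, matching the registration sketch above):
 * disconnecting the same probe from the module exit function.
 *
 *	static void __exit my_probe_exit(void)
 *	{
 *		marker_probe_unregister("subsys_event", my_probe, NULL);
 *	}
 */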
static struct marker_entry *
get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	unsigned int i;
	struct hlist_head *head;
	struct hlist_node *node;

	for (i = 0; i < MARKER_TABLE_SIZE; i++) {
		head = &marker_table[i];
		hlist_for_each_entry(entry, node, head, hlist) {
			if (!entry->ptype) {
				if (entry->single.func == probe
						&& entry->single.probe_private
						== probe_private)
					return entry;
			} else {
				struct marker_probe_closure *closure;
				int j;
				closure = entry->multi;
				/* Use a separate index here: reusing "i"
				 * would corrupt the hash-bucket loop above. */
				for (j = 0; closure[j].func; j++) {
					if (closure[j].func == probe &&
							closure[j].probe_private
							== probe_private)
						return entry;
				}
			}
		}
	}
	return NULL;
}

/**
 * marker_probe_unregister_private_data - Disconnect a probe from a marker
 * @probe: probe function
 * @probe_private: probe private data
 *
 * Unregister a probe by providing the registered private data.
 * Only removes the first marker found in the hash table.
 * Returns 0 on success, or an error value.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int marker_probe_unregister_private_data(marker_probe_func *probe,
		void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0;
	struct marker_probe_closure *old;

	mutex_lock(&markers_mutex);
	entry = get_marker_from_private_data(probe, probe_private);
	if (!entry) {
		ret = -ENOENT;
		goto end;
	}
	if (entry->rcu_pending)
		rcu_barrier_sched();
	old = marker_entry_remove_probe(entry, NULL, probe_private);
	mutex_unlock(&markers_mutex);
	marker_update_probes();
	mutex_lock(&markers_mutex);
	entry = get_marker_from_private_data(probe, probe_private);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
	call_rcu_sched(&entry->rcu, free_old_closure);
	remove_marker(entry->name);	/* Ignore busy error message */
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);

/**
 * marker_get_private_data - Get a marker's probe private data
 * @name: marker name
 * @probe: probe to match
 * @num: get the nth matching probe's private data
 *
 * Returns the nth private data pointer (starting from 0) matching, or an
 * ERR_PTR.
 * The private data pointer should _only_ be dereferenced if the caller is the
 * owner of the data, or its content could vanish. This is mostly used to
 * confirm that a caller is the owner of a registered probe.
 */
void *marker_get_private_data(const char *name, marker_probe_func *probe,
		int num)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len-1, 0);
	int i;

	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			if (!e->ptype) {
				if (num == 0 && e->single.func == probe)
					return e->single.probe_private;
			} else {
				struct marker_probe_closure *closure;
				int match = 0;
				closure = e->multi;
				for (i = 0; closure[i].func; i++) {
					if (closure[i].func != probe)
						continue;
					if (match++ == num)
						return closure[i].probe_private;
				}
			}
			break;
		}
	}
	return ERR_PTR(-ENOENT);
}
EXPORT_SYMBOL_GPL(marker_get_private_data);
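
/*
 * Example (illustrative sketch): confirming probe ownership before
 * touching its private data. "subsys_event", "my_probe" and "my_private"
 * are made-up names carried over from the earlier sketches.
 *
 *	void *data;
 *
 *	data = marker_get_private_data("subsys_event", my_probe, 0);
 *	if (!IS_ERR(data) && data == my_private)
 *		... the caller owns the first matching probe ...
 */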