/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
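
/*
 * Illustrative sketch (not part of this header; "my_*" names are made
 * up): a lock implementation hands lockdep a key whose unique address
 * identifies the lock-class. The spin_lock_init()-style macros supply a
 * static key of their own at the init site; a lock can also be re-keyed
 * explicitly with lockdep_set_class(), defined further below:
 */
#if 0
static struct lock_class_key my_key;	/* unique address => unique class */
static spinlock_t my_lock;

void my_init(void)
{
	spin_lock_init(&my_lock);		/* macro supplies a static key */
	lockdep_set_class(&my_lock, &my_key);	/* or re-key explicitly */
}
#endif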
46
47 #define LOCKSTAT_POINTS 4
48
49 /*
50 * The lock-class itself:
51 */
52 struct lock_class {
53 /*
54 * class-hash:
55 */
56 struct list_head hash_entry;
57
58 /*
59 * global list of all lock-classes:
60 */
61 struct list_head lock_entry;
62
63 struct lockdep_subclass_key *key;
64 unsigned int subclass;
65 unsigned int dep_gen_id;
66
67 /*
68 * IRQ/softirq usage tracking bits:
69 */
70 unsigned long usage_mask;
71 struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES];
72
	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
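
/*
 * Illustrative sketch (not part of this header; "my_lock" is a made-up
 * type): lock implementations embed the lockdep_map, conventionally as a
 * member named "dep_map" so the lockdep_set_*() macros below can reach
 * it:
 */
#if 0
struct my_lock {
	arch_spinlock_t		raw_lock;
#ifdef CONFIG_LOCKDEP
	struct lockdep_map	dep_map;	/* instance -> class mapping */
#endif
};
#endif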

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */
	struct lock_list		*parent;
};
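
/*
 * Illustrative sketch (not part of this header; helper names are made
 * up): because struct lock_list is word-aligned, the low bit of the
 * parent pointer is free, so the breadth-first walk can use it as a
 * "visited" mark instead of a separate flag field:
 */
#if 0
static inline void bfs_mark_accessed(struct lock_list *entry,
				     struct lock_list *parent)
{
	entry->parent = (void *)((unsigned long)parent | 1UL);
}

static inline int bfs_accessed(struct lock_list *entry)
{
	return (unsigned long)entry->parent & 1UL;
}

static inline struct lock_list *bfs_parent(struct lock_list *entry)
{
	return (void *)((unsigned long)entry->parent & ~1UL);
}
#endif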

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)
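
/*
 * Illustrative sketch (not part of this header; the helper name is made
 * up - the in-kernel equivalent is hlock_class() in kernel/lockdep.c):
 * the off-by-one encoding means a decode looks roughly like this, with
 * index 0 reserved for "no class":
 */
#if 0
extern struct lock_class lock_classes[MAX_LOCKDEP_KEYS];

static inline struct lock_class *class_from_idx(unsigned int class_idx)
{
	/* class_idx == 0 means "no class"; real entries start at 1 */
	return class_idx ? lock_classes + class_idx - 1 : NULL;
}
#endif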

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;					/* 16 bits */

	unsigned int read:2;		/* see lock_acquire() comment */
	unsigned int check:2;		/* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;				/* 32 bits */
};
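
/*
 * Illustrative sketch (not part of this header): the chain hash grows one
 * step per acquired lock by folding the previous 64-bit key together with
 * the next class index. A mixing step of roughly this flavor is what the
 * validator uses internally (see kernel/lockdep.c):
 */
#if 0
#define iterate_chain_key(key1, key2) \
	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
	 ((key1) >> (64 - MAX_LOCKDEP_KEYS_BITS)) ^ \
	 (key2))

/* i.e. chain_key' = mix(chain_key, class_idx) on every lock_acquire() */
#endif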

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
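
/*
 * Illustrative sketch (not part of this header; names are made up): a
 * build-time-initialized pseudo-lock object gets its map set up with a
 * static key:
 */
#if 0
static struct lock_class_key my_dep_key;
static struct lockdep_map my_dep_map =
	STATIC_LOCKDEP_MAP_INIT("my_dep_map", &my_dep_key);
#endif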

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
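
/*
 * Illustrative sketch (not part of this header; "myfs" names are made
 * up): a filesystem whose directory and file inode locks follow genuinely
 * different locking orders can split the class after init, a pattern
 * several in-tree filesystems use for i_mutex:
 */
#if 0
static struct lock_class_key myfs_dir_ilock_key;

static void myfs_init_inode(struct inode *inode, int is_dir)
{
	if (is_dir)	/* directory inodes are locked before file inodes */
		lockdep_set_class(&inode->i_mutex, &myfs_dir_ilock_key);
}
#endif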
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
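
/*
 * Illustrative sketch (not part of this header; the my_lock type from the
 * earlier sketch and these function names are made up): a homegrown
 * primitive reports itself to the validator by bracketing the real
 * acquire/release. read=0 and check=2 request an exclusive acquire with
 * full validation; the annotation comes before taking the lock and before
 * dropping it:
 */
#if 0
void my_lock_acquire(struct my_lock *l)
{
	lock_acquire(&l->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
	arch_spin_lock(&l->raw_lock);
}

void my_lock_release(struct my_lock *l)
{
	lock_release(&l->dep_map, 0, _RET_IP_);
	arch_spin_unlock(&l->raw_lock);
}
#endif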

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP	.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
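
/*
 * Illustrative sketch (not part of this header; names are made up): a
 * function that documents its locking contract can have the validator
 * enforce it at runtime:
 */
#if 0
static void my_update_counters(struct my_dev *dev)
{
	lockdep_assert_held(&dev->lock);	/* caller must hold dev->lock */
	dev->count++;
}
#endif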

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)
/*
 * We don't define lockdep_match_class() and lockdep_match_key() for the
 * !LOCKDEP case, since the result would not be well defined; callers
 * should wrap such calls in #ifdef CONFIG_LOCKDEP themselves.
 */

# define INIT_LOCKDEP
# define lockdep_reset()			do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
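
/*
 * Illustrative sketch (not part of this header; "my_*" names are made
 * up): a lock implementation passes its trylock fast path and its
 * blocking slow path, and lock statistics then timestamp the contention
 * window between the failed trylock and the eventual acquisition:
 */
#if 0
void my_mutex_lock(struct my_mutex *m)
{
	mutex_acquire(&m->dep_map, 0, 0, _RET_IP_);
	LOCK_CONTENDED(m, my_mutex_trylock, __my_mutex_lock_slowpath);
}
#endif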

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip)		do {} while (0)
#define lock_acquired(lockdep_map, ip)		do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of the
 * _raw_*_lock_flags() code, because lockdep assumes that interrupts are
 * not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
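
/*
 * Illustrative sketch (not part of this header; everything except
 * mutex_lock_nested() is made up): taking two locks of the same class in
 * a fixed order - e.g. parent before child - is annotated with a nesting
 * subclass so the validator does not flag a false self-deadlock:
 */
#if 0
static void my_lock_pair(struct my_node *parent, struct my_node *child)
{
	mutex_lock(&parent->lock);
	mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
}
#endif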

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif
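
/*
 * Illustrative sketch (not part of this header; "my_*" names are made
 * up): lock_map_acquire()/lock_map_release() annotate pseudo-locks -
 * objects that are not locks but impose lock-like ordering. The workqueue
 * code uses this pattern to catch flush-from-inside-work deadlocks;
 * schematically:
 */
#if 0
static struct lock_class_key my_work_key;
static struct lockdep_map my_work_map =
	STATIC_LOCKDEP_MAP_INIT("my_work", &my_work_key);

static void my_run_work(void)
{
	lock_map_acquire(&my_work_map);	/* "enter" the work context */
	do_the_work();
	lock_map_release(&my_work_map);
}

static void my_flush_work(void)
{
	/* waiting for the work behaves like taking the same pseudo-lock */
	lock_map_acquire(&my_work_map);
	lock_map_release(&my_work_map);
	wait_for_work_to_finish();
}
#endif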

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
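
/*
 * Illustrative sketch (not part of this header; names are made up): a
 * function that only takes a lock on some paths can declare the
 * dependency unconditionally with might_lock(), so the rare path is
 * validated on every call rather than only when it actually triggers:
 */
#if 0
static void *my_get_buffer(struct my_dev *dev, int may_alloc)
{
	might_lock(&dev->alloc_lock);	/* taken only when refilling */

	if (may_alloc && !dev->cached)
		return my_refill_cache(dev);	/* takes dev->alloc_lock */
	return dev->cached;
}
#endif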

#ifdef CONFIG_PROVE_RCU
extern void lockdep_rcu_dereference(const char *file, const int line);
#endif

#endif /* __LINUX_LOCKDEP_H */