/*
 * Runtime locking correctness validator
 *
 * Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * Lock-class usage-state bits:
 */
enum lock_usage_bit
{
	LOCK_USED = 0,
	LOCK_USED_IN_HARDIRQ,
	LOCK_USED_IN_SOFTIRQ,
	LOCK_ENABLED_SOFTIRQS,
	LOCK_ENABLED_HARDIRQS,
	LOCK_USED_IN_HARDIRQ_READ,
	LOCK_USED_IN_SOFTIRQ_READ,
	LOCK_ENABLED_SOFTIRQS_READ,
	LOCK_ENABLED_HARDIRQS_READ,
	LOCK_USAGE_STATES
};

/*
 * Usage-state bitmasks:
 */
#define LOCKF_USED			(1 << LOCK_USED)
#define LOCKF_USED_IN_HARDIRQ		(1 << LOCK_USED_IN_HARDIRQ)
#define LOCKF_USED_IN_SOFTIRQ		(1 << LOCK_USED_IN_SOFTIRQ)
#define LOCKF_ENABLED_HARDIRQS		(1 << LOCK_ENABLED_HARDIRQS)
#define LOCKF_ENABLED_SOFTIRQS		(1 << LOCK_ENABLED_SOFTIRQS)

#define LOCKF_ENABLED_IRQS	(LOCKF_ENABLED_HARDIRQS | LOCKF_ENABLED_SOFTIRQS)
#define LOCKF_USED_IN_IRQ	(LOCKF_USED_IN_HARDIRQ | LOCKF_USED_IN_SOFTIRQ)

#define LOCKF_USED_IN_HARDIRQ_READ	(1 << LOCK_USED_IN_HARDIRQ_READ)
#define LOCKF_USED_IN_SOFTIRQ_READ	(1 << LOCK_USED_IN_SOFTIRQ_READ)
#define LOCKF_ENABLED_HARDIRQS_READ	(1 << LOCK_ENABLED_HARDIRQS_READ)
#define LOCKF_ENABLED_SOFTIRQS_READ	(1 << LOCK_ENABLED_SOFTIRQS_READ)

#define LOCKF_ENABLED_IRQS_READ \
		(LOCKF_ENABLED_HARDIRQS_READ | LOCKF_ENABLED_SOFTIRQS_READ)
#define LOCKF_USED_IN_IRQ_READ \
		(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
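
/*
 * Illustration (not part of the API): the validator's central IRQ rule
 * can be expressed with these masks. A class that has ever been used in
 * hardirq context and has also ever been held with hardirqs enabled is
 * a potential deadlock. A minimal, hypothetical sketch of that check:
 *
 *	static int hardirq_unsafe(unsigned long usage_mask)
 *	{
 *		return (usage_mask & LOCKF_USED_IN_HARDIRQ) &&
 *		       (usage_mask & LOCKF_ENABLED_HARDIRQS);
 *	}
 */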

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
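
/*
 * Example (illustrative; the key name is made up): a subsystem that
 * wants a lock in its own class declares a key in .data and uses its
 * address as the class identity. One byte per subclass keeps the
 * MAX_LOCKDEP_SUBCLASSES sub-key addresses unique:
 *
 *	static struct lock_class_key mysubsys_lock_key;
 *
 * &mysubsys_lock_key is then passed as the "key" argument of
 * lockdep_init_map() or the lockdep_set_class*() helpers below.
 */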

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;
};

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
};
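
/*
 * Example (a sketch; "struct my_lock" and "struct my_raw_lock" are made
 * up): lock types embed a lockdep_map alongside their low-level state,
 * so that every instance can be mapped back to its class:
 *
 *	struct my_lock {
 *		struct my_raw_lock	raw;
 *		struct lockdep_map	dep_map;
 *	};
 *
 * The kernel's own spinlock/rwlock/mutex/rwsem types gain such a
 * dep_map field when CONFIG_DEBUG_LOCK_ALLOC is enabled.
 */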

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	struct list_head		entry;
	u64				chain_key;
};

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	struct lock_class		*class;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;

	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	int				irq_context;
	int				trylock;
	int				read;
	int				check;
	int				hardirqs_off;
};
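
/*
 * Conceptual sketch of the chain-key hashing described above, with a
 * made-up mix function (the validator's real hash differs): every
 * acquire folds the new lock's class into the running 64-bit key, so
 * distinct acquisition orders yield distinct keys with high
 * probability:
 *
 *	u64 fold_chain_key(u64 chain_key, struct lock_class *class)
 *	{
 *		return (chain_key << 11) ^ (u64)(unsigned long)class;
 *	}
 *
 * prev_chain_key saves the value from before this acquire so that it
 * can be restored when the lock is released.
 */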

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * Reinitialize a lock key - for cases where special locking or special
 * initialization of locks would otherwise make the validator get the
 * scope of dependencies wrong: either too broad (the class needs a
 * split) or too narrow (the lock suffers from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub) \
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)
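
/*
 * Usage example (illustrative; "mydev" and the key name are made up):
 * a driver whose per-device lock would otherwise share a class with
 * every other lock initialized at the same site can split it off into
 * its own class after initialization:
 *
 *	static struct lock_class_key mydev_lock_key;
 *
 *	spin_lock_init(&mydev->lock);
 *	lockdep_set_class(&mydev->lock, &mydev_lock_key);
 */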

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);

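/*
 * Annotation example (a sketch; "mylock" and its dep_map member are
 * assumed): a locking primitive not covered by the wrappers below can
 * report its events to the validator directly:
 *
 *	lock_acquire(&mylock->dep_map, 0, 0, 0, 2,
 *		     (unsigned long)__builtin_return_address(0));
 *	... take the lock, run the critical section, release it ...
 *	lock_release(&mylock->dep_map, 0,
 *		     (unsigned long)__builtin_return_address(0));
 *
 * The arguments request subclass 0, no trylock, a write-acquire and
 * full validation.
 */
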
# define INIT_LOCKDEP				.lockdep_recursion = 0,

#define lockdep_depth(tsk)	((tsk)->lockdep_depth)

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, i)		do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#endif /* !LOCKDEP */

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_GENERIC_HARDIRQS)
extern void early_init_irq_lock_class(void);
#else
# define early_init_irq_lock_class()		do { } while (0)
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
#else
# define early_boot_irqs_off()			do { } while (0)
# define early_boot_irqs_on()			do { } while (0)
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
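
/*
 * Example (illustrative; "parent" and "child" are assumed locks of the
 * same class): taking two locks of one class in a fixed parent->child
 * order would normally be flagged as recursive locking, so the inner
 * lock is taken in subclass 1 via the *_nested() primitives:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */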

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#endif /* __LINUX_LOCKDEP_H */