/*
 * include/linux/smp_lock.h: Big Kernel Lock (BKL) interface.
 */
#ifndef __LINUX_SMPLOCK_H
#define __LINUX_SMPLOCK_H

#ifdef CONFIG_LOCK_KERNEL
#include <linux/sched.h>

/* Nonzero when the current task holds the BKL (lock_depth >= 0). */
#define kernel_locked() (current->lock_depth >= 0)

/* BKL re-acquire/release primitives; defined elsewhere (lib/kernel_lock.c). */
extern int __lockfunc __reacquire_kernel_lock(void);
extern void __lockfunc __release_kernel_lock(void);
/*
 * Release/re-acquire global kernel lock for the scheduler.
 *
 * release_kernel_lock(tsk): drop the BKL, but only if @tsk actually
 * holds it (lock_depth >= 0) -- the common case of not holding it is
 * hinted unlikely... wait, the *holding* case is hinted unlikely, so
 * the fast path is a single test and fall-through.
 */
#define release_kernel_lock(tsk) do { \
	if (unlikely((tsk)->lock_depth >= 0)) \
		__release_kernel_lock(); \
} while (0)
19
20 static inline int reacquire_kernel_lock(struct task_struct *task)
21 {
22 if (unlikely(task->lock_depth >= 0))
23 return __reacquire_kernel_lock();
24 return 0;
25 }
26
/*
 * The out-of-line BKL entry points take the call site (function, file,
 * line) -- presumably for lock debugging/tracing; their definitions are
 * not in this header.  The sparse annotations document that they
 * acquire/release the global kernel_lock.
 */
extern void __lockfunc
_lock_kernel(const char *func, const char *file, int line)
__acquires(kernel_lock);

extern void __lockfunc
_unlock_kernel(const char *func, const char *file, int line)
__releases(kernel_lock);

/* Public BKL API: forwards the caller's location to the real lock ops. */
#define lock_kernel() do { \
	_lock_kernel(__func__, __FILE__, __LINE__); \
} while (0)

#define unlock_kernel() do { \
	_unlock_kernel(__func__, __FILE__, __LINE__); \
} while (0)
42
/*
 * Various legacy drivers don't really need the BKL in a specific
 * function, but they *do* need to know that the BKL became available.
 * This function just avoids wrapping a bunch of lock/unlock pairs
 * around code which doesn't really need it.
 *
 * Acquiring and immediately dropping the lock serializes the caller
 * against whichever BKL holder was running at the time.
 */
static inline void cycle_kernel_lock(void)
{
	lock_kernel();
	unlock_kernel();
}
54
#else

/*
 * CONFIG_LOCK_KERNEL disabled: there is no BKL, so every operation
 * compiles away to nothing.  kernel_locked() reports 1 and
 * reacquire_kernel_lock() reports 0 so callers that test these values
 * behave as if the lock were uncontended.
 */
#define lock_kernel()
#define unlock_kernel()
#define release_kernel_lock(task) do { } while(0)
#define cycle_kernel_lock() do { } while(0)
#define reacquire_kernel_lock(task) 0
#define kernel_locked() 1

#endif /* CONFIG_LOCK_KERNEL */
#endif /* __LINUX_SMPLOCK_H */