```c
/*
 * lib/kernel_lock.c
 *
 * This is the traditional BKL - big kernel lock. Largely
 * relegated to obsolescence, but used by various less
 * important (or lazy) subsystems.
 */
#include <linux/smp_lock.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/semaphore.h>

/*
 * The 'big kernel semaphore'
 *
 * This mutex is taken and released recursively by lock_kernel()
 * and unlock_kernel(). It is transparently dropped and reacquired
 * over schedule(). It is used to protect legacy code that hasn't
 * been migrated to a proper locking design yet.
 *
 * Note: code locked by this semaphore will only be serialized against
 * other code using the same locking facility. The code guarantees that
 * the task remains on the same CPU.
 *
 * Don't use in new code.
 */
static DECLARE_MUTEX(kernel_sem);

/*
 * Re-acquire the kernel semaphore.
 *
 * This function is called with preemption off.
 *
 * We are executing in schedule() so the code must be extremely careful
 * about recursion, both due to the down() and due to the enabling of
 * preemption. schedule() will re-check the preemption flag after
 * reacquiring the semaphore.
 */
int __lockfunc __reacquire_kernel_lock(void)
{
        struct task_struct *task = current;
        int saved_lock_depth = task->lock_depth;

        BUG_ON(saved_lock_depth < 0);

        task->lock_depth = -1;
        preempt_enable_no_resched();

        down(&kernel_sem);

        preempt_disable();
        task->lock_depth = saved_lock_depth;

        return 0;
}

void __lockfunc __release_kernel_lock(void)
{
        up(&kernel_sem);
}

/*
 * Getting the big kernel semaphore.
 */
void __lockfunc lock_kernel(void)
{
        struct task_struct *task = current;
        int depth = task->lock_depth + 1;

        if (likely(!depth))
                /*
                 * No recursion worries - we set up lock_depth _after_
                 */
                down(&kernel_sem);

        task->lock_depth = depth;
}

void __lockfunc unlock_kernel(void)
{
        struct task_struct *task = current;

        BUG_ON(task->lock_depth < 0);

        if (likely(--task->lock_depth < 0))
                up(&kernel_sem);
}

EXPORT_SYMBOL(lock_kernel);
EXPORT_SYMBOL(unlock_kernel);
```
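
The comment block above spells out the user-facing contract: lock_kernel() and unlock_kernel() nest per task via current->lock_depth, only the outermost pair actually touches kernel_sem, and only code that also takes the BKL is serialized against this path. A minimal sketch of how a legacy driver of this era might use the interface is below; the my_legacy_* names are hypothetical, and only lock_kernel()/unlock_kernel() and the old open() prototype come from the kernel of that period.

```c
#include <linux/fs.h>
#include <linux/smp_lock.h>     /* lock_kernel() / unlock_kernel() */

/*
 * Hypothetical helper that also wants the BKL. Because the lock nests
 * per task, calling it with the BKL already held only bumps lock_depth
 * (0 -> 1 here); kernel_sem itself is not taken a second time.
 */
static void my_legacy_helper(void)
{
        lock_kernel();
        /* ... touch legacy global state ... */
        unlock_kernel();        /* lock_depth 1 -> 0, semaphore still held */
}

/* Hypothetical legacy open() entry point. */
static int my_legacy_open(struct inode *inode, struct file *file)
{
        lock_kernel();          /* lock_depth -1 -> 0, down(&kernel_sem) */
        my_legacy_helper();     /* nested acquisition, see above */
        unlock_kernel();        /* lock_depth 0 -> -1, up(&kernel_sem) */
        return 0;
}
```

Note that this only excludes other BKL users: a spinlock or mutex taken elsewhere gives no ordering against code running under kernel_sem.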
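
__reacquire_kernel_lock() and __release_kernel_lock() are not meant for BKL users at all; they exist for schedule(), which transparently drops the semaphore when a BKL-holding task sleeps and takes it back before that task runs again. A sketch of the caller side, roughly as the <linux/smp_lock.h> wrappers of this era look, follows; it is reconstructed from memory of that header rather than taken from this listing, so treat the exact form as an assumption.

```c
/*
 * Sketch of the scheduler-side wrappers (assumed to live in
 * <linux/smp_lock.h>, not part of lib/kernel_lock.c): schedule()
 * releases the BKL for the outgoing task and reacquires it for the
 * task before it runs again.
 */
#define release_kernel_lock(tsk) do {                   \
        if (unlikely((tsk)->lock_depth >= 0))           \
                __release_kernel_lock();                \
} while (0)

static inline int reacquire_kernel_lock(struct task_struct *task)
{
        if (unlikely(task->lock_depth >= 0))
                return __reacquire_kernel_lock();
        return 0;
}
```

The lock_depth check is what makes the drop/reacquire transparent: a task that never took the BKL has lock_depth == -1 and skips both calls entirely.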