/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>
12 * Initialize an rwsem:
14 void __init_rwsem(struct rw_semaphore
*sem
, const char *name
,
15 struct lock_class_key
*key
)
17 #ifdef CONFIG_DEBUG_LOCK_ALLOC
19 * Make sure we are not reinitializing a held semaphore:
21 debug_check_no_locks_freed((void *)sem
, sizeof(*sem
));
22 lockdep_init_map(&sem
->dep_map
, name
, key
, 0);
24 sem
->count
= RWSEM_UNLOCKED_VALUE
;
25 spin_lock_init(&sem
->wait_lock
);
26 INIT_LIST_HEAD(&sem
->wait_list
);
29 EXPORT_SYMBOL(__init_rwsem
);
32 struct list_head list
;
33 struct task_struct
*task
;
35 #define RWSEM_WAITING_FOR_READ 0x00000001
36 #define RWSEM_WAITING_FOR_WRITE 0x00000002
40 * handle the lock release when processes blocked on it that can now run
41 * - if we come here from up_xxxx(), then:
42 * - the 'active part' of count (&0x0000ffff) reached 0 (but may have changed)
43 * - the 'waiting part' of count (&0xffff0000) is -ve (and will still be so)
44 * - there must be someone on the queue
45 * - the spinlock must be held by the caller
46 * - woken process blocks are discarded from the list after having task zeroed
47 * - writers are only woken if downgrading is false
49 static inline struct rw_semaphore
*
50 __rwsem_do_wake(struct rw_semaphore
*sem
, int downgrading
)
52 struct rwsem_waiter
*waiter
;
53 struct task_struct
*tsk
;
54 struct list_head
*next
;
55 signed long oldcount
, woken
, loop
;
57 waiter
= list_entry(sem
->wait_list
.next
, struct rwsem_waiter
, list
);
58 if (!(waiter
->flags
& RWSEM_WAITING_FOR_WRITE
))
64 /* There's a writer at the front of the queue - try to grant it the
65 * write lock. However, we only wake this writer if we can transition
66 * the active part of the count from 0 -> 1
69 oldcount
= rwsem_atomic_update(RWSEM_ACTIVE_BIAS
, sem
)
71 if (oldcount
& RWSEM_ACTIVE_MASK
)
72 /* Someone grabbed the sem already */
75 /* We must be careful not to touch 'waiter' after we set ->task = NULL.
76 * It is an allocated on the waiter's stack and may become invalid at
77 * any time after that point (due to a wakeup from another source).
79 list_del(&waiter
->list
);
91 /* if we came through an up_xxxx() call, we only only wake someone up
92 * if we can transition the active part of the count from 0 -> 1 */
94 oldcount
= rwsem_atomic_update(RWSEM_ACTIVE_BIAS
, sem
)
96 if (oldcount
& RWSEM_ACTIVE_MASK
)
97 /* Someone grabbed the sem already */
101 /* Grant an infinite number of read locks to the readers at the front
102 * of the queue. Note we increment the 'active part' of the count by
103 * the number of readers before waking any processes up.
109 if (waiter
->list
.next
== &sem
->wait_list
)
112 waiter
= list_entry(waiter
->list
.next
,
113 struct rwsem_waiter
, list
);
115 } while (waiter
->flags
& RWSEM_WAITING_FOR_READ
);
118 woken
*= RWSEM_ACTIVE_BIAS
- RWSEM_WAITING_BIAS
;
120 /* we'd already done one increment earlier */
121 woken
-= RWSEM_ACTIVE_BIAS
;
123 rwsem_atomic_add(woken
, sem
);
125 next
= sem
->wait_list
.next
;
126 for (; loop
> 0; loop
--) {
127 waiter
= list_entry(next
, struct rwsem_waiter
, list
);
128 next
= waiter
->list
.next
;
132 wake_up_process(tsk
);
133 put_task_struct(tsk
);
136 sem
->wait_list
.next
= next
;
137 next
->prev
= &sem
->wait_list
;
142 /* undo the change to the active count, but check for a transition
145 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS
, sem
) & RWSEM_ACTIVE_MASK
)
147 goto try_again_write
;
149 if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS
, sem
) & RWSEM_ACTIVE_MASK
)
155 * wait for a lock to be granted
157 static struct rw_semaphore __sched
*
158 rwsem_down_failed_common(struct rw_semaphore
*sem
,
159 struct rwsem_waiter
*waiter
, signed long adjustment
)
161 struct task_struct
*tsk
= current
;
164 set_task_state(tsk
, TASK_UNINTERRUPTIBLE
);
166 /* set up my own style of waitqueue */
167 spin_lock_irq(&sem
->wait_lock
);
169 get_task_struct(tsk
);
171 list_add_tail(&waiter
->list
, &sem
->wait_list
);
173 /* we're now waiting on the lock, but no longer actively read-locking */
174 count
= rwsem_atomic_update(adjustment
, sem
);
176 /* if there are no active locks, wake the front queued process(es) up */
177 if (!(count
& RWSEM_ACTIVE_MASK
))
178 sem
= __rwsem_do_wake(sem
, 0);
180 spin_unlock_irq(&sem
->wait_lock
);
182 /* wait to be given the lock */
187 set_task_state(tsk
, TASK_UNINTERRUPTIBLE
);
190 tsk
->state
= TASK_RUNNING
;
196 * wait for the read lock to be granted
198 asmregparm
struct rw_semaphore __sched
*
199 rwsem_down_read_failed(struct rw_semaphore
*sem
)
201 struct rwsem_waiter waiter
;
203 waiter
.flags
= RWSEM_WAITING_FOR_READ
;
204 rwsem_down_failed_common(sem
, &waiter
,
205 RWSEM_WAITING_BIAS
- RWSEM_ACTIVE_BIAS
);
210 * wait for the write lock to be granted
212 asmregparm
struct rw_semaphore __sched
*
213 rwsem_down_write_failed(struct rw_semaphore
*sem
)
215 struct rwsem_waiter waiter
;
217 waiter
.flags
= RWSEM_WAITING_FOR_WRITE
;
218 rwsem_down_failed_common(sem
, &waiter
, -RWSEM_ACTIVE_BIAS
);
224 * handle waking up a waiter on the semaphore
225 * - up_read/up_write has decremented the active part of count if we come here
227 asmregparm
struct rw_semaphore
*rwsem_wake(struct rw_semaphore
*sem
)
231 spin_lock_irqsave(&sem
->wait_lock
, flags
);
233 /* do nothing if list empty */
234 if (!list_empty(&sem
->wait_list
))
235 sem
= __rwsem_do_wake(sem
, 0);
237 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
243 * downgrade a write lock into a read lock
244 * - caller incremented waiting part of count and discovered it still negative
245 * - just wake up any readers at the front of the queue
247 asmregparm
struct rw_semaphore
*rwsem_downgrade_wake(struct rw_semaphore
*sem
)
251 spin_lock_irqsave(&sem
->wait_lock
, flags
);
253 /* do nothing if list empty */
254 if (!list_empty(&sem
->wait_list
))
255 sem
= __rwsem_do_wake(sem
, 1);
257 spin_unlock_irqrestore(&sem
->wait_lock
, flags
);
EXPORT_SYMBOL(rwsem_down_read_failed);
EXPORT_SYMBOL(rwsem_down_write_failed);
EXPORT_SYMBOL(rwsem_wake);
EXPORT_SYMBOL(rwsem_downgrade_wake);