/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 */
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
/*
 * Some notes on the implementation:
 *
 * down_trylock() and up() can be called from interrupt context.
 * So we have to disable interrupts when taking the lock.
 *
 * The ->count variable, if positive, defines how many more tasks can
 * acquire the semaphore.  If negative, it represents how many tasks are
 * waiting on the semaphore (*).  If zero, no tasks are waiting, and no more
 * tasks can acquire the semaphore.
 *
 * (*) Except for the window between one task calling up() and the task
 * sleeping in a __down_common() waking up.  In order to avoid a third task
 * coming in and stealing the second task's wakeup, we leave the ->count
 * negative.  If we have a more complex situation, the ->count may become
 * zero or negative (eg a semaphore with count = 2, three tasks attempt to
 * acquire it, one sleeps, two finish and call up(), the second task to call
 * up() notices that the list is empty and just increments count).
 */
35 static noinline
void __down(struct semaphore
*sem
);
36 static noinline
int __down_interruptible(struct semaphore
*sem
);
37 static noinline
int __down_killable(struct semaphore
*sem
);
38 static noinline
int __down_timeout(struct semaphore
*sem
, long jiffies
);
39 static noinline
void __up(struct semaphore
*sem
);
41 void down(struct semaphore
*sem
)
45 spin_lock_irqsave(&sem
->lock
, flags
);
46 if (unlikely(sem
->count
-- <= 0))
48 spin_unlock_irqrestore(&sem
->lock
, flags
);
52 int down_interruptible(struct semaphore
*sem
)
57 spin_lock_irqsave(&sem
->lock
, flags
);
58 if (unlikely(sem
->count
-- <= 0))
59 result
= __down_interruptible(sem
);
60 spin_unlock_irqrestore(&sem
->lock
, flags
);
64 EXPORT_SYMBOL(down_interruptible
);
66 int down_killable(struct semaphore
*sem
)
71 spin_lock_irqsave(&sem
->lock
, flags
);
72 if (unlikely(sem
->count
-- <= 0))
73 result
= __down_killable(sem
);
74 spin_unlock_irqrestore(&sem
->lock
, flags
);
78 EXPORT_SYMBOL(down_killable
);
/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically.  Returns 0 if the mutex has
 * been acquired successfully and 1 if it is contended.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock!  Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
93 int down_trylock(struct semaphore
*sem
)
98 spin_lock_irqsave(&sem
->lock
, flags
);
99 count
= sem
->count
- 1;
100 if (likely(count
>= 0))
102 spin_unlock_irqrestore(&sem
->lock
, flags
);
106 EXPORT_SYMBOL(down_trylock
);
108 int down_timeout(struct semaphore
*sem
, long jiffies
)
113 spin_lock_irqsave(&sem
->lock
, flags
);
114 if (unlikely(sem
->count
-- <= 0))
115 result
= __down_timeout(sem
, jiffies
);
116 spin_unlock_irqrestore(&sem
->lock
, flags
);
120 EXPORT_SYMBOL(down_timeout
);
122 void up(struct semaphore
*sem
)
126 spin_lock_irqsave(&sem
->lock
, flags
);
127 if (likely(sem
->count
>= 0))
131 spin_unlock_irqrestore(&sem
->lock
, flags
);
/* Functions for the contended case */
137 struct semaphore_waiter
{
138 struct list_head list
;
139 struct task_struct
*task
;
/*
 * Wake up a process waiting on a semaphore.  We need to call this from both
 * __up and __down_common as it's possible to race a task into the semaphore
 * if it comes in at just the right time between two tasks calling up() and
 * a third task waking up.  This function assumes the wait_list is already
 * checked for being non-empty.
 */
150 static noinline
void __sched
__up_down_common(struct semaphore
*sem
)
152 struct semaphore_waiter
*waiter
= list_first_entry(&sem
->wait_list
,
153 struct semaphore_waiter
, list
);
154 list_del(&waiter
->list
);
156 wake_up_process(waiter
->task
);
/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler.  Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
164 static inline int __sched
__down_common(struct semaphore
*sem
, long state
,
168 struct task_struct
*task
= current
;
169 struct semaphore_waiter waiter
;
171 list_add_tail(&waiter
.list
, &sem
->wait_list
);
176 if (state
== TASK_INTERRUPTIBLE
&& signal_pending(task
))
178 if (state
== TASK_KILLABLE
&& fatal_signal_pending(task
))
182 __set_task_state(task
, state
);
183 spin_unlock_irq(&sem
->lock
);
184 timeout
= schedule_timeout(timeout
);
185 spin_lock_irq(&sem
->lock
);
191 list_del(&waiter
.list
);
195 list_del(&waiter
.list
);
199 * Account for the process which woke us up. For the case where
200 * we're interrupted, we need to increment the count on our own
201 * behalf. I don't believe we can hit the case where the
202 * sem->count hits zero, *and* there's a second task sleeping,
203 * but it doesn't hurt, that's not a commonly exercised path and
204 * it's not a performance path either.
206 if (unlikely((++sem
->count
>= 0) && !list_empty(&sem
->wait_list
)))
207 __up_down_common(sem
);
211 static noinline
void __sched
__down(struct semaphore
*sem
)
213 __down_common(sem
, TASK_UNINTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
216 static noinline
int __sched
__down_interruptible(struct semaphore
*sem
)
218 return __down_common(sem
, TASK_INTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
221 static noinline
int __sched
__down_killable(struct semaphore
*sem
)
223 return __down_common(sem
, TASK_KILLABLE
, MAX_SCHEDULE_TIMEOUT
);
226 static noinline
int __sched
__down_timeout(struct semaphore
*sem
, long jiffies
)
228 return __down_common(sem
, TASK_UNINTERRUPTIBLE
, jiffies
);
231 static noinline
void __sched
__up(struct semaphore
*sem
)
233 if (unlikely(list_empty(&sem
->wait_list
)))
236 __up_down_common(sem
);