/*
 * Copyright (c) 2008 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * Distributed under the terms of the GNU GPL, version 2
 */

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>

/*
 * Some notes on the implementation:
 *
 * down_trylock() and up() can be called from interrupt context.
 * So we have to disable interrupts when taking the lock.
 *
 * The ->count variable defines how many more tasks can acquire the
 * semaphore.  If it's zero, there may be tasks waiting on the list.
 */
25 static noinline
void __down(struct semaphore
*sem
);
26 static noinline
int __down_interruptible(struct semaphore
*sem
);
27 static noinline
int __down_killable(struct semaphore
*sem
);
28 static noinline
int __down_timeout(struct semaphore
*sem
, long jiffies
);
29 static noinline
void __up(struct semaphore
*sem
);
31 void down(struct semaphore
*sem
)
35 spin_lock_irqsave(&sem
->lock
, flags
);
36 if (likely(sem
->count
> 0))
40 spin_unlock_irqrestore(&sem
->lock
, flags
);
44 int down_interruptible(struct semaphore
*sem
)
49 spin_lock_irqsave(&sem
->lock
, flags
);
50 if (likely(sem
->count
> 0))
53 result
= __down_interruptible(sem
);
54 spin_unlock_irqrestore(&sem
->lock
, flags
);
58 EXPORT_SYMBOL(down_interruptible
);
60 int down_killable(struct semaphore
*sem
)
65 spin_lock_irqsave(&sem
->lock
, flags
);
66 if (likely(sem
->count
> 0))
69 result
= __down_killable(sem
);
70 spin_unlock_irqrestore(&sem
->lock
, flags
);
74 EXPORT_SYMBOL(down_killable
);
77 * down_trylock - try to acquire the semaphore, without waiting
78 * @sem: the semaphore to be acquired
80 * Try to acquire the semaphore atomically. Returns 0 if the mutex has
81 * been acquired successfully and 1 if it is contended.
83 * NOTE: This return value is inverted from both spin_trylock and
84 * mutex_trylock! Be careful about this when converting code.
86 * Unlike mutex_trylock, this function can be used from interrupt context,
87 * and the semaphore can be released by any task or interrupt.
89 int down_trylock(struct semaphore
*sem
)
94 spin_lock_irqsave(&sem
->lock
, flags
);
95 count
= sem
->count
- 1;
96 if (likely(count
>= 0))
98 spin_unlock_irqrestore(&sem
->lock
, flags
);
102 EXPORT_SYMBOL(down_trylock
);
104 int down_timeout(struct semaphore
*sem
, long jiffies
)
109 spin_lock_irqsave(&sem
->lock
, flags
);
110 if (likely(sem
->count
> 0))
113 result
= __down_timeout(sem
, jiffies
);
114 spin_unlock_irqrestore(&sem
->lock
, flags
);
118 EXPORT_SYMBOL(down_timeout
);
120 void up(struct semaphore
*sem
)
124 spin_lock_irqsave(&sem
->lock
, flags
);
125 if (likely(list_empty(&sem
->wait_list
)))
129 spin_unlock_irqrestore(&sem
->lock
, flags
);
133 /* Functions for the contended case */
135 struct semaphore_waiter
{
136 struct list_head list
;
137 struct task_struct
*task
;
142 * Because this function is inlined, the 'state' parameter will be
143 * constant, and thus optimised away by the compiler. Likewise the
144 * 'timeout' parameter for the cases without timeouts.
146 static inline int __sched
__down_common(struct semaphore
*sem
, long state
,
149 struct task_struct
*task
= current
;
150 struct semaphore_waiter waiter
;
152 list_add_tail(&waiter
.list
, &sem
->wait_list
);
157 if (state
== TASK_INTERRUPTIBLE
&& signal_pending(task
))
159 if (state
== TASK_KILLABLE
&& fatal_signal_pending(task
))
163 __set_task_state(task
, state
);
164 spin_unlock_irq(&sem
->lock
);
165 timeout
= schedule_timeout(timeout
);
166 spin_lock_irq(&sem
->lock
);
172 list_del(&waiter
.list
);
176 list_del(&waiter
.list
);
180 static noinline
void __sched
__down(struct semaphore
*sem
)
182 __down_common(sem
, TASK_UNINTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
185 static noinline
int __sched
__down_interruptible(struct semaphore
*sem
)
187 return __down_common(sem
, TASK_INTERRUPTIBLE
, MAX_SCHEDULE_TIMEOUT
);
190 static noinline
int __sched
__down_killable(struct semaphore
*sem
)
192 return __down_common(sem
, TASK_KILLABLE
, MAX_SCHEDULE_TIMEOUT
);
195 static noinline
int __sched
__down_timeout(struct semaphore
*sem
, long jiffies
)
197 return __down_common(sem
, TASK_UNINTERRUPTIBLE
, jiffies
);
200 static noinline
void __sched
__up(struct semaphore
*sem
)
202 struct semaphore_waiter
*waiter
= list_first_entry(&sem
->wait_list
,
203 struct semaphore_waiter
, list
);
204 list_del(&waiter
->list
);
206 wake_up_process(waiter
->task
);