/* Blame annotation: every line below originates from commit 1da177e4 (LT). */
1 | #ifndef _PPC64_SEMAPHORE_H |
2 | #define _PPC64_SEMAPHORE_H | |
3 | ||
4 | /* | |
5 | * Remove spinlock-based RW semaphores; RW semaphore definitions are | |
6 | * now in rwsem.h and we use the generic lib/rwsem.c implementation. | |
7 | * Rework semaphores to use atomic_dec_if_positive. | |
8 | * -- Paul Mackerras (paulus@samba.org) | |
9 | */ | |
10 | ||
11 | #ifdef __KERNEL__ | |
12 | ||
13 | #include <asm/atomic.h> | |
14 | #include <asm/system.h> | |
15 | #include <linux/wait.h> | |
16 | #include <linux/rwsem.h> | |
17 | ||
/*
 * Counting semaphore.  `count' holds the number of available units;
 * `wait' queues tasks blocked in the slow path.
 */
struct semaphore {
	/*
	 * Note that any negative value of count is equivalent to 0,
	 * but additionally indicates that some process(es) might be
	 * sleeping on `wait'.
	 */
	atomic_t count;
	wait_queue_head_t wait;
};
27 | ||
/* Static initializer: a semaphore named `name' with `n' available units. */
#define __SEMAPHORE_INITIALIZER(name, n)	\
{	\
	.count = ATOMIC_INIT(n),	\
	.wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait)	\
}

/* Static initializer for a semaphore used as a mutex (count == 1). */
#define __MUTEX_INITIALIZER(name)	\
	__SEMAPHORE_INITIALIZER(name, 1)

/* Define and statically initialize a semaphore with `count' units. */
#define __DECLARE_SEMAPHORE_GENERIC(name, count)	\
	struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)

/* Define a mutex-style semaphore, initially unlocked / locked. */
#define DECLARE_MUTEX(name)		__DECLARE_SEMAPHORE_GENERIC(name, 1)
#define DECLARE_MUTEX_LOCKED(name)	__DECLARE_SEMAPHORE_GENERIC(name, 0)
42 | ||
43 | static inline void sema_init (struct semaphore *sem, int val) | |
44 | { | |
45 | atomic_set(&sem->count, val); | |
46 | init_waitqueue_head(&sem->wait); | |
47 | } | |
48 | ||
/* Initialise `sem' for use as a mutex: exactly one unit available. */
static inline void init_MUTEX (struct semaphore *sem)
{
	sema_init(sem, 1);
}
53 | ||
/* Initialise `sem' as an already-held mutex: zero units, so the first
 * down() will block until somebody calls up(). */
static inline void init_MUTEX_LOCKED (struct semaphore *sem)
{
	sema_init(sem, 0);
}
58 | ||
/*
 * Out-of-line slow paths, entered only on contention.  They sleep on
 * sem->wait (presumably defined in the arch's semaphore.c — the
 * implementation is not visible in this header).
 */
extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
62 | ||
/*
 * Acquire the semaphore, sleeping uninterruptibly until a unit is
 * available.  Must not be called from atomic context (might_sleep()).
 */
static inline void down(struct semaphore * sem)
{
	might_sleep();

	/*
	 * Try to get the semaphore, take the slow path if we fail.
	 */
	/* Unconditionally decrement; a negative result means no unit
	 * was available, so block in __down().  The sleepers there are
	 * accounted for by the negative count. */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		__down(sem);
}
73 | ||
/*
 * Acquire the semaphore, sleeping until a unit is available or a
 * signal arrives.  Returns 0 on success, non-zero (from
 * __down_interruptible(), presumably -EINTR) if interrupted.
 */
static inline int down_interruptible(struct semaphore * sem)
{
	int ret = 0;

	might_sleep();

	/* Same fast path as down(); only the slow path differs in
	 * being signal-interruptible. */
	if (unlikely(atomic_dec_return(&sem->count) < 0))
		ret = __down_interruptible(sem);
	return ret;
}
84 | ||
/*
 * Try to acquire the semaphore without sleeping.  Returns 0 if the
 * semaphore was acquired, non-zero if it was not (note: inverted
 * sense relative to down_interruptible's success value).
 *
 * atomic_dec_if_positive() only performs the decrement when the
 * result would be non-negative; a negative return means the count
 * was already <= 0 and nothing was taken.
 */
static inline int down_trylock(struct semaphore * sem)
{
	return atomic_dec_if_positive(&sem->count) < 0;
}
89 | ||
/*
 * Release one unit.  If the post-increment count is still <= 0,
 * at least one task is (or is about to be) sleeping in the slow
 * path, so hand the unit over via __up() to wake a waiter.
 * May be called from any context.
 */
static inline void up(struct semaphore * sem)
{
	if (unlikely(atomic_inc_return(&sem->count) <= 0))
		__up(sem);
}
95 | ||
96 | #endif /* __KERNEL__ */ | |
97 | ||
98 | #endif /* !(_PPC64_SEMAPHORE_H) */ |