locking/rwsem: Get rid of __down_write_nested()
[deliverable/linux.git] arch/sparc/include/asm/rwsem.h
/*
 * rwsem.h: R/W semaphores implemented using CAS
 *
 * Written by David S. Miller (davem@redhat.com), 2001.
 * Derived from asm-i386/rwsem.h
 */
#ifndef _SPARC64_RWSEM_H
#define _SPARC64_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
#endif

#ifdef __KERNEL__

#define RWSEM_UNLOCKED_VALUE		0x00000000L
#define RWSEM_ACTIVE_BIAS		0x00000001L
#define RWSEM_ACTIVE_MASK		0xffffffffL
#define RWSEM_WAITING_BIAS		(-RWSEM_ACTIVE_MASK-1)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
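
/*
 * A rough sketch of the states these biases encode in sem->count
 * (implied by the constants above, shown as 64-bit values):
 *
 *   unlocked                      0x0000000000000000  RWSEM_UNLOCKED_VALUE
 *   one reader active             0x0000000000000001  +RWSEM_ACTIVE_READ_BIAS
 *   three readers active          0x0000000000000003
 *   one writer active             0xffffffff00000001  +RWSEM_ACTIVE_WRITE_BIAS
 *   one waiter queued, no holder  0xffffffff00000000  +RWSEM_WAITING_BIAS
 *
 * The low 32 bits (RWSEM_ACTIVE_MASK) count active holders; a negative
 * count signals a writer and/or queued waiters.
 */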

/*
 * lock for reading
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_inc_return((atomic64_t *)(&sem->count)) <= 0L))
		rwsem_down_read_failed(sem);
}

static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	long tmp;

	while ((tmp = sem->count) >= 0L) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			return 1;
		}
	}
	return 0;
}
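
/*
 * The trylock loop above snapshots sem->count and attempts the CAS only
 * while the count is non-negative, i.e. while no writer is active or
 * queued; a failed cmpxchg just means another CPU changed the count
 * first, so the snapshot is retaken.
 */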

/*
 * lock for writing
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				  (atomic64_t *)(&sem->count));
	if (unlikely(tmp != RWSEM_ACTIVE_WRITE_BIAS))
		rwsem_down_write_failed(sem);
}

static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	long tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_dec_return((atomic64_t *)(&sem->count));
	if (unlikely(tmp < -1L && (tmp & RWSEM_ACTIVE_MASK) == 0L))
		rwsem_wake(sem);
}
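
/*
 * In __up_read() above, tmp < -1L means a waiting bias is still present
 * (tasks are queued), and a clear RWSEM_ACTIVE_MASK means the departing
 * reader was the last active holder, so rwsem_wake() can grant the lock
 * to a waiter.
 */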

/*
 * unlock after writing
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	if (unlikely(atomic64_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
					 (atomic64_t *)(&sem->count)) < 0L))
		rwsem_wake(sem);
}
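
/*
 * Likewise in __up_write(): removing the write bias leaves a negative
 * count only when queued waiters have contributed a waiting bias, in
 * which case one of them must be woken.
 */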

/*
 * implement atomic add functionality
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	atomic64_add(delta, (atomic64_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	long tmp;

	tmp = atomic64_add_return(-RWSEM_WAITING_BIAS, (atomic64_t *)(&sem->count));
	if (tmp < 0L)
		rwsem_downgrade_wake(sem);
}
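
/*
 * The downgrade works because the caller holds RWSEM_ACTIVE_WRITE_BIAS
 * (WAITING_BIAS + ACTIVE_BIAS): adding -RWSEM_WAITING_BIAS converts that
 * into a single RWSEM_ACTIVE_READ_BIAS. A result that is still negative
 * means waiters remain queued, so waiting readers are woken to share
 * the lock.
 */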

/*
 * implement exchange and add functionality
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	return atomic64_add_return(delta, (atomic64_t *)(&sem->count));
}

#endif /* __KERNEL__ */

#endif /* _SPARC64_RWSEM_H */
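
For context, these __down_*()/__up_*() primitives are not called directly; kernel code reaches them through the generic wrappers declared in linux/rwsem.h, as the #error at the top of this header enforces. Below is a minimal usage sketch: DECLARE_RWSEM() and the down_*/up_*/downgrade_write() calls are the standard kernel API, while my_rwsem, my_data, my_read() and my_write() are hypothetical names used only for illustration.

#include <linux/rwsem.h>

static DECLARE_RWSEM(my_rwsem);		/* hypothetical lock protecting my_data */
static int my_data;

/* Any number of readers may hold the semaphore concurrently. */
static int my_read(void)
{
	int val;

	down_read(&my_rwsem);		/* implemented by __down_read() above on sparc64 */
	val = my_data;
	up_read(&my_rwsem);		/* may call rwsem_wake() via __up_read() */
	return val;
}

/* A writer holds the semaphore exclusively. */
static void my_write(int val)
{
	down_write(&my_rwsem);		/* implemented by __down_write() above */
	my_data = val;
	downgrade_write(&my_rwsem);	/* become a reader without releasing the lock */
	/* ... continue reading my_data alongside other readers ... */
	up_read(&my_rwsem);
}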