/*
 * include/asm-xtensa/rwsem.h
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Largely copied from include/asm-ppc/rwsem.h
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 */

#ifndef _XTENSA_RWSEM_H
#define _XTENSA_RWSEM_H

#ifndef _LINUX_RWSEM_H
#error "Please don't include <asm/rwsem.h> directly, use <linux/rwsem.h> instead."
#endif

#define RWSEM_UNLOCKED_VALUE		0x00000000
#define RWSEM_ACTIVE_BIAS		0x00000001
#define RWSEM_ACTIVE_MASK		0x0000ffff
#define RWSEM_WAITING_BIAS		(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS		(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

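/*
 * The count encodes both the number of active lockers (low 16 bits,
 * RWSEM_ACTIVE_MASK) and a waiting/writer bias in the high bits.
 * Illustrative values: 0x00000000 is unlocked, 0x00000001 is one
 * active reader, 0x00000003 is three active readers, and 0xffff0001
 * (RWSEM_ACTIVE_WRITE_BIAS) is an uncontended writer.  A negative
 * count means a writer holds the lock or tasks are queued waiting.
 */
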
/*
 * lock for reading
 */
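/*
 * Fast path: atomically add one reader bias to the count.  A strictly
 * positive result means no writer is active or waiting, so the read
 * lock is taken; otherwise block in rwsem_down_read_failed().
 */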
static inline void __down_read(struct rw_semaphore *sem)
{
	if (atomic_add_return(1, (atomic_t *)(&sem->count)) > 0)
		smp_wmb();
	else
		rwsem_down_read_failed(sem);
}

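/*
 * Trylock for reading: while no writer is active or waiting
 * (count >= 0), try to claim a reader slot with cmpxchg().  Returns 1
 * on success, 0 if a writer holds the lock or tasks are queued.
 */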
static inline int __down_read_trylock(struct rw_semaphore *sem)
{
	int tmp;

	while ((tmp = sem->count) >= 0) {
		if (tmp == cmpxchg(&sem->count, tmp,
				   tmp + RWSEM_ACTIVE_READ_BIAS)) {
			smp_wmb();
			return 1;
		}
	}
	return 0;
}

/*
 * lock for writing
 */
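/*
 * Fast path: add RWSEM_ACTIVE_WRITE_BIAS in one atomic step.  If the
 * result is exactly RWSEM_ACTIVE_WRITE_BIAS the semaphore was
 * previously unlocked and the write lock is taken; any other value
 * means readers, a writer, or waiters were present, so block in
 * rwsem_down_write_failed().
 */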
static inline void __down_write(struct rw_semaphore *sem)
{
	int tmp;

	tmp = atomic_add_return(RWSEM_ACTIVE_WRITE_BIAS,
				(atomic_t *)(&sem->count));
	if (tmp == RWSEM_ACTIVE_WRITE_BIAS)
		smp_wmb();
	else
		rwsem_down_write_failed(sem);
}

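/*
 * Trylock for writing: a single cmpxchg() from RWSEM_UNLOCKED_VALUE to
 * RWSEM_ACTIVE_WRITE_BIAS.  Succeeds (returns 1) only if the semaphore
 * was completely idle at that instant.
 */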
static inline int __down_write_trylock(struct rw_semaphore *sem)
{
	int tmp;

	tmp = cmpxchg(&sem->count, RWSEM_UNLOCKED_VALUE,
		      RWSEM_ACTIVE_WRITE_BIAS);
	smp_wmb();
	return tmp == RWSEM_UNLOCKED_VALUE;
}

/*
 * unlock after reading
 */
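/*
 * Drop one reader.  If the new count is negative but has no active
 * lockers left in the RWSEM_ACTIVE_MASK bits, only queued waiters
 * remain, so hand the lock off via rwsem_wake().
 */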
static inline void __up_read(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_sub_return(1, (atomic_t *)(&sem->count));
	if (tmp < -1 && (tmp & RWSEM_ACTIVE_MASK) == 0)
		rwsem_wake(sem);
}

/*
 * unlock after writing
 */
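/*
 * Drop the write lock by subtracting RWSEM_ACTIVE_WRITE_BIAS.  A
 * negative result means waiters queued up while the lock was held, so
 * wake them with rwsem_wake().
 */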
static inline void __up_write(struct rw_semaphore *sem)
{
	smp_wmb();
	if (atomic_sub_return(RWSEM_ACTIVE_WRITE_BIAS,
			      (atomic_t *)(&sem->count)) < 0)
		rwsem_wake(sem);
}

/*
 * implement atomic add functionality
 */
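/*
 * Adjust the count by an arbitrary delta without returning the result
 * and without an explicit memory barrier (contrast with
 * rwsem_atomic_update() below).
 */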
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
{
	atomic_add(delta, (atomic_t *)(&sem->count));
}

/*
 * downgrade write lock to read lock
 */
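/*
 * Convert a held write lock into a read lock by adding back
 * -RWSEM_WAITING_BIAS (i.e. +0x00010000), turning the write bias into
 * a single reader bias.  A negative result means other tasks are
 * queued, so let rwsem_downgrade_wake() admit any waiting readers.
 */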
static inline void __downgrade_write(struct rw_semaphore *sem)
{
	int tmp;

	smp_wmb();
	tmp = atomic_add_return(-RWSEM_WAITING_BIAS, (atomic_t *)(&sem->count));
	if (tmp < 0)
		rwsem_downgrade_wake(sem);
}

/*
 * implement exchange and add functionality
 */
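/*
 * Add a delta and return the new count, with a full memory barrier
 * ahead of the update (the only helper in this file that uses
 * smp_mb() rather than smp_wmb()).
 */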
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
	smp_mb();
	return atomic_add_return(delta, (atomic_t *)(&sem->count));
}

#endif /* _XTENSA_RWSEM_H */