Commit | Line | Data |
---|---|---|
bd9a4c7d OBC |
1 | /* |
2 | * Hardware spinlock public header | |
3 | * | |
4 | * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com | |
5 | * | |
6 | * Contact: Ohad Ben-Cohen <ohad@wizery.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify it | |
9 | * under the terms of the GNU General Public License version 2 as published | |
10 | * by the Free Software Foundation. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
15 | * GNU General Public License for more details. | |
16 | */ | |
17 | ||
18 | #ifndef __LINUX_HWSPINLOCK_H | |
19 | #define __LINUX_HWSPINLOCK_H | |
20 | ||
21 | #include <linux/err.h> | |
22 | #include <linux/sched.h> | |
23 | ||
24 | /* hwspinlock mode argument */ | |
25 | #define HWLOCK_IRQSTATE 0x01 /* Disable interrupts, save state */ | |
26 | #define HWLOCK_IRQ 0x02 /* Disable interrupts, don't save state */ | |
27 | ||
28 | struct hwspinlock; | |
29 | ||
30 | #if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE) | |
31 | ||
32 | int hwspin_lock_register(struct hwspinlock *lock); | |
33 | struct hwspinlock *hwspin_lock_unregister(unsigned int id); | |
34 | struct hwspinlock *hwspin_lock_request(void); | |
35 | struct hwspinlock *hwspin_lock_request_specific(unsigned int id); | |
36 | int hwspin_lock_free(struct hwspinlock *hwlock); | |
37 | int hwspin_lock_get_id(struct hwspinlock *hwlock); | |
38 | int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int, | |
39 | unsigned long *); | |
40 | int __hwspin_trylock(struct hwspinlock *, int, unsigned long *); | |
41 | void __hwspin_unlock(struct hwspinlock *, int, unsigned long *); | |
42 | ||
43 | #else /* !CONFIG_HWSPINLOCK */ | |
44 | ||
45 | /* | |
46 | * We don't want these functions to fail if CONFIG_HWSPINLOCK is not | |
47 | * enabled. We prefer to silently succeed in this case, and let the | |
48 | * code path get compiled away. This way, if CONFIG_HWSPINLOCK is not | |
49 | * required on a given setup, users will still work. | |
50 | * | |
51 | * The only exception is hwspin_lock_register/hwspin_lock_unregister, with which | |
52 | * we _do_ want users to fail (no point in registering hwspinlock instances if | |
53 | * the framework is not available). | |
54 | * | |
55 | * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking | |
56 | * users. Others, which care, can still check this with IS_ERR. | |
57 | */ | |
/*
 * Framework compiled out: hand back ERR_PTR(-ENODEV).  NULL-checking
 * callers will treat this as a valid handle and their hwspinlock code
 * path compiles away; IS_ERR()-aware callers can still detect that the
 * framework is absent.
 */
static inline struct hwspinlock *hwspin_lock_request(void)
{
	return ERR_PTR(-ENODEV);
}
62 | ||
/*
 * Framework compiled out: as with hwspin_lock_request(), return
 * ERR_PTR(-ENODEV) so NULL-checkers silently succeed while IS_ERR()
 * users can still tell the framework is unavailable.
 */
static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	return ERR_PTR(-ENODEV);
}
67 | ||
/* Framework compiled out: freeing is a no-op that reports success. */
static inline int hwspin_lock_free(struct hwspinlock *hwlock)
{
	return 0;
}
72 | ||
/* Framework compiled out: "taking" the lock trivially succeeds. */
static inline int __hwspin_lock_timeout(struct hwspinlock *hwlock,
					unsigned int to, int mode,
					unsigned long *flags)
{
	return 0;
}
79 | ||
/* Framework compiled out: the trylock always "wins". */
static inline int __hwspin_trylock(struct hwspinlock *hwlock, int mode,
				   unsigned long *flags)
{
	return 0;
}
85 | ||
/*
 * Framework compiled out: unlocking is a no-op.
 *
 * Fix: the previous stub ended with "return 0;", which is invalid in a
 * function returning void (C99 6.8.6.4p1) and a hard error under
 * -Werror=return-type.  A void stub simply has an empty body.
 */
static inline
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
}
91 | ||
/* Framework compiled out: report a fixed id of 0. */
static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	return 0;
}
96 | ||
/*
 * Registration must visibly fail without the framework: there is no
 * point in publishing hwspinlock instances nobody can manage.
 */
static inline int hwspin_lock_register(struct hwspinlock *hwlock)
{
	return -ENODEV;
}
101 | ||
/* Unregistering likewise fails: no framework, nothing to remove. */
static inline struct hwspinlock *hwspin_lock_unregister(unsigned int id)
{
	return NULL;
}
106 | ||
107 | #endif /* !CONFIG_HWSPINLOCK */ | |
108 | ||
109 | /** | |
110 | * hwspin_trylock_irqsave() - try to lock an hwspinlock, disable interrupts | |
111 | * @hwlock: an hwspinlock which we want to trylock | |
112 | * @flags: a pointer to where the caller's interrupt state will be saved at | |
113 | * | |
114 | * This function attempts to lock the underlying hwspinlock, and will | |
115 | * immediately fail if the hwspinlock is already locked. | |
116 | * | |
117 | * Upon a successful return from this function, preemption and local | |
118 | * interrupts are disabled (previous interrupts state is saved at @flags), | |
119 | * so the caller must not sleep, and is advised to release the hwspinlock | |
120 | * as soon as possible. | |
121 | * | |
122 | * Returns 0 if we successfully locked the hwspinlock, -EBUSY if | |
123 | * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. | |
124 | */ | |
125 | static inline | |
126 | int hwspin_trylock_irqsave(struct hwspinlock *hwlock, unsigned long *flags) | |
127 | { | |
128 | return __hwspin_trylock(hwlock, HWLOCK_IRQSTATE, flags); | |
129 | } | |
130 | ||
131 | /** | |
132 | * hwspin_trylock_irq() - try to lock an hwspinlock, disable interrupts | |
133 | * @hwlock: an hwspinlock which we want to trylock | |
134 | * | |
135 | * This function attempts to lock the underlying hwspinlock, and will | |
136 | * immediately fail if the hwspinlock is already locked. | |
137 | * | |
138 | * Upon a successful return from this function, preemption and local | |
139 | * interrupts are disabled, so the caller must not sleep, and is advised | |
140 | * to release the hwspinlock as soon as possible. | |
141 | * | |
142 | * Returns 0 if we successfully locked the hwspinlock, -EBUSY if | |
143 | * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. | |
144 | */ | |
145 | static inline int hwspin_trylock_irq(struct hwspinlock *hwlock) | |
146 | { | |
147 | return __hwspin_trylock(hwlock, HWLOCK_IRQ, NULL); | |
148 | } | |
149 | ||
150 | /** | |
151 | * hwspin_trylock() - attempt to lock a specific hwspinlock | |
152 | * @hwlock: an hwspinlock which we want to trylock | |
153 | * | |
154 | * This function attempts to lock an hwspinlock, and will immediately fail | |
155 | * if the hwspinlock is already taken. | |
156 | * | |
157 | * Upon a successful return from this function, preemption is disabled, | |
158 | * so the caller must not sleep, and is advised to release the hwspinlock | |
159 | * as soon as possible. This is required in order to minimize remote cores | |
160 | * polling on the hardware interconnect. | |
161 | * | |
162 | * Returns 0 if we successfully locked the hwspinlock, -EBUSY if | |
163 | * the hwspinlock was already taken, and -EINVAL if @hwlock is invalid. | |
164 | */ | |
165 | static inline int hwspin_trylock(struct hwspinlock *hwlock) | |
166 | { | |
167 | return __hwspin_trylock(hwlock, 0, NULL); | |
168 | } | |
169 | ||
170 | /** | |
171 | * hwspin_lock_timeout_irqsave() - lock hwspinlock, with timeout, disable irqs | |
172 | * @hwlock: the hwspinlock to be locked | |
173 | * @to: timeout value in msecs | |
174 | * @flags: a pointer to where the caller's interrupt state will be saved at | |
175 | * | |
176 | * This function locks the underlying @hwlock. If the @hwlock | |
177 | * is already taken, the function will busy loop waiting for it to | |
178 | * be released, but give up when @timeout msecs have elapsed. | |
179 | * | |
180 | * Upon a successful return from this function, preemption and local interrupts | |
181 | * are disabled (plus previous interrupt state is saved), so the caller must | |
182 | * not sleep, and is advised to release the hwspinlock as soon as possible. | |
183 | * | |
184 | * Returns 0 when the @hwlock was successfully taken, and an appropriate | |
185 | * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still | |
186 | * busy after @timeout msecs). The function will never sleep. | |
187 | */ | |
188 | static inline int hwspin_lock_timeout_irqsave(struct hwspinlock *hwlock, | |
189 | unsigned int to, unsigned long *flags) | |
190 | { | |
191 | return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQSTATE, flags); | |
192 | } | |
193 | ||
194 | /** | |
195 | * hwspin_lock_timeout_irq() - lock hwspinlock, with timeout, disable irqs | |
196 | * @hwlock: the hwspinlock to be locked | |
197 | * @to: timeout value in msecs | |
198 | * | |
199 | * This function locks the underlying @hwlock. If the @hwlock | |
200 | * is already taken, the function will busy loop waiting for it to | |
201 | * be released, but give up when @timeout msecs have elapsed. | |
202 | * | |
203 | * Upon a successful return from this function, preemption and local interrupts | |
204 | * are disabled so the caller must not sleep, and is advised to release the | |
205 | * hwspinlock as soon as possible. | |
206 | * | |
207 | * Returns 0 when the @hwlock was successfully taken, and an appropriate | |
208 | * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still | |
209 | * busy after @timeout msecs). The function will never sleep. | |
210 | */ | |
211 | static inline | |
212 | int hwspin_lock_timeout_irq(struct hwspinlock *hwlock, unsigned int to) | |
213 | { | |
214 | return __hwspin_lock_timeout(hwlock, to, HWLOCK_IRQ, NULL); | |
215 | } | |
216 | ||
217 | /** | |
218 | * hwspin_lock_timeout() - lock an hwspinlock with timeout limit | |
219 | * @hwlock: the hwspinlock to be locked | |
220 | * @to: timeout value in msecs | |
221 | * | |
222 | * This function locks the underlying @hwlock. If the @hwlock | |
223 | * is already taken, the function will busy loop waiting for it to | |
224 | * be released, but give up when @timeout msecs have elapsed. | |
225 | * | |
226 | * Upon a successful return from this function, preemption is disabled | |
227 | * so the caller must not sleep, and is advised to release the hwspinlock | |
228 | * as soon as possible. | |
229 | * This is required in order to minimize remote cores polling on the | |
230 | * hardware interconnect. | |
231 | * | |
232 | * Returns 0 when the @hwlock was successfully taken, and an appropriate | |
233 | * error code otherwise (most notably an -ETIMEDOUT if the @hwlock is still | |
234 | * busy after @timeout msecs). The function will never sleep. | |
235 | */ | |
236 | static inline | |
237 | int hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to) | |
238 | { | |
239 | return __hwspin_lock_timeout(hwlock, to, 0, NULL); | |
240 | } | |
241 | ||
242 | /** | |
243 | * hwspin_unlock_irqrestore() - unlock hwspinlock, restore irq state | |
244 | * @hwlock: a previously-acquired hwspinlock which we want to unlock | |
245 | * @flags: previous caller's interrupt state to restore | |
246 | * | |
247 | * This function will unlock a specific hwspinlock, enable preemption and | |
248 | * restore the previous state of the local interrupts. It should be used | |
249 | * to undo, e.g., hwspin_trylock_irqsave(). | |
250 | * | |
251 | * @hwlock must be already locked before calling this function: it is a bug | |
252 | * to call unlock on a @hwlock that is already unlocked. | |
253 | */ | |
254 | static inline void hwspin_unlock_irqrestore(struct hwspinlock *hwlock, | |
255 | unsigned long *flags) | |
256 | { | |
257 | __hwspin_unlock(hwlock, HWLOCK_IRQSTATE, flags); | |
258 | } | |
259 | ||
260 | /** | |
261 | * hwspin_unlock_irq() - unlock hwspinlock, enable interrupts | |
262 | * @hwlock: a previously-acquired hwspinlock which we want to unlock | |
263 | * | |
264 | * This function will unlock a specific hwspinlock, enable preemption and | |
265 | * enable local interrupts. Should be used to undo hwspin_lock_irq(). | |
266 | * | |
267 | * @hwlock must be already locked (e.g. by hwspin_trylock_irq()) before | |
268 | * calling this function: it is a bug to call unlock on a @hwlock that is | |
269 | * already unlocked. | |
270 | */ | |
271 | static inline void hwspin_unlock_irq(struct hwspinlock *hwlock) | |
272 | { | |
273 | __hwspin_unlock(hwlock, HWLOCK_IRQ, NULL); | |
274 | } | |
275 | ||
276 | /** | |
277 | * hwspin_unlock() - unlock hwspinlock | |
278 | * @hwlock: a previously-acquired hwspinlock which we want to unlock | |
279 | * | |
280 | * This function will unlock a specific hwspinlock and enable preemption | |
281 | * back. | |
282 | * | |
283 | * @hwlock must be already locked (e.g. by hwspin_trylock()) before calling | |
284 | * this function: it is a bug to call unlock on a @hwlock that is already | |
285 | * unlocked. | |
286 | */ | |
287 | static inline void hwspin_unlock(struct hwspinlock *hwlock) | |
288 | { | |
289 | __hwspin_unlock(hwlock, 0, NULL); | |
290 | } | |
291 | ||
292 | #endif /* __LINUX_HWSPINLOCK_H */ |