/*
 * Hardware spinlock framework
 *
 * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com
 *
 * Contact: Ohad Ben-Cohen <ohad@wizery.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/jiffies.h>
#include <linux/radix-tree.h>
#include <linux/hwspinlock.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/of.h>

#include "hwspinlock_internal.h"

/* radix tree tags */
#define HWSPINLOCK_UNUSED	(0) /* tags an hwspinlock as unused */

/*
 * A radix tree is used to maintain the available hwspinlock instances.
 * The tree associates hwspinlock pointers with their integer key id,
 * and provides an easy-to-use API which keeps the hwspinlock core code
 * simple and easy to read.
 *
 * Radix trees are quick on lookups, and reasonably efficient in terms of
 * storage, especially with the high-density usage this framework requires
 * (a continuous range of integer keys, beginning with zero, is used as
 * the IDs of the hwspinlock instances).
 *
 * The radix tree API supports tagging items in the tree, which this
 * framework uses to mark unused hwspinlock instances (see the
 * HWSPINLOCK_UNUSED tag above). As a result, the process of querying the
 * tree, looking for an unused hwspinlock instance, is reduced to a
 * single radix tree API call.
 */
static RADIX_TREE(hwspinlock_tree, GFP_KERNEL);

/*
 * Synchronization of access to the tree is achieved using this mutex,
 * as the radix-tree API requires that users provide all synchronization.
 * A mutex is needed because we're using non-atomic radix tree allocations.
 */
static DEFINE_MUTEX(hwspinlock_tree_lock);

/**
 * __hwspin_trylock() - attempt to lock a specific hwspinlock
 * @hwlock: an hwspinlock which we want to trylock
 * @mode: controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function attempts to lock an hwspinlock, and will immediately
 * fail if the hwspinlock is already taken.
 *
 * Upon a successful return from this function, preemption (and possibly
 * interrupts) is disabled, so the caller must not sleep, and is advised to
 * release the hwspinlock as soon as possible. This is required in order to
 * minimize remote cores polling on the hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_trylock, spin_trylock_irq and
 * spin_trylock_irqsave.
 *
 * Returns 0 if we successfully locked the hwspinlock or -EBUSY if
 * the hwspinlock was already taken.
 * This function will never sleep.
 */
int __hwspin_trylock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	int ret;

	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * This spin_lock{_irq, _irqsave} serves three purposes:
	 *
	 * 1. Disable preemption, in order to minimize the period of time
	 *    in which the hwspinlock is taken. This is important in order
	 *    to minimize the possible polling on the hardware interconnect
	 *    by a remote user of this lock.
	 * 2. Make the hwspinlock SMP-safe (so we can take it from
	 *    additional contexts on the local host).
	 * 3. Ensure that in_atomic/might_sleep checks catch potential
	 *    problems with hwspinlock usage (e.g. scheduler checks like
	 *    'scheduling while atomic' etc.)
	 */
	if (mode == HWLOCK_IRQSTATE)
		ret = spin_trylock_irqsave(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		ret = spin_trylock_irq(&hwlock->lock);
	else
		ret = spin_trylock(&hwlock->lock);

	/* is lock already taken by another context on the local cpu? */
	if (!ret)
		return -EBUSY;

	/* try to take the hwspinlock device */
	ret = hwlock->bank->ops->trylock(hwlock);

	/* if hwlock is already taken, undo spin_trylock_* and exit */
	if (!ret) {
		if (mode == HWLOCK_IRQSTATE)
			spin_unlock_irqrestore(&hwlock->lock, *flags);
		else if (mode == HWLOCK_IRQ)
			spin_unlock_irq(&hwlock->lock);
		else
			spin_unlock(&hwlock->lock);

		return -EBUSY;
	}

	/*
	 * We can be sure the other core's memory operations
	 * are observable to us only _after_ we successfully take
	 * the hwspinlock, and we must make sure that subsequent memory
	 * operations (both reads and writes) will not be reordered before
	 * we actually took the hwspinlock.
	 *
	 * Note: the implicit memory barrier of the spinlock above is too
	 * early, so we need this additional explicit memory barrier.
	 */
	mb();

	return 0;
}
EXPORT_SYMBOL_GPL(__hwspin_trylock);

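/*
 * Usage sketch (illustrative only, kept as a comment): taking and releasing
 * the lock through the hwspin_trylock_irqsave() static inline wrapper from
 * <linux/hwspinlock.h>, which funnels into __hwspin_trylock() with
 * HWLOCK_IRQSTATE. 'my_hwlock' is a hypothetical lock assumed to have been
 * obtained earlier via hwspin_lock_request():
 *
 *	unsigned long flags;
 *	int ret;
 *
 *	ret = hwspin_trylock_irqsave(my_hwlock, &flags);
 *	if (ret)
 *		return ret;	(lock busy: a remote core or another
 *				 local context holds it)
 *
 *	(short, non-sleeping critical section goes here)
 *
 *	hwspin_unlock_irqrestore(my_hwlock, &flags);
 */
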
/**
 * __hwspin_lock_timeout() - lock an hwspinlock with timeout limit
 * @hwlock: the hwspinlock to be locked
 * @to: timeout value in msecs
 * @mode: mode which controls whether local interrupts are disabled or not
 * @flags: a pointer to where the caller's interrupt state will be saved
 * (if requested)
 *
 * This function locks the given @hwlock. If the @hwlock
 * is already taken, the function will busy loop waiting for it to
 * be released, but give up after @to msecs have elapsed.
 *
 * Upon a successful return from this function, preemption is disabled
 * (and possibly local interrupts, too), so the caller must not sleep,
 * and is advised to release the hwspinlock as soon as possible.
 * This is required in order to minimize remote cores polling on the
 * hardware interconnect.
 *
 * The user decides whether local interrupts are disabled or not, and if yes,
 * whether their previous state should be saved. It is up to the user
 * to choose the appropriate @mode of operation, exactly the same way users
 * should decide between spin_lock, spin_lock_irq and spin_lock_irqsave.
 *
 * Returns 0 when the @hwlock was successfully taken, and an appropriate
 * error code otherwise (most notably -ETIMEDOUT if the @hwlock is still
 * busy after @to msecs). The function will never sleep.
 */
int __hwspin_lock_timeout(struct hwspinlock *hwlock, unsigned int to,
					int mode, unsigned long *flags)
{
	int ret;
	unsigned long expire;

	expire = msecs_to_jiffies(to) + jiffies;

	for (;;) {
		/* Try to take the hwspinlock */
		ret = __hwspin_trylock(hwlock, mode, flags);
		if (ret != -EBUSY)
			break;

		/*
		 * The lock is already taken, let's check if the user wants
		 * us to try again
		 */
		if (time_is_before_eq_jiffies(expire))
			return -ETIMEDOUT;

		/*
		 * Allow platform-specific relax handlers to prevent
		 * hogging the interconnect (no sleeping, though)
		 */
		if (hwlock->bank->ops->relax)
			hwlock->bank->ops->relax(hwlock);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(__hwspin_lock_timeout);

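/*
 * Usage sketch (illustrative only): bounded busy-waiting via the
 * hwspin_lock_timeout_irq() wrapper from <linux/hwspinlock.h>, which ends
 * up in __hwspin_lock_timeout() with HWLOCK_IRQ. The 10 msecs budget and
 * 'my_hwlock' are assumptions made up for the example:
 *
 *	int ret;
 *
 *	ret = hwspin_lock_timeout_irq(my_hwlock, 10);
 *	if (ret == -ETIMEDOUT)
 *		return ret;	(still busy after 10 msecs)
 *
 *	(critical section; local interrupts are disabled here)
 *
 *	hwspin_unlock_irq(my_hwlock);
 */
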
/**
 * __hwspin_unlock() - unlock a specific hwspinlock
 * @hwlock: a previously-acquired hwspinlock which we want to unlock
 * @mode: controls whether local interrupts need to be restored or not
 * @flags: previous caller's interrupt state to restore (if requested)
 *
 * This function will unlock a specific hwspinlock, enable preemption and
 * (possibly) enable interrupts or restore their previous state.
 * @hwlock must be already locked before calling this function: it is a bug
 * to call unlock on a @hwlock that is already unlocked.
 *
 * The user decides whether local interrupts should be enabled or not, and
 * if yes, whether their previous state should be restored. It is up
 * to the user to choose the appropriate @mode of operation, exactly the
 * same way users decide between spin_unlock, spin_unlock_irq and
 * spin_unlock_irqrestore.
 *
 * The function will never sleep.
 */
void __hwspin_unlock(struct hwspinlock *hwlock, int mode, unsigned long *flags)
{
	BUG_ON(!hwlock);
	BUG_ON(!flags && mode == HWLOCK_IRQSTATE);

	/*
	 * We must make sure that memory operations (both reads and writes),
	 * done before unlocking the hwspinlock, will not be reordered
	 * after the lock is released.
	 *
	 * That's the purpose of this explicit memory barrier.
	 *
	 * Note: the memory barrier induced by the spin_unlock below is too
	 * late; the other core is going to access memory soon after it takes
	 * the hwspinlock, and by then we want to be sure our memory operations
	 * are already observable.
	 */
	mb();

	hwlock->bank->ops->unlock(hwlock);

	/* Undo the spin_trylock{_irq, _irqsave} called while locking */
	if (mode == HWLOCK_IRQSTATE)
		spin_unlock_irqrestore(&hwlock->lock, *flags);
	else if (mode == HWLOCK_IRQ)
		spin_unlock_irq(&hwlock->lock);
	else
		spin_unlock(&hwlock->lock);
}
EXPORT_SYMBOL_GPL(__hwspin_unlock);

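/*
 * Usage sketch (illustrative only): lock and unlock must use the same mode
 * and, for the IRQSTATE variants, the same 'flags' storage. A hypothetical
 * paired sequence, with update_shared_area() standing in for whatever the
 * caller protects with the lock:
 *
 *	unsigned long flags;
 *
 *	if (hwspin_lock_timeout_irqsave(my_hwlock, 5, &flags))
 *		return -EBUSY;
 *	update_shared_area();
 *	hwspin_unlock_irqrestore(my_hwlock, &flags);
 */
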
/**
 * of_hwspin_lock_simple_xlate - translate hwlock_spec to return a lock id
 * @hwlock_spec: hwlock specifier as found in the device tree
 *
 * This is a simple translation function, suitable for hwspinlock platform
 * drivers that only have a lock specifier length of 1.
 *
 * Returns a relative index of the lock within the specified bank on success,
 * or -EINVAL on invalid specifier cell count.
 */
static inline int
of_hwspin_lock_simple_xlate(const struct of_phandle_args *hwlock_spec)
{
	if (WARN_ON(hwlock_spec->args_count != 1))
		return -EINVAL;

	return hwlock_spec->args[0];
}

/**
 * of_hwspin_lock_get_id() - get lock id for an OF phandle-based specific lock
 * @np: device node from which to request the specific hwlock
 * @index: index of the hwlock in the list of values
 *
 * This function provides a means for DT users of the hwspinlock module to
 * get the global lock id of a specific hwspinlock using the phandle of the
 * hwspinlock device, so that it can be requested using the normal
 * hwspin_lock_request_specific() API.
 *
 * Returns the global lock id number on success, -EPROBE_DEFER if the
 * hwspinlock device is not yet registered, -EINVAL on invalid args
 * specifier value or an appropriate error as returned from the OF parsing
 * of the DT client node.
 */
int of_hwspin_lock_get_id(struct device_node *np, int index)
{
	struct of_phandle_args args;
	struct hwspinlock *hwlock;
	struct radix_tree_iter iter;
	void **slot;
	int id;
	int ret;

	ret = of_parse_phandle_with_args(np, "hwlocks", "#hwlock-cells", index,
					 &args);
	if (ret)
		return ret;

	/* Find the hwspinlock device: we need its base_id */
	ret = -EPROBE_DEFER;
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &hwspinlock_tree, &iter, 0) {
		hwlock = radix_tree_deref_slot(slot);
		if (unlikely(!hwlock))
			continue;

		if (hwlock->bank->dev->of_node == args.np) {
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	if (ret < 0)
		goto out;

	id = of_hwspin_lock_simple_xlate(&args);
	if (id < 0 || id >= hwlock->bank->num_locks) {
		ret = -EINVAL;
		goto out;
	}
	id += hwlock->bank->base_id;

out:
	of_node_put(args.np);
	return ret ? ret : id;
}
EXPORT_SYMBOL_GPL(of_hwspin_lock_get_id);

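/*
 * DT usage sketch (illustrative only; node names and the unit address are
 * made up): a client node referencing lock 2 of a bank whose specifier
 * length is 1, and the matching lookup in the client driver:
 *
 *	hwlock1: hwspinlock@4a0f6000 {
 *		#hwlock-cells = <1>;
 *	};
 *
 *	client {
 *		hwlocks = <&hwlock1 2>;
 *	};
 *
 * and in the client driver:
 *
 *	int id = of_hwspin_lock_get_id(dev->of_node, 0);
 *	if (id < 0)
 *		return id;	(possibly -EPROBE_DEFER)
 *	hwlock = hwspin_lock_request_specific(id);
 */
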
static int hwspin_lock_register_single(struct hwspinlock *hwlock, int id)
{
	struct hwspinlock *tmp;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	ret = radix_tree_insert(&hwspinlock_tree, id, hwlock);
	if (ret) {
		if (ret == -EEXIST)
			pr_err("hwspinlock id %d already exists!\n", id);
		goto out;
	}

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);

	/* self-sanity check which should never fail */
	WARN_ON(tmp != hwlock);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}

static struct hwspinlock *hwspin_lock_unregister_single(unsigned int id)
{
	struct hwspinlock *hwlock = NULL;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is not in use (tag is set) */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_err("hwspinlock %d still in use (or not present)\n", id);
		goto out;
	}

	hwlock = radix_tree_delete(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_err("failed to delete hwspinlock %d\n", id);
		goto out;
	}

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}

/**
 * hwspin_lock_register() - register a new hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 * @dev: the backing device
 * @ops: hwspinlock handlers for this device
 * @base_id: id of the first hardware spinlock in this bank
 * @num_locks: number of hwspinlocks provided by this device
 *
 * This function should be called from the underlying platform-specific
 * implementation, to register a new hwspinlock device instance.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
		const struct hwspinlock_ops *ops, int base_id, int num_locks)
{
	struct hwspinlock *hwlock;
	int ret = 0, i;

	if (!bank || !ops || !dev || !num_locks || !ops->trylock ||
							!ops->unlock) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	bank->dev = dev;
	bank->ops = ops;
	bank->base_id = base_id;
	bank->num_locks = num_locks;

	for (i = 0; i < num_locks; i++) {
		hwlock = &bank->lock[i];

		spin_lock_init(&hwlock->lock);
		hwlock->bank = bank;

		ret = hwspin_lock_register_single(hwlock, base_id + i);
		if (ret)
			goto reg_failed;
	}

	return 0;

reg_failed:
	while (--i >= 0)
		hwspin_lock_unregister_single(base_id + i);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_register);

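/*
 * Registration sketch (illustrative only): how a hypothetical platform
 * driver's probe() might register a bank of 8 locks. 'foo_ops' stands in
 * for the driver's struct hwspinlock_ops providing at least trylock and
 * unlock; 'base_id' is assumed to be chosen by the platform:
 *
 *	bank = devm_kzalloc(&pdev->dev, sizeof(*bank) +
 *			    8 * sizeof(struct hwspinlock), GFP_KERNEL);
 *	if (!bank)
 *		return -ENOMEM;
 *
 *	ret = hwspin_lock_register(bank, &pdev->dev, &foo_ops, base_id, 8);
 *	if (ret)
 *		return ret;
 */
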
/**
 * hwspin_lock_unregister() - unregister an hw spinlock device
 * @bank: the hwspinlock device, which usually provides numerous hw locks
 *
 * This function should be called from the underlying platform-specific
 * implementation, to unregister an existing (and unused) hwspinlock.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_unregister(struct hwspinlock_device *bank)
{
	struct hwspinlock *hwlock, *tmp;
	int i;

	for (i = 0; i < bank->num_locks; i++) {
		hwlock = &bank->lock[i];

		tmp = hwspin_lock_unregister_single(bank->base_id + i);
		if (!tmp)
			return -EBUSY;

		/* self-sanity check that should never fail */
		WARN_ON(tmp != hwlock);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(hwspin_lock_unregister);

/**
 * __hwspin_lock_request() - tag an hwspinlock as used and power it up
 * @hwlock: the target hwspinlock
 *
 * This is an internal function that prepares an hwspinlock instance
 * before it is given to the user. The function assumes that
 * hwspinlock_tree_lock is taken.
 *
 * Returns 0 or positive to indicate success, and a negative value to
 * indicate an error (with the appropriate error code)
 */
static int __hwspin_lock_request(struct hwspinlock *hwlock)
{
	struct device *dev = hwlock->bank->dev;
	struct hwspinlock *tmp;
	int ret;

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		return -EINVAL;
	}

	/* notify PM core that power is now needed */
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s: can't power on device\n", __func__);
		pm_runtime_put_noidle(dev);
		module_put(dev->driver->owner);
		return ret;
	}

	/* mark hwspinlock as used, should not fail */
	tmp = radix_tree_tag_clear(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* self-sanity check that should never fail */
	WARN_ON(tmp != hwlock);

	return ret;
}

/**
 * hwspin_lock_get_id() - retrieve id number of a given hwspinlock
 * @hwlock: a valid hwspinlock instance
 *
 * Returns the id number of a given @hwlock, or -EINVAL if @hwlock is invalid.
 */
int hwspin_lock_get_id(struct hwspinlock *hwlock)
{
	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	return hwlock_to_id(hwlock);
}
EXPORT_SYMBOL_GPL(hwspin_lock_get_id);

/**
 * hwspin_lock_request() - request an hwspinlock
 *
 * This function should be called by users of the hwspinlock device,
 * in order to dynamically assign them an unused hwspinlock.
 * Usually the user of this lock will then have to communicate the lock's id
 * to the remote core before it can be used for synchronization (to get the
 * id of a given hwlock, use hwspin_lock_get_id()).
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request(void)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* look for an unused lock */
	ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
						0, 1, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("a free hwspinlock is not available\n");
		hwlock = NULL;
		goto out;
	}

	/* sanity check that should never fail */
	WARN_ON(ret > 1);

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request);

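/*
 * Usage sketch (illustrative only): dynamically allocate a lock and publish
 * its id to the remote core. send_id_to_remote() is a hypothetical IPC
 * helper, not part of this framework:
 *
 *	struct hwspinlock *hwlock;
 *
 *	hwlock = hwspin_lock_request();
 *	if (!hwlock)
 *		return -EBUSY;
 *	send_id_to_remote(hwspin_lock_get_id(hwlock));
 */
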
/**
 * hwspin_lock_request_specific() - request for a specific hwspinlock
 * @id: index of the specific hwspinlock that is requested
 *
 * This function should be called by users of the hwspinlock module,
 * in order to assign them a specific hwspinlock.
 * Usually early board code will be calling this function in order to
 * reserve specific hwspinlock ids for predefined purposes.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns the address of the assigned hwspinlock, or NULL on error
 */
struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
{
	struct hwspinlock *hwlock;
	int ret;

	mutex_lock(&hwspinlock_tree_lock);

	/* make sure this hwspinlock exists */
	hwlock = radix_tree_lookup(&hwspinlock_tree, id);
	if (!hwlock) {
		pr_warn("hwspinlock %u does not exist\n", id);
		goto out;
	}

	/* sanity check (this shouldn't happen) */
	WARN_ON(hwlock_to_id(hwlock) != id);

	/* make sure this hwspinlock is unused */
	ret = radix_tree_tag_get(&hwspinlock_tree, id, HWSPINLOCK_UNUSED);
	if (ret == 0) {
		pr_warn("hwspinlock %u is already in use\n", id);
		hwlock = NULL;
		goto out;
	}

	/* mark as used and power up */
	ret = __hwspin_lock_request(hwlock);
	if (ret < 0)
		hwlock = NULL;

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return hwlock;
}
EXPORT_SYMBOL_GPL(hwspin_lock_request_specific);

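/*
 * Usage sketch (illustrative only): reserve a predefined lock id. The id 0
 * is an assumption, e.g. one agreed upon in advance with remote-core
 * firmware:
 *
 *	struct hwspinlock *hwlock;
 *
 *	hwlock = hwspin_lock_request_specific(0);
 *	if (!hwlock)
 *		return -EBUSY;
 */
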
/**
 * hwspin_lock_free() - free a specific hwspinlock
 * @hwlock: the specific hwspinlock to free
 *
 * This function marks @hwlock as free again.
 * Should only be called with an @hwlock that was retrieved from
 * an earlier call to hwspin_lock_request{_specific}.
 *
 * Should be called from a process context (might sleep)
 *
 * Returns 0 on success, or an appropriate error code on failure
 */
int hwspin_lock_free(struct hwspinlock *hwlock)
{
	struct device *dev;
	struct hwspinlock *tmp;
	int ret;

	if (!hwlock) {
		pr_err("invalid hwlock\n");
		return -EINVAL;
	}

	dev = hwlock->bank->dev;
	mutex_lock(&hwspinlock_tree_lock);

	/* make sure the hwspinlock is used */
	ret = radix_tree_tag_get(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);
	if (ret == 1) {
		dev_err(dev, "%s: hwlock is already free\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto out;
	}

	/* notify the underlying device that power is not needed */
	ret = pm_runtime_put(dev);
	if (ret < 0)
		goto out;

	/* mark this hwspinlock as available */
	tmp = radix_tree_tag_set(&hwspinlock_tree, hwlock_to_id(hwlock),
							HWSPINLOCK_UNUSED);

	/* sanity check (this shouldn't happen) */
	WARN_ON(tmp != hwlock);

	module_put(dev->driver->owner);

out:
	mutex_unlock(&hwspinlock_tree_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(hwspin_lock_free);

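/*
 * Teardown sketch (illustrative only): a client frees the lock it requested,
 * and the platform driver can later unregister the whole bank, typically
 * from its remove() path:
 *
 *	ret = hwspin_lock_free(hwlock);		(client side)
 *	...
 *	ret = hwspin_lock_unregister(bank);	(platform driver side)
 *	if (ret == -EBUSY)
 *		(some lock in the bank is still in use)
 */
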
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Hardware spinlock interface");
MODULE_AUTHOR("Ohad Ben-Cohen <ohad@wizery.com>");