blockdev: turn a rw semaphore into a percpu rw semaphore
include/linux/percpu-rwsem.h (commit 62ac665f)
#ifndef _LINUX_PERCPU_RWSEM_H
#define _LINUX_PERCPU_RWSEM_H

#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/delay.h>

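/*
 * Lightweight percpu reader/writer semaphore.
 *
 * Readers increment a per-CPU counter on the fast path, serialized
 * against the writer only by RCU.  The writer sets ->locked, waits for
 * an RCU grace period so every new reader sees the flag, then waits for
 * the per-CPU counters to drain to zero.  While ->locked is set,
 * readers fall back to taking ->mtx, which the writer holds for the
 * whole write-side critical section.
 */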
struct percpu_rw_semaphore {
        unsigned __percpu *counters;
        bool locked;
        struct mutex mtx;
};

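/*
 * Reader lock: under rcu_read_lock(), recheck ->locked.  If a writer is
 * active, drop out of the RCU read section and serialize on ->mtx
 * instead, so the counter increment cannot race with the writer
 * draining the counters.
 */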
static inline void percpu_down_read(struct percpu_rw_semaphore *p)
{
        rcu_read_lock();
        if (unlikely(p->locked)) {
                rcu_read_unlock();
                mutex_lock(&p->mtx);
                this_cpu_inc(*p->counters);
                mutex_unlock(&p->mtx);
                return;
        }
        this_cpu_inc(*p->counters);
        rcu_read_unlock();
}

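/*
 * Reader unlock: order the critical section before the decrement, then
 * drop the per-CPU count that percpu_down_write() is waiting on.
 */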
static inline void percpu_up_read(struct percpu_rw_semaphore *p)
{
        /*
         * On X86, write operation in this_cpu_dec serves as a memory unlock
         * barrier (i.e. memory accesses may be moved before the write, but
         * no memory accesses are moved past the write).
         * On other architectures this may not be the case, so we need smp_mb()
         * there.
         */
#if defined(CONFIG_X86) && (!defined(CONFIG_X86_PPRO_FENCE) && !defined(CONFIG_X86_OOSTORE))
        barrier();
#else
        smp_mb();
#endif
        this_cpu_dec(*p->counters);
}

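/*
 * Sum the per-CPU counters.  The summation is racy against concurrent
 * readers, but by the time the writer calls this, ->locked is set and
 * an RCU grace period has elapsed, so new readers block on ->mtx and
 * the total can only fall toward zero.
 */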
static inline unsigned __percpu_count(unsigned __percpu *counters)
{
        unsigned total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += ACCESS_ONCE(*per_cpu_ptr(counters, cpu));

        return total;
}

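/*
 * Writer lock: take ->mtx to exclude other writers and slow-path
 * readers, set ->locked to divert new readers to the slow path, wait
 * one RCU grace period so every fast-path reader either saw ->locked
 * or already published its counter increment, then poll until all
 * outstanding readers drain.
 */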
static inline void percpu_down_write(struct percpu_rw_semaphore *p)
{
        mutex_lock(&p->mtx);
        p->locked = true;
        synchronize_rcu();
        while (__percpu_count(p->counters))
                msleep(1);
        smp_rmb(); /* paired with smp_mb() in percpu_up_read() */
}

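/*
 * Writer unlock: clear ->locked so fast-path readers resume, then drop
 * ->mtx to release any readers queued on the slow path.
 */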
static inline void percpu_up_write(struct percpu_rw_semaphore *p)
{
        p->locked = false;
        mutex_unlock(&p->mtx);
}

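/* Must be called before first use; allocates the per-CPU counters. */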
static inline int percpu_init_rwsem(struct percpu_rw_semaphore *p)
{
        p->counters = alloc_percpu(unsigned);
        if (unlikely(!p->counters))
                return -ENOMEM;
        p->locked = false;
        mutex_init(&p->mtx);
        return 0;
}

static inline void percpu_free_rwsem(struct percpu_rw_semaphore *p)
{
        free_percpu(p->counters);
        p->counters = NULL; /* catch use after free bugs */
}

#endif
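
A minimal usage sketch, not part of the header: a hypothetical struct my_dev
embedding the semaphore, with readers on the I/O path and a writer that
briefly freezes the device. The my_dev type and the function names are
illustrative, not from the commit.

struct my_dev {
        struct percpu_rw_semaphore lock;
        /* ... state protected against "frozen" transitions ... */
};

static int my_dev_setup(struct my_dev *d)
{
        return percpu_init_rwsem(&d->lock);     /* -ENOMEM on failure */
}

static void my_dev_io(struct my_dev *d)
{
        percpu_down_read(&d->lock);             /* cheap per-CPU inc */
        /* ... perform I/O; may run concurrently on every CPU ... */
        percpu_up_read(&d->lock);
}

static void my_dev_freeze_thaw(struct my_dev *d)
{
        percpu_down_write(&d->lock);            /* waits for all readers */
        /* ... exclusive section: no reader is inside ... */
        percpu_up_write(&d->lock);
}

static void my_dev_teardown(struct my_dev *d)
{
        percpu_free_rwsem(&d->lock);
}

Note that both sides can sleep (mutex_lock() on the reader slow path,
synchronize_rcu() and msleep() on the writer side), so all of these calls
must run in process context.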