#ifndef _ASM_GENERIC_LOCAL_H
#define _ASM_GENERIC_LOCAL_H

#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/types.h>

/* An unsigned long type for operations which are atomic for a single
 * CPU.  Usually used in combination with per-cpu variables. */
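
/*
 * Usage sketch (illustrative; the "packets" counter is made up).  A
 * local_t is only atomic against updates on the *same* CPU, so a bare
 * counter like this is only correct if every access happens on one CPU;
 * the usual pattern is one local_t per CPU via the cpu_local_* helpers
 * below:
 *
 *	static local_t packets = LOCAL_INIT(0);
 *
 *	local_inc(&packets);			// safe vs. this CPU's irqs
 *	printk("%lu\n", local_read(&packets));
 */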

#if BITS_PER_LONG == 32
/* Implement in terms of atomics. */

/* Deliberately a distinct struct, not a typedef of atomic_t itself:
 * we don't want local_t and atomic_t values to be mixed up. */
typedef struct
{
	atomic_t a;
} local_t;

#define LOCAL_INIT(i)	{ ATOMIC_INIT(i) }

#define local_read(l)	((unsigned long)atomic_read(&(l)->a))
#define local_set(l,i)	atomic_set((&(l)->a),(i))
#define local_inc(l)	atomic_inc(&(l)->a)
#define local_dec(l)	atomic_dec(&(l)->a)
#define local_add(i,l)	atomic_add((i),(&(l)->a))
#define local_sub(i,l)	atomic_sub((i),(&(l)->a))

/* Non-atomic variants, i.e. preemption is disabled and the counter is
 * never touched from interrupt context.  Some archs can optimize this
 * case well. */
#define __local_inc(l)		local_set((l), local_read(l) + 1)
#define __local_dec(l)		local_set((l), local_read(l) - 1)
#define __local_add(i,l)	local_set((l), local_read(l) + (i))
#define __local_sub(i,l)	local_set((l), local_read(l) - (i))
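
/*
 * Contract sketch for the __local_* variants (illustrative;
 * "pages_freed" and "nr" are made-up names): the caller guarantees that
 * preemption is off and that no interrupt handler touches the counter,
 * so the plain read-modify-write above cannot be interleaved:
 *
 *	preempt_disable();
 *	__local_add(nr, &pages_freed);
 *	preempt_enable();
 */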

#else /* BITS_PER_LONG != 32: a 32-bit atomic_t can't hold an unsigned long. */
/* Implement in terms of three variables.
   Another option would be to use local_irq_save/restore. */
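
/*
 * The irq-save alternative mentioned above would look roughly like this
 * sketch (not what this header implements; it assumes local_t held a
 * single "v" field):
 *
 *	static inline void local_inc(local_t *l)
 *	{
 *		unsigned long flags;
 *
 *		local_irq_save(flags);
 *		l->v++;
 *		local_irq_restore(flags);
 *	}
 *
 * Disabling interrupts makes one counter safe against this CPU's irq
 * and softirq handlers, but irq on/off is expensive on many
 * architectures; the three-slot scheme below avoids it.
 */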

typedef struct
{
	/* 0 = in hardirq, 1 = in softirq, 2 = process context. */
	unsigned long v[3];
} local_t;

/* Pick the slot for the current context:
 * hardirq:  !in_interrupt() + !in_irq() == 0 + 0 == 0
 * softirq:  !in_interrupt() + !in_irq() == 0 + 1 == 1
 * process:  !in_interrupt() + !in_irq() == 1 + 1 == 2 */
#define _LOCAL_VAR(l)		((l)->v[!in_interrupt() + !in_irq()])

#define LOCAL_INIT(i)	{ { (i), 0, 0 } }

/* The logical value is spread across the three slots; the true value
 * is their sum. */
static inline unsigned long local_read(local_t *l)
{
	return l->v[0] + l->v[1] + l->v[2];
}

static inline void local_set(local_t *l, unsigned long v)
{
	/* Collapse the whole value into slot 0, so the sum becomes v. */
	l->v[0] = v;
	l->v[1] = l->v[2] = 0;
}

static inline void local_inc(local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l)++;
	preempt_enable();
}

static inline void local_dec(local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l)--;
	preempt_enable();
}

static inline void local_add(unsigned long v, local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l) += v;
	preempt_enable();
}

static inline void local_sub(unsigned long v, local_t *l)
{
	preempt_disable();
	_LOCAL_VAR(l) -= v;
	preempt_enable();
}

/* Non-atomic variants, i.e. preemption is disabled and the counter is
 * never touched from interrupt context.  Some archs can optimize this
 * case well.  The slot choice is arbitrary here, since local_read()
 * sums all three slots; v[0] is simply used consistently. */
#define __local_inc(l)		((l)->v[0]++)
#define __local_dec(l)		((l)->v[0]--)
#define __local_add(i,l)	((l)->v[0] += (i))
#define __local_sub(i,l)	((l)->v[0] -= (i))

#endif /* Non-atomic implementation */

/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable (e.g. mystruct.foo), not an address.
 */
#define cpu_local_read(v)	local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i)	local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v)	local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v)	local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v)	local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v)	local_sub((i), &__get_cpu_var(v))

/* Non-atomic increments, i.e. preemption is disabled and the counter
 * is never touched from interrupt context.  Some archs can optimize
 * this case well.
 */
#define __cpu_local_inc(v)	__local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v)	__local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v)	__local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v)	__local_sub((i), &__get_cpu_var(v))
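
/*
 * Usage sketch for the per-cpu helpers (illustrative; "hits" is a
 * made-up variable).  The generic __get_cpu_var() needs a stable CPU
 * number, so keep preemption disabled around the accesses:
 *
 *	static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *	preempt_disable();
 *	cpu_local_inc(hits);		// irq-safe on this CPU
 *	__cpu_local_inc(hits);		// only if no irq handler uses "hits"
 *	preempt_enable();
 */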

#endif /* _ASM_GENERIC_LOCAL_H */