#ifndef __LINUX_SEQLOCK_H
#define __LINUX_SEQLOCK_H
/*
 * Reader/writer consistent mechanism without starving writers. This type of
 * lock is for data where the reader wants a consistent set of information
 * and is willing to retry if the information changes. There are two types
 * of readers:
 * 1. Sequence readers which never block a writer but they may have to retry
 *    if a writer is in progress by detecting change in sequence number.
 *    Writers do not wait for a sequence reader.
 * 2. Locking readers which will wait if a writer or another locking reader
 *    is in progress. A locking reader in progress will also block a writer
 *    from going forward. Unlike the regular rwlock, the read lock here is
 *    exclusive so that only one locking reader can get it.
 *
 * This is not as cache friendly as brlock. Also, this may not work well
 * for data that contains pointers, because any writer could
 * invalidate a pointer that a reader was following.
 *
 * Expected non-blocking reader usage:
 *	do {
 *		seq = read_seqbegin(&foo);
 *		...
 *	} while (read_seqretry(&foo, seq));
 *
 * On non-SMP the spin locks disappear but the writer still needs
 * to increment the sequence variables because an interrupt routine could
 * change the state of the data.
 *
 * Based on x86_64 vsyscall gettimeofday
 * by Keith Owens and Andrea Arcangeli
 */

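/*
 * For symmetry, the expected writer side is sketched below, using the
 * seqlock API defined later in this file. This is illustrative only;
 * the seqlock `foo` and the fields shared_a/shared_b are hypothetical.
 * write_seqlock() makes the sequence odd, so readers racing with the
 * update fail read_seqretry() and loop again:
 *
 *	static DEFINE_SEQLOCK(foo);
 *	static int shared_a, shared_b;
 *
 *	static void update_foo(int a, int b)
 *	{
 *		write_seqlock(&foo);
 *		shared_a = a;
 *		shared_b = b;
 *		write_sequnlock(&foo);
 *	}
 */
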
#include <linux/spinlock.h>
#include <linux/preempt.h>
#include <linux/lockdep.h>
#include <asm/processor.h>

/*
 * Version using sequence counter only.
 * This can be used when code has its own mutex protecting the
 * updating starting before the write_seqcount_begin() and ending
 * after the write_seqcount_end().
 */

typedef struct seqcount {
	unsigned sequence;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} seqcount_t;

static inline void __seqcount_init(seqcount_t *s, const char *name,
				   struct lock_class_key *key)
{
	/*
	 * Make sure we are not reinitializing a held lock:
	 */
	lockdep_init_map(&s->dep_map, name, key, 0);
	s->sequence = 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define SEQCOUNT_DEP_MAP_INIT(lockname) \
		.dep_map = { .name = #lockname } \

# define seqcount_init(s)				\
	do {						\
		static struct lock_class_key __key;	\
		__seqcount_init((s), #s, &__key);	\
	} while (0)

static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
{
	seqcount_t *l = (seqcount_t *)s;
	unsigned long flags;

	local_irq_save(flags);
	seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
	seqcount_release(&l->dep_map, 1, _RET_IP_);
	local_irq_restore(flags);
}

#else
# define SEQCOUNT_DEP_MAP_INIT(lockname)
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
# define seqcount_lockdep_reader_access(x)
#endif

#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}

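/*
 * As a sketch, a structure protected by a bare seqcount (plus the
 * owner's own mutex, as required above) might look like this; the
 * struct and field names are hypothetical:
 *
 *	struct foo_stats {
 *		seqcount_t seq;
 *		u64 packets;
 *		u64 bytes;
 *	};
 *
 *	static void foo_stats_init(struct foo_stats *st)
 *	{
 *		seqcount_init(&st->seq);
 *		st->packets = 0;
 *		st->bytes = 0;
 *	}
 */
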
/**
 * __read_seqcount_begin - begin a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->sequence);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	return ret;
}

/**
 * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin_no_lockdep opens a read critical section of the given
 * seqcount, but without any lockdep checking. Validity of the critical
 * section is tested by calling read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s)
{
	unsigned ret = __read_seqcount_begin(s);
	smp_rmb();
	return ret;
}

/**
 * read_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * read_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 */
static inline unsigned read_seqcount_begin(const seqcount_t *s)
{
	seqcount_lockdep_reader_access(s);
	return read_seqcount_begin_no_lockdep(s);
}

/**
 * raw_seqcount_begin - begin a seq-read critical section
 * @s: pointer to seqcount_t
 * Returns: count to be passed to read_seqcount_retry
 *
 * raw_seqcount_begin opens a read critical section of the given seqcount.
 * Validity of the critical section is tested by calling
 * read_seqcount_retry().
 *
 * Unlike read_seqcount_begin(), this function will not wait for the count
 * to stabilize. If a writer is active when we begin, we will fail the
 * read_seqcount_retry() instead of stabilizing at the beginning of the
 * critical section.
 */
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
{
	unsigned ret = ACCESS_ONCE(s->sequence);

	seqcount_lockdep_reader_access(s);
	smp_rmb();
	return ret & ~1;
}

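/*
 * Sketch of the intended use: a fail-fast reader that prefers falling
 * back to a slow path over spinning. The foo_stats structure above and
 * the fallback policy are hypothetical. Because raw_seqcount_begin()
 * returns the count with the low bit cleared, an active writer
 * guarantees the retry check fails:
 *
 *	static bool foo_stats_try_read(struct foo_stats *st, u64 *packets)
 *	{
 *		unsigned seq = raw_seqcount_begin(&st->seq);
 *
 *		*packets = st->packets;
 *		return !read_seqcount_retry(&st->seq, seq);
 *	}
 *
 * If this returns false, the caller takes its slow path instead of
 * retrying in a loop.
 */
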
/**
 * __read_seqcount_retry - end a seq-read critical section (without barrier)
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
 * barrier. Callers should ensure that smp_rmb() or equivalent ordering is
 * provided before actually loading any of the variables that are to be
 * protected in this critical section.
 *
 * Use carefully, only in critical code, and comment how the barrier is
 * provided.
 */
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	return unlikely(s->sequence != start);
}

/**
 * read_seqcount_retry - end a seq-read critical section
 * @s: pointer to seqcount_t
 * @start: count, from read_seqcount_begin
 * Returns: 1 if retry is required, else 0
 *
 * read_seqcount_retry closes a read critical section of the given seqcount.
 * If the critical section was invalid, it must be ignored (and typically
 * retried).
 */
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
{
	smp_rmb();
	return __read_seqcount_retry(s, start);
}

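/*
 * Putting begin/retry together, a lockless reader of the hypothetical
 * foo_stats above copies the fields and loops until it gets a
 * consistent snapshot:
 *
 *	static void foo_stats_read(struct foo_stats *st,
 *				   u64 *packets, u64 *bytes)
 *	{
 *		unsigned seq;
 *
 *		do {
 *			seq = read_seqcount_begin(&st->seq);
 *			*packets = st->packets;
 *			*bytes = st->bytes;
 *		} while (read_seqcount_retry(&st->seq, seq));
 *	}
 */
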
/*
 * Sequence counter only version assumes that callers are using their
 * own mutexing.
 */
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
	s->sequence++;
	smp_wmb();
	seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
}

static inline void write_seqcount_begin(seqcount_t *s)
{
	write_seqcount_begin_nested(s, 0);
}

static inline void write_seqcount_end(seqcount_t *s)
{
	seqcount_release(&s->dep_map, 1, _RET_IP_);
	smp_wmb();
	s->sequence++;
}

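/*
 * Writer-side sketch: the bare seqcount relies on the caller's own
 * serialization, here a hypothetical mutex (which would also require
 * <linux/mutex.h>):
 *
 *	static DEFINE_MUTEX(foo_stats_mutex);
 *
 *	static void foo_stats_update(struct foo_stats *st, u64 pkts, u64 len)
 *	{
 *		mutex_lock(&foo_stats_mutex);
 *		write_seqcount_begin(&st->seq);
 *		st->packets += pkts;
 *		st->bytes += len;
 *		write_seqcount_end(&st->seq);
 *		mutex_unlock(&foo_stats_mutex);
 *	}
 */
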
/**
 * write_seqcount_barrier - invalidate in-progress read-side seq operations
 * @s: pointer to seqcount_t
 *
 * After write_seqcount_barrier, no read-side seq operations will complete
 * successfully and see data older than this.
 */
static inline void write_seqcount_barrier(seqcount_t *s)
{
	smp_wmb();
	s->sequence += 2;
}

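/*
 * Sketch: after updating data in place under the caller's lock, bumping
 * the count by two forces any lockless reader that may have seen the
 * old values to fail its retry check (names are hypothetical, reusing
 * the foo_stats example above):
 *
 *	mutex_lock(&foo_stats_mutex);
 *	st->packets = 0;
 *	st->bytes = 0;
 *	write_seqcount_barrier(&st->seq);
 *	mutex_unlock(&foo_stats_mutex);
 */
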
typedef struct {
	struct seqcount seqcount;
	spinlock_t lock;
} seqlock_t;

/*
 * These macros triggered gcc-3.x compile-time problems. We think these are
 * OK now. Be cautious.
 */
#define __SEQLOCK_UNLOCKED(lockname)			\
	{						\
		.seqcount = SEQCNT_ZERO(lockname),	\
		.lock = __SPIN_LOCK_UNLOCKED(lockname)	\
	}

#define seqlock_init(x)					\
	do {						\
		seqcount_init(&(x)->seqcount);		\
		spin_lock_init(&(x)->lock);		\
	} while (0)

#define DEFINE_SEQLOCK(x) \
		seqlock_t x = __SEQLOCK_UNLOCKED(x)

/*
 * Read side functions for starting and finalizing a read side section.
 */
static inline unsigned read_seqbegin(const seqlock_t *sl)
{
	return read_seqcount_begin(&sl->seqcount);
}

static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
	return read_seqcount_retry(&sl->seqcount, start);
}

/*
 * Lock out other writers and update the count.
 * Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void write_seqlock(seqlock_t *sl)
{
	spin_lock(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock(&sl->lock);
}

static inline void write_seqlock_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_bh(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_bh(&sl->lock);
}

static inline void write_seqlock_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
	write_seqcount_begin(&sl->seqcount);
}

static inline void write_sequnlock_irq(seqlock_t *sl)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	write_seqcount_begin(&sl->seqcount);
	return flags;
}

#define write_seqlock_irqsave(lock, flags)			\
	do { flags = __write_seqlock_irqsave(lock); } while (0)

static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
	write_seqcount_end(&sl->seqcount);
	spin_unlock_irqrestore(&sl->lock, flags);
}

/*
 * A locking reader exclusively locks out other writers and locking readers,
 * but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
 * Don't need preempt_disable() because that is in the spin_lock already.
 */
static inline void read_seqlock_excl(seqlock_t *sl)
{
	spin_lock(&sl->lock);
}

static inline void read_sequnlock_excl(seqlock_t *sl)
{
	spin_unlock(&sl->lock);
}

/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}

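/*
 * Sketch of the optimistic-then-locked calling pattern; walk_data() is
 * hypothetical and must tolerate concurrent updates on the lockless
 * pass. Starting with an even seq makes the first pass lockless; if it
 * raced with a writer, forcing seq odd retries under the exclusive
 * read lock:
 *
 *	int seq = 0;
 *
 *	do {
 *		read_seqbegin_or_lock(&lock, &seq);
 *		walk_data();
 *		if (!need_seqretry(&lock, seq))
 *			break;
 *		seq = 1;
 *	} while (1);
 *	done_seqretry(&lock, seq);
 */
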
static inline void read_seqlock_excl_bh(seqlock_t *sl)
{
	spin_lock_bh(&sl->lock);
}

static inline void read_sequnlock_excl_bh(seqlock_t *sl)
{
	spin_unlock_bh(&sl->lock);
}

static inline void read_seqlock_excl_irq(seqlock_t *sl)
{
	spin_lock_irq(&sl->lock);
}

static inline void read_sequnlock_excl_irq(seqlock_t *sl)
{
	spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
{
	unsigned long flags;

	spin_lock_irqsave(&sl->lock, flags);
	return flags;
}

#define read_seqlock_excl_irqsave(lock, flags)			\
	do { flags = __read_seqlock_excl_irqsave(lock); } while (0)

static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
{
	spin_unlock_irqrestore(&sl->lock, flags);
}

#endif /* __LINUX_SEQLOCK_H */