GFS2: Optimise glock lru and end of life inodes
fs/gfs2/glock.c
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"
#include "bmap.h"
#define CREATE_TRACE_POINTS
#include "trace_gfs2.h"

struct gfs2_glock_iter {
	int hash;			/* hash bucket index        */
	struct gfs2_sbd *sdp;		/* incore superblock        */
	struct gfs2_glock *gl;		/* current glock struct     */
	char string[512];		/* scratch space            */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);

static struct dentry *gfs2_root;
static struct workqueue_struct *glock_workqueue;
struct workqueue_struct *gfs2_delete_workqueue;
static LIST_HEAD(lru_list);
static atomic_t lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(lru_lock);

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct hlist_bl_head gl_hash_table[GFS2_GL_HASH_SIZE];

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @lock: The glock number
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	bit_spin_lock(0, (unsigned long *)bl);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	struct hlist_bl_head *bl = &gl_hash_table[hash];
	__bit_spin_unlock(0, (unsigned long *)bl);
}

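/*
 * Illustrative sketch only (not part of the build): the per-bucket "lock"
 * above is bit 0 of the bucket head pointer itself, so a lookup or insert
 * hashes the lock name to a bucket and then takes that bit spinlock.  The
 * function name below is hypothetical.
 */
#if 0
static void example_bucket_locking(struct gfs2_sbd *sdp,
				   const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);	/* index into gl_hash_table[] */

	spin_lock_bucket(hash);		/* spins on bit 0 of gl_hash_table[hash] */
	/* ... walk or modify the hlist_bl chain for this bucket ... */
	spin_unlock_bucket(hash);
}
#endif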
static void gfs2_glock_dealloc(struct rcu_head *rcu)
{
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);

	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);
}

void gfs2_glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	call_rcu(&gl->gl_rcu, gfs2_glock_dealloc);
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	GLOCK_BUG_ON(gl, atomic_read(&gl->gl_ref) == 0);
	atomic_inc(&gl->gl_ref);
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(const struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (gl->gl_state == LM_ST_UNLOCKED)
		return 0;
	if (!list_empty(&gl->gl_holders))
		return 0;
	if (glops->go_demote_ok)
		return glops->go_demote_ok(gl);
	return 1;
}


void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);

	if (!list_empty(&gl->gl_lru))
		list_del_init(&gl->gl_lru);
	else
		atomic_inc(&lru_count);

	list_add_tail(&gl->gl_lru, &lru_list);
	set_bit(GLF_LRU, &gl->gl_flags);
	spin_unlock(&lru_lock);
}

static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
{
	spin_lock(&lru_lock);
	if (!list_empty(&gl->gl_lru)) {
		list_del_init(&gl->gl_lru);
		atomic_dec(&lru_count);
		clear_bit(GLF_LRU, &gl->gl_flags);
	}
	spin_unlock(&lru_lock);
}
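
/*
 * Illustrative sketch only (not part of the build): a glock goes to the
 * tail of the global LRU when it becomes a demotion candidate and is taken
 * off again when it is re-used; lru_count always tracks the list length
 * because both helpers above adjust it under lru_lock.  The function name
 * below is hypothetical.
 */
#if 0
static void example_lru_round_trip(struct gfs2_glock *gl)
{
	gfs2_glock_add_to_lru(gl);	/* tail of lru_list, GLF_LRU set */
	gfs2_glock_remove_from_lru(gl);	/* off the list, GLF_LRU cleared */
}
#endif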
182
97cc1025 183/**
bc015cb8 184 * __gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
97cc1025
SW
185 * @gl: the glock
186 *
bc015cb8
SW
187 * If the glock is demotable, then we add it (or move it) to the end
188 * of the glock LRU list.
97cc1025
SW
189 */
190
bc015cb8 191static void __gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
97cc1025 192{
29687a2a
SW
193 if (demote_ok(gl))
194 gfs2_glock_add_to_lru(gl);
97cc1025
SW
195}
196
8ff22a6f
BM
197/**
198 * gfs2_glock_put_nolock() - Decrement reference count on glock
199 * @gl: The glock to put
200 *
201 * This function should only be used if the caller has its own reference
202 * to the glock, in addition to the one it is dropping.
203 */
204
b94a170e 205void gfs2_glock_put_nolock(struct gfs2_glock *gl)
8ff22a6f
BM
206{
207 if (atomic_dec_and_test(&gl->gl_ref))
208 GLOCK_BUG_ON(gl, 1);
8ff22a6f
BM
209}
210
b3b94faa
DT
211/**
212 * gfs2_glock_put() - Decrement reference count on glock
213 * @gl: The glock to put
214 *
215 */
216
bc015cb8 217void gfs2_glock_put(struct gfs2_glock *gl)
b3b94faa 218{
bc015cb8
SW
219 struct gfs2_sbd *sdp = gl->gl_sbd;
220 struct address_space *mapping = gfs2_glock2aspace(gl);
b3b94faa 221
bc015cb8
SW
222 if (atomic_dec_and_test(&gl->gl_ref)) {
223 spin_lock_bucket(gl->gl_hash);
224 hlist_bl_del_rcu(&gl->gl_list);
225 spin_unlock_bucket(gl->gl_hash);
f42ab085 226 gfs2_glock_remove_from_lru(gl);
6802e340 227 GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
bc015cb8
SW
228 GLOCK_BUG_ON(gl, mapping && mapping->nrpages);
229 trace_gfs2_glock_put(gl);
230 sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
b3b94faa 231 }
b3b94faa
DT
232}
233
b3b94faa
DT
234/**
235 * search_bucket() - Find struct gfs2_glock by lock number
236 * @bucket: the bucket to search
237 * @name: The lock name
238 *
239 * Returns: NULL, or the struct gfs2_glock with the requested number
240 */
241
37b2fa6a 242static struct gfs2_glock *search_bucket(unsigned int hash,
899be4d3 243 const struct gfs2_sbd *sdp,
d6a53727 244 const struct lm_lockname *name)
b3b94faa
DT
245{
246 struct gfs2_glock *gl;
bc015cb8 247 struct hlist_bl_node *h;
b3b94faa 248
bc015cb8 249 hlist_bl_for_each_entry_rcu(gl, h, &gl_hash_table[hash], gl_list) {
b3b94faa
DT
250 if (!lm_name_equal(&gl->gl_name, name))
251 continue;
899be4d3
SW
252 if (gl->gl_sbd != sdp)
253 continue;
bc015cb8
SW
254 if (atomic_inc_not_zero(&gl->gl_ref))
255 return gl;
b3b94faa
DT
256 }
257
258 return NULL;
259}
260
/**
 * may_grant - check if it's ok to grant a new lock
 * @gl: The glock
 * @gh: The lock request which we wish to grant
 *
 * Returns: true if it's ok to grant the lock
 */

static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
{
	const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
	if ((gh->gh_state == LM_ST_EXCLUSIVE ||
	     gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
		return 0;
	if (gl->gl_state == gh->gh_state)
		return 1;
	if (gh->gh_flags & GL_EXACT)
		return 0;
	if (gl->gl_state == LM_ST_EXCLUSIVE) {
		if (gh->gh_state == LM_ST_SHARED && gh_head->gh_state == LM_ST_SHARED)
			return 1;
		if (gh->gh_state == LM_ST_DEFERRED && gh_head->gh_state == LM_ST_DEFERRED)
			return 1;
	}
	if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
		return 1;
	return 0;
}
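
/*
 * Worked example of the rules above: with the glock held SH and the head
 * holder SH, another SH request is granted immediately (the states match);
 * an EX request is not granted unless it is at the head of the queue.
 * With the glock in EX, a queued SH or DF request is only granted while
 * the head holder asked for that same state.  LM_FLAG_ANY accepts any
 * locked state, and GL_EXACT refuses everything except an exact match.
 */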
289
290static void gfs2_holder_wake(struct gfs2_holder *gh)
291{
292 clear_bit(HIF_WAIT, &gh->gh_iflags);
293 smp_mb__after_clear_bit();
294 wake_up_bit(&gh->gh_iflags, HIF_WAIT);
295}
296
d5341a92
SW
297/**
298 * do_error - Something unexpected has happened during a lock request
299 *
300 */
301
302static inline void do_error(struct gfs2_glock *gl, const int ret)
303{
304 struct gfs2_holder *gh, *tmp;
305
306 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
307 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
308 continue;
309 if (ret & LM_OUT_ERROR)
310 gh->gh_error = -EIO;
311 else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
312 gh->gh_error = GLR_TRYFAILED;
313 else
314 continue;
315 list_del_init(&gh->gh_list);
316 trace_gfs2_glock_queue(gh, 0);
317 gfs2_holder_wake(gh);
318 }
319}
320
6802e340
SW
321/**
322 * do_promote - promote as many requests as possible on the current queue
323 * @gl: The glock
324 *
813e0c46
SW
325 * Returns: 1 if there is a blocked holder at the head of the list, or 2
326 * if a type specific operation is underway.
6802e340
SW
327 */
328
329static int do_promote(struct gfs2_glock *gl)
55ba474d
HH
330__releases(&gl->gl_spin)
331__acquires(&gl->gl_spin)
6802e340
SW
332{
333 const struct gfs2_glock_operations *glops = gl->gl_ops;
334 struct gfs2_holder *gh, *tmp;
335 int ret;
336
337restart:
338 list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
339 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
340 continue;
341 if (may_grant(gl, gh)) {
342 if (gh->gh_list.prev == &gl->gl_holders &&
343 glops->go_lock) {
344 spin_unlock(&gl->gl_spin);
345 /* FIXME: eliminate this eventually */
346 ret = glops->go_lock(gh);
347 spin_lock(&gl->gl_spin);
348 if (ret) {
813e0c46
SW
349 if (ret == 1)
350 return 2;
6802e340
SW
351 gh->gh_error = ret;
352 list_del_init(&gh->gh_list);
63997775 353 trace_gfs2_glock_queue(gh, 0);
6802e340
SW
354 gfs2_holder_wake(gh);
355 goto restart;
356 }
357 set_bit(HIF_HOLDER, &gh->gh_iflags);
63997775 358 trace_gfs2_promote(gh, 1);
6802e340
SW
359 gfs2_holder_wake(gh);
360 goto restart;
361 }
362 set_bit(HIF_HOLDER, &gh->gh_iflags);
63997775 363 trace_gfs2_promote(gh, 0);
6802e340
SW
364 gfs2_holder_wake(gh);
365 continue;
366 }
367 if (gh->gh_list.prev == &gl->gl_holders)
368 return 1;
d5341a92 369 do_error(gl, 0);
6802e340
SW
370 break;
371 }
372 return 0;
373}
374
6802e340
SW
375/**
376 * find_first_waiter - find the first gh that's waiting for the glock
377 * @gl: the glock
378 */
379
380static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
381{
382 struct gfs2_holder *gh;
383
384 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
385 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
386 return gh;
387 }
388 return NULL;
389}
390
391/**
392 * state_change - record that the glock is now in a different state
393 * @gl: the glock
394 * @new_state: the new state
395 *
396 */
397
398static void state_change(struct gfs2_glock *gl, unsigned int new_state)
399{
400 int held1, held2;
401
402 held1 = (gl->gl_state != LM_ST_UNLOCKED);
403 held2 = (new_state != LM_ST_UNLOCKED);
404
405 if (held1 != held2) {
406 if (held2)
407 gfs2_glock_hold(gl);
408 else
8ff22a6f 409 gfs2_glock_put_nolock(gl);
6802e340 410 }
7b5e3d5f
SW
411 if (held1 && held2 && list_empty(&gl->gl_holders))
412 clear_bit(GLF_QUEUED, &gl->gl_flags);
6802e340
SW
413
414 gl->gl_state = new_state;
415 gl->gl_tchange = jiffies;
416}
417
418static void gfs2_demote_wake(struct gfs2_glock *gl)
419{
420 gl->gl_demote_state = LM_ST_EXCLUSIVE;
421 clear_bit(GLF_DEMOTE, &gl->gl_flags);
422 smp_mb__after_clear_bit();
423 wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
424}
425
426/**
427 * finish_xmote - The DLM has replied to one of our lock requests
428 * @gl: The glock
429 * @ret: The status from the DLM
430 *
431 */
432
433static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
434{
435 const struct gfs2_glock_operations *glops = gl->gl_ops;
436 struct gfs2_holder *gh;
437 unsigned state = ret & LM_OUT_ST_MASK;
813e0c46 438 int rv;
6802e340
SW
439
440 spin_lock(&gl->gl_spin);
63997775 441 trace_gfs2_glock_state_change(gl, state);
6802e340
SW
442 state_change(gl, state);
443 gh = find_first_waiter(gl);
444
445 /* Demote to UN request arrived during demote to SH or DF */
446 if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
447 state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
448 gl->gl_target = LM_ST_UNLOCKED;
449
450 /* Check for state != intended state */
451 if (unlikely(state != gl->gl_target)) {
452 if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
453 /* move to back of queue and try next entry */
454 if (ret & LM_OUT_CANCELED) {
455 if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
456 list_move_tail(&gh->gh_list, &gl->gl_holders);
457 gh = find_first_waiter(gl);
458 gl->gl_target = gh->gh_state;
459 goto retry;
460 }
461 /* Some error or failed "try lock" - report it */
462 if ((ret & LM_OUT_ERROR) ||
463 (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
464 gl->gl_target = gl->gl_state;
465 do_error(gl, ret);
466 goto out;
467 }
468 }
469 switch(state) {
470 /* Unlocked due to conversion deadlock, try again */
471 case LM_ST_UNLOCKED:
472retry:
473 do_xmote(gl, gh, gl->gl_target);
474 break;
475 /* Conversion fails, unlock and try again */
476 case LM_ST_SHARED:
477 case LM_ST_DEFERRED:
478 do_xmote(gl, gh, LM_ST_UNLOCKED);
479 break;
480 default: /* Everything else */
481 printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
482 GLOCK_BUG_ON(gl, 1);
483 }
484 spin_unlock(&gl->gl_spin);
6802e340
SW
485 return;
486 }
487
488 /* Fast path - we got what we asked for */
489 if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
490 gfs2_demote_wake(gl);
491 if (state != LM_ST_UNLOCKED) {
492 if (glops->go_xmote_bh) {
6802e340
SW
493 spin_unlock(&gl->gl_spin);
494 rv = glops->go_xmote_bh(gl, gh);
6802e340
SW
495 spin_lock(&gl->gl_spin);
496 if (rv) {
497 do_error(gl, rv);
498 goto out;
499 }
500 }
813e0c46
SW
501 rv = do_promote(gl);
502 if (rv == 2)
503 goto out_locked;
6802e340
SW
504 }
505out:
506 clear_bit(GLF_LOCK, &gl->gl_flags);
813e0c46 507out_locked:
6802e340 508 spin_unlock(&gl->gl_spin);
6802e340
SW
509}
510
6802e340
SW
511/**
512 * do_xmote - Calls the DLM to change the state of a lock
513 * @gl: The lock state
514 * @gh: The holder (only for promotes)
515 * @target: The target lock state
516 *
517 */
518
519static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
55ba474d
HH
520__releases(&gl->gl_spin)
521__acquires(&gl->gl_spin)
6802e340
SW
522{
523 const struct gfs2_glock_operations *glops = gl->gl_ops;
524 struct gfs2_sbd *sdp = gl->gl_sbd;
525 unsigned int lck_flags = gh ? gh->gh_flags : 0;
526 int ret;
527
528 lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
529 LM_FLAG_PRIORITY);
921169ca
SW
530 GLOCK_BUG_ON(gl, gl->gl_state == target);
531 GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
6802e340
SW
532 if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
533 glops->go_inval) {
534 set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
535 do_error(gl, 0); /* Fail queued try locks */
536 }
47a25380 537 gl->gl_req = target;
6802e340
SW
538 spin_unlock(&gl->gl_spin);
539 if (glops->go_xmote_th)
540 glops->go_xmote_th(gl);
541 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
542 glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
543 clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
544
545 gfs2_glock_hold(gl);
546 if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
547 gl->gl_state == LM_ST_DEFERRED) &&
548 !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
549 lck_flags |= LM_FLAG_TRY_1CB;
6802e340 550
921169ca
SW
551 if (sdp->sd_lockstruct.ls_ops->lm_lock) {
552 /* lock_dlm */
553 ret = sdp->sd_lockstruct.ls_ops->lm_lock(gl, target, lck_flags);
554 GLOCK_BUG_ON(gl, ret);
555 } else { /* lock_nolock */
556 finish_xmote(gl, target);
6802e340
SW
557 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
558 gfs2_glock_put(gl);
6802e340 559 }
921169ca 560
6802e340
SW
561 spin_lock(&gl->gl_spin);
562}
563
564/**
565 * find_first_holder - find the first "holder" gh
566 * @gl: the glock
567 */
568
569static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
570{
571 struct gfs2_holder *gh;
572
573 if (!list_empty(&gl->gl_holders)) {
574 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
575 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
576 return gh;
577 }
578 return NULL;
579}
580
581/**
582 * run_queue - do all outstanding tasks related to a glock
583 * @gl: The glock in question
584 * @nonblock: True if we must not block in run_queue
585 *
586 */
587
588static void run_queue(struct gfs2_glock *gl, const int nonblock)
55ba474d
HH
589__releases(&gl->gl_spin)
590__acquires(&gl->gl_spin)
6802e340
SW
591{
592 struct gfs2_holder *gh = NULL;
813e0c46 593 int ret;
6802e340
SW
594
595 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
596 return;
597
598 GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
599
600 if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
601 gl->gl_demote_state != gl->gl_state) {
602 if (find_first_holder(gl))
d8348de0 603 goto out_unlock;
6802e340
SW
604 if (nonblock)
605 goto out_sched;
606 set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
265d529c 607 GLOCK_BUG_ON(gl, gl->gl_demote_state == LM_ST_EXCLUSIVE);
6802e340
SW
608 gl->gl_target = gl->gl_demote_state;
609 } else {
610 if (test_bit(GLF_DEMOTE, &gl->gl_flags))
611 gfs2_demote_wake(gl);
813e0c46
SW
612 ret = do_promote(gl);
613 if (ret == 0)
d8348de0 614 goto out_unlock;
813e0c46 615 if (ret == 2)
a228df63 616 goto out;
6802e340
SW
617 gh = find_first_waiter(gl);
618 gl->gl_target = gh->gh_state;
619 if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
620 do_error(gl, 0); /* Fail queued try locks */
621 }
622 do_xmote(gl, gh, gl->gl_target);
a228df63 623out:
6802e340
SW
624 return;
625
626out_sched:
7e71c55e
SW
627 clear_bit(GLF_LOCK, &gl->gl_flags);
628 smp_mb__after_clear_bit();
6802e340
SW
629 gfs2_glock_hold(gl);
630 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
8ff22a6f 631 gfs2_glock_put_nolock(gl);
7e71c55e
SW
632 return;
633
d8348de0 634out_unlock:
6802e340 635 clear_bit(GLF_LOCK, &gl->gl_flags);
7e71c55e
SW
636 smp_mb__after_clear_bit();
637 return;
6802e340
SW
638}
639
b94a170e
BM
640static void delete_work_func(struct work_struct *work)
641{
642 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_delete);
643 struct gfs2_sbd *sdp = gl->gl_sbd;
044b9414 644 struct gfs2_inode *ip;
b94a170e 645 struct inode *inode;
044b9414
SW
646 u64 no_addr = gl->gl_name.ln_number;
647
648 ip = gl->gl_object;
649 /* Note: Unsafe to dereference ip as we don't hold right refs/locks */
b94a170e 650
b94a170e 651 if (ip)
b94a170e 652 inode = gfs2_ilookup(sdp->sd_vfs, no_addr);
044b9414
SW
653 else
654 inode = gfs2_lookup_by_inum(sdp, no_addr, NULL, GFS2_BLKST_UNLINKED);
655 if (inode && !IS_ERR(inode)) {
656 d_prune_aliases(inode);
657 iput(inode);
b94a170e
BM
658 }
659 gfs2_glock_put(gl);
660}
661
c4f68a13
BM
662static void glock_work_func(struct work_struct *work)
663{
6802e340 664 unsigned long delay = 0;
c4f68a13 665 struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
26bb7505 666 int drop_ref = 0;
c4f68a13 667
26bb7505 668 if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags)) {
6802e340 669 finish_xmote(gl, gl->gl_reply);
26bb7505
SW
670 drop_ref = 1;
671 }
c4f68a13 672 spin_lock(&gl->gl_spin);
265d529c
SW
673 if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
674 gl->gl_state != LM_ST_UNLOCKED &&
675 gl->gl_demote_state != LM_ST_EXCLUSIVE) {
6802e340
SW
676 unsigned long holdtime, now = jiffies;
677 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
678 if (time_before(now, holdtime))
679 delay = holdtime - now;
680 set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
681 }
682 run_queue(gl, 0);
c4f68a13 683 spin_unlock(&gl->gl_spin);
6802e340
SW
684 if (!delay ||
685 queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
686 gfs2_glock_put(gl);
26bb7505
SW
687 if (drop_ref)
688 gfs2_glock_put(gl);
c4f68a13
BM
689}
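
/*
 * Note on glock references (the pattern used throughout this file):
 * whoever queues gl_work first takes a reference with gfs2_glock_hold();
 * if queue_delayed_work() reports that the work was already pending, the
 * extra reference is dropped immediately.  glock_work_func() drops its
 * reference when it finishes, unless it re-arms the work with a delay, in
 * which case the reference is carried over to the next run.
 */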
690
b3b94faa
DT
691/**
692 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
693 * @sdp: The GFS2 superblock
694 * @number: the lock number
695 * @glops: The glock_operations to use
696 * @create: If 0, don't create the glock if it doesn't exist
697 * @glp: the glock is returned here
698 *
699 * This does not lock a glock, just finds/creates structures for one.
700 *
701 * Returns: errno
702 */
703
cd915493 704int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
8fb4b536 705 const struct gfs2_glock_operations *glops, int create,
b3b94faa
DT
706 struct gfs2_glock **glp)
707{
009d8518 708 struct super_block *s = sdp->sd_vfs;
37b2fa6a 709 struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
b3b94faa 710 struct gfs2_glock *gl, *tmp;
37b2fa6a 711 unsigned int hash = gl_hash(sdp, &name);
009d8518 712 struct address_space *mapping;
bc015cb8 713 struct kmem_cache *cachep;
b3b94faa 714
bc015cb8 715 rcu_read_lock();
37b2fa6a 716 gl = search_bucket(hash, sdp, &name);
bc015cb8 717 rcu_read_unlock();
b3b94faa 718
64d576ba
SW
719 *glp = gl;
720 if (gl)
b3b94faa 721 return 0;
64d576ba
SW
722 if (!create)
723 return -ENOENT;
b3b94faa 724
009d8518 725 if (glops->go_flags & GLOF_ASPACE)
bc015cb8 726 cachep = gfs2_glock_aspace_cachep;
009d8518 727 else
bc015cb8
SW
728 cachep = gfs2_glock_cachep;
729 gl = kmem_cache_alloc(cachep, GFP_KERNEL);
b3b94faa
DT
730 if (!gl)
731 return -ENOMEM;
732
8f05228e 733 atomic_inc(&sdp->sd_glock_disposal);
ec45d9f5 734 gl->gl_flags = 0;
b3b94faa 735 gl->gl_name = name;
16feb9fe 736 atomic_set(&gl->gl_ref, 1);
b3b94faa 737 gl->gl_state = LM_ST_UNLOCKED;
6802e340 738 gl->gl_target = LM_ST_UNLOCKED;
c4f68a13 739 gl->gl_demote_state = LM_ST_EXCLUSIVE;
37b2fa6a 740 gl->gl_hash = hash;
b3b94faa 741 gl->gl_ops = glops;
f057f6cd
SW
742 snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx", name.ln_type, (unsigned long long)number);
743 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
744 gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
c4f68a13 745 gl->gl_tchange = jiffies;
ec45d9f5 746 gl->gl_object = NULL;
b3b94faa 747 gl->gl_sbd = sdp;
c4f68a13 748 INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);
b94a170e 749 INIT_WORK(&gl->gl_delete, delete_work_func);
b3b94faa 750
009d8518
SW
751 mapping = gfs2_glock2aspace(gl);
752 if (mapping) {
753 mapping->a_ops = &gfs2_meta_aops;
754 mapping->host = s->s_bdev->bd_inode;
755 mapping->flags = 0;
756 mapping_set_gfp_mask(mapping, GFP_NOFS);
757 mapping->assoc_mapping = NULL;
758 mapping->backing_dev_info = s->s_bdi;
759 mapping->writeback_index = 0;
b3b94faa
DT
760 }
761
bc015cb8 762 spin_lock_bucket(hash);
37b2fa6a 763 tmp = search_bucket(hash, sdp, &name);
b3b94faa 764 if (tmp) {
bc015cb8
SW
765 spin_unlock_bucket(hash);
766 kmem_cache_free(cachep, gl);
fc0e38da 767 atomic_dec(&sdp->sd_glock_disposal);
b3b94faa
DT
768 gl = tmp;
769 } else {
bc015cb8
SW
770 hlist_bl_add_head_rcu(&gl->gl_list, &gl_hash_table[hash]);
771 spin_unlock_bucket(hash);
b3b94faa
DT
772 }
773
774 *glp = gl;
775
776 return 0;
b3b94faa
DT
777}
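
/*
 * Illustrative caller sketch only (not part of the build): the common
 * pattern elsewhere in GFS2 is to look up or create the glock, attach a
 * holder and queue it; gfs2_glock_nq_init() wraps the holder_init + nq
 * pair.  Error handling is trimmed and the function name is hypothetical.
 */
#if 0
static int example_read_under_glock(struct gfs2_sbd *sdp, u64 no_addr)
{
	struct gfs2_glock *gl;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &gl);
	if (error)
		return error;
	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &gh);
	gfs2_glock_put(gl);		/* the holder keeps its own reference */
	if (error)
		return error;
	/* ... read metadata under the shared lock ... */
	gfs2_glock_dq_uninit(&gh);
	return 0;
}
#endif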
778
779/**
780 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
781 * @gl: the glock
782 * @state: the state we're requesting
783 * @flags: the modifier flags
784 * @gh: the holder structure
785 *
786 */
787
190562bd 788void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
b3b94faa
DT
789 struct gfs2_holder *gh)
790{
791 INIT_LIST_HEAD(&gh->gh_list);
792 gh->gh_gl = gl;
d0dc80db 793 gh->gh_ip = (unsigned long)__builtin_return_address(0);
b1e058da 794 gh->gh_owner_pid = get_pid(task_pid(current));
b3b94faa
DT
795 gh->gh_state = state;
796 gh->gh_flags = flags;
797 gh->gh_error = 0;
798 gh->gh_iflags = 0;
b3b94faa
DT
799 gfs2_glock_hold(gl);
800}
801
802/**
803 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
804 * @state: the state we're requesting
805 * @flags: the modifier flags
806 * @gh: the holder structure
807 *
808 * Don't mess with the glock.
809 *
810 */
811
190562bd 812void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
b3b94faa
DT
813{
814 gh->gh_state = state;
579b78a4 815 gh->gh_flags = flags;
3b8249f6 816 gh->gh_iflags = 0;
d0dc80db 817 gh->gh_ip = (unsigned long)__builtin_return_address(0);
1a0eae88
BP
818 if (gh->gh_owner_pid)
819 put_pid(gh->gh_owner_pid);
820 gh->gh_owner_pid = get_pid(task_pid(current));
b3b94faa
DT
821}
822
823/**
824 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
825 * @gh: the holder structure
826 *
827 */
828
829void gfs2_holder_uninit(struct gfs2_holder *gh)
830{
b1e058da 831 put_pid(gh->gh_owner_pid);
b3b94faa
DT
832 gfs2_glock_put(gh->gh_gl);
833 gh->gh_gl = NULL;
d0dc80db 834 gh->gh_ip = 0;
b3b94faa
DT
835}
836
fe64d517
SW
837/**
838 * gfs2_glock_holder_wait
839 * @word: unused
840 *
841 * This function and gfs2_glock_demote_wait both show up in the WCHAN
842 * field. Thus I've separated these otherwise identical functions in
843 * order to be more informative to the user.
844 */
845
846static int gfs2_glock_holder_wait(void *word)
fee852e3
SW
847{
848 schedule();
849 return 0;
850}
851
fe64d517
SW
852static int gfs2_glock_demote_wait(void *word)
853{
854 schedule();
855 return 0;
856}
857
6802e340 858static void wait_on_holder(struct gfs2_holder *gh)
da755fdb 859{
6802e340 860 might_sleep();
fe64d517 861 wait_on_bit(&gh->gh_iflags, HIF_WAIT, gfs2_glock_holder_wait, TASK_UNINTERRUPTIBLE);
da755fdb
SW
862}
863
6802e340 864static void wait_on_demote(struct gfs2_glock *gl)
b3b94faa 865{
6802e340 866 might_sleep();
fe64d517 867 wait_on_bit(&gl->gl_flags, GLF_DEMOTE, gfs2_glock_demote_wait, TASK_UNINTERRUPTIBLE);
b3b94faa
DT
868}
869
870/**
6802e340
SW
871 * handle_callback - process a demote request
872 * @gl: the glock
873 * @state: the state the caller wants us to change to
b3b94faa 874 *
6802e340
SW
875 * There are only two requests that we are going to see in actual
876 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
b3b94faa
DT
877 */
878
6802e340 879static void handle_callback(struct gfs2_glock *gl, unsigned int state,
97cc1025 880 unsigned long delay)
b3b94faa 881{
6802e340 882 int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
b3b94faa 883
6802e340
SW
884 set_bit(bit, &gl->gl_flags);
885 if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
886 gl->gl_demote_state = state;
887 gl->gl_demote_time = jiffies;
6802e340
SW
888 } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
889 gl->gl_demote_state != state) {
890 gl->gl_demote_state = LM_ST_UNLOCKED;
b3b94faa 891 }
b94a170e
BM
892 if (gl->gl_ops->go_callback)
893 gl->gl_ops->go_callback(gl);
63997775 894 trace_gfs2_demote_rq(gl);
b3b94faa
DT
895}
896
897/**
6802e340 898 * gfs2_glock_wait - wait on a glock acquisition
b3b94faa
DT
899 * @gh: the glock holder
900 *
901 * Returns: 0 on success
902 */
903
6802e340 904int gfs2_glock_wait(struct gfs2_holder *gh)
b3b94faa 905{
fee852e3 906 wait_on_holder(gh);
b3b94faa
DT
907 return gh->gh_error;
908}
909
6802e340 910void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
7c52b166 911{
5e69069c 912 struct va_format vaf;
7c52b166
RP
913 va_list args;
914
915 va_start(args, fmt);
5e69069c 916
6802e340
SW
917 if (seq) {
918 struct gfs2_glock_iter *gi = seq->private;
7c52b166 919 vsprintf(gi->string, fmt, args);
6802e340
SW
920 seq_printf(seq, gi->string);
921 } else {
5e69069c
JP
922 vaf.fmt = fmt;
923 vaf.va = &args;
924
925 printk(KERN_ERR " %pV", &vaf);
6802e340 926 }
5e69069c 927
7c52b166
RP
928 va_end(args);
929}
930
b3b94faa
DT
931/**
932 * add_to_queue - Add a holder to the wait queue (but look for recursion)
933 * @gh: the holder structure to add
934 *
6802e340
SW
935 * Eventually we should move the recursive locking trap to a
936 * debugging option or something like that. This is the fast
937 * path and needs to have the minimum number of distractions.
938 *
b3b94faa
DT
939 */
940
6802e340 941static inline void add_to_queue(struct gfs2_holder *gh)
55ba474d
HH
942__releases(&gl->gl_spin)
943__acquires(&gl->gl_spin)
b3b94faa
DT
944{
945 struct gfs2_glock *gl = gh->gh_gl;
6802e340
SW
946 struct gfs2_sbd *sdp = gl->gl_sbd;
947 struct list_head *insert_pt = NULL;
948 struct gfs2_holder *gh2;
949 int try_lock = 0;
b3b94faa 950
b1e058da 951 BUG_ON(gh->gh_owner_pid == NULL);
fee852e3
SW
952 if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
953 BUG();
190562bd 954
6802e340
SW
955 if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
956 if (test_bit(GLF_LOCK, &gl->gl_flags))
957 try_lock = 1;
958 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
959 goto fail;
960 }
961
962 list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
963 if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
964 (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
965 goto trap_recursive;
966 if (try_lock &&
967 !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
968 !may_grant(gl, gh)) {
969fail:
970 gh->gh_error = GLR_TRYFAILED;
971 gfs2_holder_wake(gh);
972 return;
b4c20166 973 }
6802e340
SW
974 if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
975 continue;
976 if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
977 insert_pt = &gh2->gh_list;
978 }
7b5e3d5f 979 set_bit(GLF_QUEUED, &gl->gl_flags);
edae38a6 980 trace_gfs2_glock_queue(gh, 1);
6802e340
SW
981 if (likely(insert_pt == NULL)) {
982 list_add_tail(&gh->gh_list, &gl->gl_holders);
983 if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
984 goto do_cancel;
985 return;
986 }
987 list_add_tail(&gh->gh_list, insert_pt);
988do_cancel:
989 gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
990 if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
991 spin_unlock(&gl->gl_spin);
048bca22 992 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
f057f6cd 993 sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
6802e340 994 spin_lock(&gl->gl_spin);
b3b94faa 995 }
6802e340 996 return;
b3b94faa 997
6802e340
SW
998trap_recursive:
999 print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
1000 printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
1001 printk(KERN_ERR "lock type: %d req lock state : %d\n",
1002 gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
1003 print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
1004 printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
1005 printk(KERN_ERR "lock type: %d req lock state : %d\n",
1006 gh->gh_gl->gl_name.ln_type, gh->gh_state);
1007 __dump_glock(NULL, gl);
1008 BUG();
b3b94faa
DT
1009}
1010
1011/**
1012 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
1013 * @gh: the holder structure
1014 *
1015 * if (gh->gh_flags & GL_ASYNC), this never returns an error
1016 *
1017 * Returns: 0, GLR_TRYFAILED, or errno on failure
1018 */
1019
1020int gfs2_glock_nq(struct gfs2_holder *gh)
1021{
1022 struct gfs2_glock *gl = gh->gh_gl;
1023 struct gfs2_sbd *sdp = gl->gl_sbd;
1024 int error = 0;
1025
6802e340 1026 if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
b3b94faa 1027 return -EIO;
b3b94faa 1028
f42ab085
SW
1029 if (test_bit(GLF_LRU, &gl->gl_flags))
1030 gfs2_glock_remove_from_lru(gl);
1031
b3b94faa
DT
1032 spin_lock(&gl->gl_spin);
1033 add_to_queue(gh);
0809f6ec
SW
1034 if ((LM_FLAG_NOEXP & gh->gh_flags) &&
1035 test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1036 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
6802e340 1037 run_queue(gl, 1);
b3b94faa
DT
1038 spin_unlock(&gl->gl_spin);
1039
6802e340
SW
1040 if (!(gh->gh_flags & GL_ASYNC))
1041 error = gfs2_glock_wait(gh);
b3b94faa 1042
b3b94faa
DT
1043 return error;
1044}
1045
1046/**
1047 * gfs2_glock_poll - poll to see if an async request has been completed
1048 * @gh: the holder
1049 *
1050 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
1051 */
1052
1053int gfs2_glock_poll(struct gfs2_holder *gh)
1054{
6802e340 1055 return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
b3b94faa
DT
1056}
1057
1058/**
1059 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
1060 * @gh: the glock holder
1061 *
1062 */
1063
1064void gfs2_glock_dq(struct gfs2_holder *gh)
1065{
1066 struct gfs2_glock *gl = gh->gh_gl;
8fb4b536 1067 const struct gfs2_glock_operations *glops = gl->gl_ops;
c4f68a13 1068 unsigned delay = 0;
6802e340 1069 int fast_path = 0;
b3b94faa 1070
6802e340 1071 spin_lock(&gl->gl_spin);
b3b94faa 1072 if (gh->gh_flags & GL_NOCACHE)
97cc1025 1073 handle_callback(gl, LM_ST_UNLOCKED, 0);
b3b94faa 1074
b3b94faa 1075 list_del_init(&gh->gh_list);
6802e340 1076 if (find_first_holder(gl) == NULL) {
3042a2cc 1077 if (glops->go_unlock) {
6802e340 1078 GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
3042a2cc 1079 spin_unlock(&gl->gl_spin);
b3b94faa 1080 glops->go_unlock(gh);
3042a2cc 1081 spin_lock(&gl->gl_spin);
6802e340 1082 clear_bit(GLF_LOCK, &gl->gl_flags);
3042a2cc 1083 }
6802e340
SW
1084 if (list_empty(&gl->gl_holders) &&
1085 !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1086 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1087 fast_path = 1;
b3b94faa 1088 }
f42ab085
SW
1089 if (!test_bit(GLF_LFLUSH, &gl->gl_flags))
1090 __gfs2_glock_schedule_for_reclaim(gl);
63997775 1091 trace_gfs2_glock_queue(gh, 0);
b3b94faa 1092 spin_unlock(&gl->gl_spin);
6802e340
SW
1093 if (likely(fast_path))
1094 return;
c4f68a13
BM
1095
1096 gfs2_glock_hold(gl);
1097 if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
1098 !test_bit(GLF_DEMOTE, &gl->gl_flags))
1099 delay = gl->gl_ops->go_min_hold_time;
1100 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1101 gfs2_glock_put(gl);
b3b94faa
DT
1102}
1103
d93cfa98
AD
1104void gfs2_glock_dq_wait(struct gfs2_holder *gh)
1105{
1106 struct gfs2_glock *gl = gh->gh_gl;
1107 gfs2_glock_dq(gh);
1108 wait_on_demote(gl);
1109}
1110
b3b94faa
DT
1111/**
1112 * gfs2_glock_dq_uninit - dequeue a holder from a glock and initialize it
1113 * @gh: the holder structure
1114 *
1115 */
1116
1117void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
1118{
1119 gfs2_glock_dq(gh);
1120 gfs2_holder_uninit(gh);
1121}
1122
1123/**
1124 * gfs2_glock_nq_num - acquire a glock based on lock number
1125 * @sdp: the filesystem
1126 * @number: the lock number
1127 * @glops: the glock operations for the type of glock
1128 * @state: the state to acquire the glock in
25985edc 1129 * @flags: modifier flags for the acquisition
b3b94faa
DT
1130 * @gh: the struct gfs2_holder
1131 *
1132 * Returns: errno
1133 */
1134
cd915493 1135int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
8fb4b536
SW
1136 const struct gfs2_glock_operations *glops,
1137 unsigned int state, int flags, struct gfs2_holder *gh)
b3b94faa
DT
1138{
1139 struct gfs2_glock *gl;
1140 int error;
1141
1142 error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
1143 if (!error) {
1144 error = gfs2_glock_nq_init(gl, state, flags, gh);
1145 gfs2_glock_put(gl);
1146 }
1147
1148 return error;
1149}
1150
1151/**
1152 * glock_compare - Compare two struct gfs2_glock structures for sorting
1153 * @arg_a: the first structure
1154 * @arg_b: the second structure
1155 *
1156 */
1157
1158static int glock_compare(const void *arg_a, const void *arg_b)
1159{
a5e08a9e
SW
1160 const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
1161 const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
1162 const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
1163 const struct lm_lockname *b = &gh_b->gh_gl->gl_name;
b3b94faa
DT
1164
1165 if (a->ln_number > b->ln_number)
a5e08a9e
SW
1166 return 1;
1167 if (a->ln_number < b->ln_number)
1168 return -1;
1c0f4872 1169 BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
a5e08a9e 1170 return 0;
b3b94faa
DT
1171}
1172
1173/**
1174 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
1175 * @num_gh: the number of structures
1176 * @ghs: an array of struct gfs2_holder structures
1177 *
1178 * Returns: 0 on success (all glocks acquired),
1179 * errno on failure (no glocks acquired)
1180 */
1181
1182static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
1183 struct gfs2_holder **p)
1184{
1185 unsigned int x;
1186 int error = 0;
1187
1188 for (x = 0; x < num_gh; x++)
1189 p[x] = &ghs[x];
1190
1191 sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
1192
1193 for (x = 0; x < num_gh; x++) {
1194 p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1195
1196 error = gfs2_glock_nq(p[x]);
1197 if (error) {
1198 while (x--)
1199 gfs2_glock_dq(p[x]);
1200 break;
1201 }
1202 }
1203
1204 return error;
1205}
1206
1207/**
1208 * gfs2_glock_nq_m - acquire multiple glocks
1209 * @num_gh: the number of structures
1210 * @ghs: an array of struct gfs2_holder structures
1211 *
b3b94faa
DT
1212 *
1213 * Returns: 0 on success (all glocks acquired),
1214 * errno on failure (no glocks acquired)
1215 */
1216
1217int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1218{
eaf5bd3c
SW
1219 struct gfs2_holder *tmp[4];
1220 struct gfs2_holder **pph = tmp;
b3b94faa
DT
1221 int error = 0;
1222
eaf5bd3c
SW
1223 switch(num_gh) {
1224 case 0:
b3b94faa 1225 return 0;
eaf5bd3c 1226 case 1:
b3b94faa
DT
1227 ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
1228 return gfs2_glock_nq(ghs);
eaf5bd3c
SW
1229 default:
1230 if (num_gh <= 4)
b3b94faa 1231 break;
eaf5bd3c
SW
1232 pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
1233 if (!pph)
1234 return -ENOMEM;
b3b94faa
DT
1235 }
1236
eaf5bd3c 1237 error = nq_m_sync(num_gh, ghs, pph);
b3b94faa 1238
eaf5bd3c
SW
1239 if (pph != tmp)
1240 kfree(pph);
b3b94faa
DT
1241
1242 return error;
1243}
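
/*
 * Illustrative sketch only (not part of the build): callers initialise one
 * holder per glock and let gfs2_glock_nq_m() sort the requests into a
 * consistent order (by lock number) so that concurrent callers cannot
 * deadlock against each other.  The function name is hypothetical and
 * error handling is trimmed.
 */
#if 0
static int example_lock_pair(struct gfs2_glock *a, struct gfs2_glock *b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
	error = gfs2_glock_nq_m(2, ghs);
	if (error) {
		gfs2_holder_uninit(&ghs[0]);
		gfs2_holder_uninit(&ghs[1]);
		return error;
	}
	/* ... both glocks are held here ... */
	gfs2_glock_dq_uninit_m(2, ghs);
	return 0;
}
#endif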
1244
1245/**
1246 * gfs2_glock_dq_m - release multiple glocks
1247 * @num_gh: the number of structures
1248 * @ghs: an array of struct gfs2_holder structures
1249 *
1250 */
1251
1252void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
1253{
fa1bbdea
BP
1254 while (num_gh--)
1255 gfs2_glock_dq(&ghs[num_gh]);
b3b94faa
DT
1256}
1257
1258/**
1259 * gfs2_glock_dq_uninit_m - release multiple glocks
1260 * @num_gh: the number of structures
1261 * @ghs: an array of struct gfs2_holder structures
1262 *
1263 */
1264
1265void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
1266{
fa1bbdea
BP
1267 while (num_gh--)
1268 gfs2_glock_dq_uninit(&ghs[num_gh]);
b3b94faa
DT
1269}
1270
f057f6cd 1271void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
da755fdb 1272{
c4f68a13
BM
1273 unsigned long delay = 0;
1274 unsigned long holdtime;
1275 unsigned long now = jiffies;
b3b94faa 1276
f057f6cd 1277 gfs2_glock_hold(gl);
c4f68a13 1278 holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
7b5e3d5f
SW
1279 if (test_bit(GLF_QUEUED, &gl->gl_flags)) {
1280 if (time_before(now, holdtime))
1281 delay = holdtime - now;
1282 if (test_bit(GLF_REPLY_PENDING, &gl->gl_flags))
1283 delay = gl->gl_ops->go_min_hold_time;
1284 }
b3b94faa 1285
6802e340 1286 spin_lock(&gl->gl_spin);
97cc1025 1287 handle_callback(gl, state, delay);
6802e340 1288 spin_unlock(&gl->gl_spin);
c4f68a13
BM
1289 if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
1290 gfs2_glock_put(gl);
b3b94faa
DT
1291}
1292
0809f6ec
SW
1293/**
1294 * gfs2_should_freeze - Figure out if glock should be frozen
1295 * @gl: The glock in question
1296 *
1297 * Glocks are not frozen if (a) the result of the dlm operation is
1298 * an error, (b) the locking operation was an unlock operation or
1299 * (c) if there is a "noexp" flagged request anywhere in the queue
1300 *
1301 * Returns: 1 if freezing should occur, 0 otherwise
1302 */
1303
1304static int gfs2_should_freeze(const struct gfs2_glock *gl)
1305{
1306 const struct gfs2_holder *gh;
1307
1308 if (gl->gl_reply & ~LM_OUT_ST_MASK)
1309 return 0;
1310 if (gl->gl_target == LM_ST_UNLOCKED)
1311 return 0;
1312
1313 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
1314 if (test_bit(HIF_HOLDER, &gh->gh_iflags))
1315 continue;
1316 if (LM_FLAG_NOEXP & gh->gh_flags)
1317 return 0;
1318 }
1319
1320 return 1;
1321}
1322
b3b94faa 1323/**
f057f6cd
SW
1324 * gfs2_glock_complete - Callback used by locking
1325 * @gl: Pointer to the glock
1326 * @ret: The return value from the dlm
b3b94faa 1327 *
47a25380
SW
1328 * The gl_reply field is under the gl_spin lock so that it is ok
1329 * to use a bitfield shared with other glock state fields.
b3b94faa
DT
1330 */
1331
f057f6cd 1332void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
b3b94faa 1333{
f057f6cd 1334 struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
0809f6ec 1335
47a25380 1336 spin_lock(&gl->gl_spin);
f057f6cd 1337 gl->gl_reply = ret;
0809f6ec 1338
f057f6cd 1339 if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
0809f6ec 1340 if (gfs2_should_freeze(gl)) {
f057f6cd 1341 set_bit(GLF_FROZEN, &gl->gl_flags);
0809f6ec 1342 spin_unlock(&gl->gl_spin);
b3b94faa 1343 return;
0809f6ec 1344 }
b3b94faa 1345 }
47a25380
SW
1346
1347 spin_unlock(&gl->gl_spin);
f057f6cd 1348 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
47a25380 1349 smp_wmb();
f057f6cd
SW
1350 gfs2_glock_hold(gl);
1351 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1352 gfs2_glock_put(gl);
b3b94faa
DT
1353}
1354
b3b94faa 1355
7f8275d0 1356static int gfs2_shrink_glock_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
b3b94faa
DT
1357{
1358 struct gfs2_glock *gl;
97cc1025
SW
1359 int may_demote;
1360 int nr_skipped = 0;
97cc1025 1361 LIST_HEAD(skipped);
b3b94faa 1362
97cc1025
SW
1363 if (nr == 0)
1364 goto out;
b3b94faa 1365
97cc1025
SW
1366 if (!(gfp_mask & __GFP_FS))
1367 return -1;
b3b94faa 1368
97cc1025
SW
1369 spin_lock(&lru_lock);
1370 while(nr && !list_empty(&lru_list)) {
1371 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1372 list_del_init(&gl->gl_lru);
627c10b7 1373 clear_bit(GLF_LRU, &gl->gl_flags);
97cc1025
SW
1374 atomic_dec(&lru_count);
1375
1376 /* Test for being demotable */
1377 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1378 gfs2_glock_hold(gl);
97cc1025
SW
1379 spin_unlock(&lru_lock);
1380 spin_lock(&gl->gl_spin);
1381 may_demote = demote_ok(gl);
97cc1025
SW
1382 if (may_demote) {
1383 handle_callback(gl, LM_ST_UNLOCKED, 0);
1384 nr--;
97cc1025 1385 }
7e71c55e
SW
1386 clear_bit(GLF_LOCK, &gl->gl_flags);
1387 smp_mb__after_clear_bit();
2163b1e6 1388 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
b94a170e
BM
1389 gfs2_glock_put_nolock(gl);
1390 spin_unlock(&gl->gl_spin);
97cc1025 1391 spin_lock(&lru_lock);
2163b1e6 1392 continue;
97cc1025 1393 }
2163b1e6
SW
1394 nr_skipped++;
1395 list_add(&gl->gl_lru, &skipped);
627c10b7 1396 set_bit(GLF_LRU, &gl->gl_flags);
b3b94faa 1397 }
97cc1025
SW
1398 list_splice(&skipped, &lru_list);
1399 atomic_add(nr_skipped, &lru_count);
1400 spin_unlock(&lru_lock);
1401out:
1402 return (atomic_read(&lru_count) / 100) * sysctl_vfs_cache_pressure;
b3b94faa
DT
1403}
1404
97cc1025
SW
1405static struct shrinker glock_shrinker = {
1406 .shrink = gfs2_shrink_glock_memory,
1407 .seeks = DEFAULT_SEEKS,
1408};
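
/*
 * Worked example of the shrinker's return value: with 2000 glocks on the
 * LRU and the default vfs_cache_pressure of 100, gfs2_shrink_glock_memory()
 * reports (2000 / 100) * 100 = 2000 freeable objects; halving
 * vfs_cache_pressure to 50 halves that estimate, and so roughly halves the
 * reclaim pressure applied to the glock LRU.
 */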
1409
b3b94faa
DT
1410/**
1411 * examine_bucket - Call a function for glock in a hash bucket
1412 * @examiner: the function
1413 * @sdp: the filesystem
1414 * @bucket: the bucket
1415 *
b3b94faa
DT
1416 */
1417
bc015cb8 1418static void examine_bucket(glock_examiner examiner, const struct gfs2_sbd *sdp,
37b2fa6a 1419 unsigned int hash)
b3b94faa 1420{
bc015cb8
SW
1421 struct gfs2_glock *gl;
1422 struct hlist_bl_head *head = &gl_hash_table[hash];
1423 struct hlist_bl_node *pos;
b3b94faa 1424
bc015cb8
SW
1425 rcu_read_lock();
1426 hlist_bl_for_each_entry_rcu(gl, pos, head, gl_list) {
1427 if ((gl->gl_sbd == sdp) && atomic_read(&gl->gl_ref))
24264434 1428 examiner(gl);
b3b94faa 1429 }
bc015cb8 1430 rcu_read_unlock();
8fbbfd21 1431 cond_resched();
bc015cb8
SW
1432}
1433
1434static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
1435{
1436 unsigned x;
1437
1438 for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
1439 examine_bucket(examiner, sdp, x);
b3b94faa
DT
1440}
1441
f057f6cd
SW
1442
1443/**
1444 * thaw_glock - thaw out a glock which has an unprocessed reply waiting
1445 * @gl: The glock to thaw
1446 *
1447 * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
1448 * so this has to result in the ref count being dropped by one.
1449 */
1450
1451static void thaw_glock(struct gfs2_glock *gl)
1452{
1453 if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
1454 return;
f057f6cd
SW
1455 set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
1456 gfs2_glock_hold(gl);
1457 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1458 gfs2_glock_put(gl);
f057f6cd
SW
1459}
1460
b3b94faa
DT
1461/**
1462 * clear_glock - look at a glock and see if we can free it from glock cache
1463 * @gl: the glock to look at
1464 *
1465 */
1466
1467static void clear_glock(struct gfs2_glock *gl)
1468{
f42ab085 1469 gfs2_glock_remove_from_lru(gl);
b3b94faa 1470
6802e340 1471 spin_lock(&gl->gl_spin);
c741c455 1472 if (gl->gl_state != LM_ST_UNLOCKED)
97cc1025 1473 handle_callback(gl, LM_ST_UNLOCKED, 0);
6802e340
SW
1474 spin_unlock(&gl->gl_spin);
1475 gfs2_glock_hold(gl);
1476 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1477 gfs2_glock_put(gl);
b3b94faa
DT
1478}
1479
f057f6cd
SW
1480/**
1481 * gfs2_glock_thaw - Thaw any frozen glocks
1482 * @sdp: The super block
1483 *
1484 */
1485
1486void gfs2_glock_thaw(struct gfs2_sbd *sdp)
1487{
bc015cb8
SW
1488 glock_hash_walk(thaw_glock, sdp);
1489}
f057f6cd 1490
bc015cb8
SW
1491static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
1492{
1493 int ret;
1494 spin_lock(&gl->gl_spin);
1495 ret = __dump_glock(seq, gl);
1496 spin_unlock(&gl->gl_spin);
1497 return ret;
1498}
1499
1500static void dump_glock_func(struct gfs2_glock *gl)
1501{
1502 dump_glock(NULL, gl);
f057f6cd
SW
1503}
1504
b3b94faa
DT
1505/**
1506 * gfs2_gl_hash_clear - Empty out the glock hash table
1507 * @sdp: the filesystem
1508 * @wait: wait until it's all gone
1509 *
1bdad606 1510 * Called when unmounting the filesystem.
b3b94faa
DT
1511 */
1512
fefc03bf 1513void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
b3b94faa 1514{
bc015cb8 1515 glock_hash_walk(clear_glock, sdp);
8f05228e
SW
1516 flush_workqueue(glock_workqueue);
1517 wait_event(sdp->sd_glock_wait, atomic_read(&sdp->sd_glock_disposal) == 0);
bc015cb8 1518 glock_hash_walk(dump_glock_func, sdp);
b3b94faa
DT
1519}
1520
813e0c46
SW
1521void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
1522{
1523 struct gfs2_glock *gl = ip->i_gl;
1524 int ret;
1525
1526 ret = gfs2_truncatei_resume(ip);
1527 gfs2_assert_withdraw(gl->gl_sbd, ret == 0);
1528
1529 spin_lock(&gl->gl_spin);
1530 clear_bit(GLF_LOCK, &gl->gl_flags);
1531 run_queue(gl, 1);
1532 spin_unlock(&gl->gl_spin);
1533}
1534
6802e340 1535static const char *state2str(unsigned state)
04b933f2 1536{
6802e340
SW
1537 switch(state) {
1538 case LM_ST_UNLOCKED:
1539 return "UN";
1540 case LM_ST_SHARED:
1541 return "SH";
1542 case LM_ST_DEFERRED:
1543 return "DF";
1544 case LM_ST_EXCLUSIVE:
1545 return "EX";
1546 }
1547 return "??";
1548}
1549
1550static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
1551{
1552 char *p = buf;
1553 if (flags & LM_FLAG_TRY)
1554 *p++ = 't';
1555 if (flags & LM_FLAG_TRY_1CB)
1556 *p++ = 'T';
1557 if (flags & LM_FLAG_NOEXP)
1558 *p++ = 'e';
1559 if (flags & LM_FLAG_ANY)
f057f6cd 1560 *p++ = 'A';
6802e340
SW
1561 if (flags & LM_FLAG_PRIORITY)
1562 *p++ = 'p';
1563 if (flags & GL_ASYNC)
1564 *p++ = 'a';
1565 if (flags & GL_EXACT)
1566 *p++ = 'E';
6802e340
SW
1567 if (flags & GL_NOCACHE)
1568 *p++ = 'c';
1569 if (test_bit(HIF_HOLDER, &iflags))
1570 *p++ = 'H';
1571 if (test_bit(HIF_WAIT, &iflags))
1572 *p++ = 'W';
1573 if (test_bit(HIF_FIRST, &iflags))
1574 *p++ = 'F';
1575 *p = 0;
1576 return buf;
04b933f2
RP
1577}
1578
b3b94faa
DT
1579/**
1580 * dump_holder - print information about a glock holder
6802e340 1581 * @seq: the seq_file struct
b3b94faa
DT
1582 * @gh: the glock holder
1583 *
1584 * Returns: 0 on success, -ENOBUFS when we run out of space
1585 */
1586
6802e340 1587static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
b3b94faa 1588{
6802e340 1589 struct task_struct *gh_owner = NULL;
6802e340 1590 char flags_buf[32];
b3b94faa 1591
6802e340 1592 if (gh->gh_owner_pid)
b1e058da 1593 gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
cc18152e
JP
1594 gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
1595 state2str(gh->gh_state),
1596 hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
1597 gh->gh_error,
1598 gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
1599 gh_owner ? gh_owner->comm : "(ended)",
1600 (void *)gh->gh_ip);
7c52b166 1601 return 0;
b3b94faa
DT
1602}
1603
627c10b7 1604static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
6802e340 1605{
627c10b7 1606 const unsigned long *gflags = &gl->gl_flags;
6802e340 1607 char *p = buf;
627c10b7 1608
6802e340
SW
1609 if (test_bit(GLF_LOCK, gflags))
1610 *p++ = 'l';
6802e340
SW
1611 if (test_bit(GLF_DEMOTE, gflags))
1612 *p++ = 'D';
1613 if (test_bit(GLF_PENDING_DEMOTE, gflags))
1614 *p++ = 'd';
1615 if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
1616 *p++ = 'p';
1617 if (test_bit(GLF_DIRTY, gflags))
1618 *p++ = 'y';
1619 if (test_bit(GLF_LFLUSH, gflags))
1620 *p++ = 'f';
1621 if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
1622 *p++ = 'i';
1623 if (test_bit(GLF_REPLY_PENDING, gflags))
1624 *p++ = 'r';
f057f6cd 1625 if (test_bit(GLF_INITIAL, gflags))
d8348de0 1626 *p++ = 'I';
f057f6cd
SW
1627 if (test_bit(GLF_FROZEN, gflags))
1628 *p++ = 'F';
7b5e3d5f
SW
1629 if (test_bit(GLF_QUEUED, gflags))
1630 *p++ = 'q';
627c10b7
SW
1631 if (test_bit(GLF_LRU, gflags))
1632 *p++ = 'L';
1633 if (gl->gl_object)
1634 *p++ = 'o';
6802e340
SW
1635 *p = 0;
1636 return buf;
b3b94faa
DT
1637}
1638
1639/**
6802e340
SW
1640 * __dump_glock - print information about a glock
1641 * @seq: The seq_file struct
b3b94faa 1642 * @gl: the glock
6802e340
SW
1643 *
1644 * The file format is as follows:
1645 * One line per object, capital letters are used to indicate objects
1646 * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
1647 * other objects are indented by a single space and follow the glock to
1648 * which they are related. Fields are indicated by lower case letters
1649 * followed by a colon and the field value, except for strings which are in
1650 * [] so that it's possible to see if they are composed of spaces, for
1651 * example. The fields are n = number (id of the object), f = flags,
1652 * t = type, s = state, r = refcount, e = error, p = pid.
b3b94faa
DT
1653 *
1654 * Returns: 0 on success, -ENOBUFS when we run out of space
1655 */
1656
6802e340 1657static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
b3b94faa 1658{
6802e340
SW
1659 const struct gfs2_glock_operations *glops = gl->gl_ops;
1660 unsigned long long dtime;
1661 const struct gfs2_holder *gh;
1662 char gflags_buf[32];
1663 int error = 0;
b3b94faa 1664
6802e340
SW
1665 dtime = jiffies - gl->gl_demote_time;
1666 dtime *= 1000000/HZ; /* demote time in uSec */
1667 if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
1668 dtime = 0;
f42ab085 1669 gfs2_print_dbg(seq, "G: s:%s n:%u/%llx f:%s t:%s d:%s/%llu a:%d v:%d r:%d\n",
6802e340
SW
1670 state2str(gl->gl_state),
1671 gl->gl_name.ln_type,
1672 (unsigned long long)gl->gl_name.ln_number,
627c10b7 1673 gflags2str(gflags_buf, gl),
6802e340
SW
1674 state2str(gl->gl_target),
1675 state2str(gl->gl_demote_state), dtime,
6802e340 1676 atomic_read(&gl->gl_ail_count),
f42ab085 1677 atomic_read(&gl->gl_revokes),
6802e340 1678 atomic_read(&gl->gl_ref));
b3b94faa 1679
b3b94faa 1680 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
6802e340 1681 error = dump_holder(seq, gh);
b3b94faa
DT
1682 if (error)
1683 goto out;
1684 }
6802e340
SW
1685 if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
1686 error = glops->go_dump(seq, gl);
a91ea69f 1687out:
b3b94faa
DT
1688 return error;
1689}
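
/*
 * Hypothetical example of the output produced above (all field values are
 * illustrative only):
 *
 *   G: s:SH n:2/1f3b f:q t:SH d:EX/0 a:0 v:0 r:3
 *    H: s:SH f:H e:0 p:1234 [cat] gfs2_inode_lookup+0x17a/0x290 [gfs2]
 *
 * i.e. a shared inode glock (type 2) for block 0x1f3b with one granted
 * holder owned by pid 1234.
 */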
1690
6802e340 1691
b3b94faa 1692
8fbbfd21 1693
85d1da67
SW
1694int __init gfs2_glock_init(void)
1695{
1696 unsigned i;
1697 for(i = 0; i < GFS2_GL_HASH_SIZE; i++) {
bc015cb8 1698 INIT_HLIST_BL_HEAD(&gl_hash_table[i]);
087efdd3 1699 }
8fbbfd21 1700
d2115778 1701 glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
58a69cb4 1702 WQ_HIGHPRI | WQ_FREEZABLE, 0);
97cc1025 1703 if (IS_ERR(glock_workqueue))
c4f68a13 1704 return PTR_ERR(glock_workqueue);
d2115778 1705 gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
58a69cb4 1706 WQ_MEM_RECLAIM | WQ_FREEZABLE,
d2115778 1707 0);
b94a170e
BM
1708 if (IS_ERR(gfs2_delete_workqueue)) {
1709 destroy_workqueue(glock_workqueue);
1710 return PTR_ERR(gfs2_delete_workqueue);
1711 }
97cc1025
SW
1712
1713 register_shrinker(&glock_shrinker);
c4f68a13 1714
85d1da67
SW
1715 return 0;
1716}
1717
8fbbfd21
SW
1718void gfs2_glock_exit(void)
1719{
97cc1025 1720 unregister_shrinker(&glock_shrinker);
c4f68a13 1721 destroy_workqueue(glock_workqueue);
b94a170e 1722 destroy_workqueue(gfs2_delete_workqueue);
8fbbfd21
SW
1723}
1724
bc015cb8
SW
1725static inline struct gfs2_glock *glock_hash_chain(unsigned hash)
1726{
1727 return hlist_bl_entry(hlist_bl_first_rcu(&gl_hash_table[hash]),
1728 struct gfs2_glock, gl_list);
1729}
1730
1731static inline struct gfs2_glock *glock_hash_next(struct gfs2_glock *gl)
1732{
7e32d026 1733 return hlist_bl_entry(rcu_dereference(gl->gl_list.next),
bc015cb8
SW
1734 struct gfs2_glock, gl_list);
1735}
1736
6802e340 1737static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
7c52b166 1738{
7b08fc62
SW
1739 struct gfs2_glock *gl;
1740
bc015cb8
SW
1741 do {
1742 gl = gi->gl;
1743 if (gl) {
1744 gi->gl = glock_hash_next(gl);
1745 } else {
1746 gi->gl = glock_hash_chain(gi->hash);
1747 }
1748 while (gi->gl == NULL) {
1749 gi->hash++;
1750 if (gi->hash >= GFS2_GL_HASH_SIZE) {
1751 rcu_read_unlock();
1752 return 1;
1753 }
1754 gi->gl = glock_hash_chain(gi->hash);
1755 }
1756 /* Skip entries for other sb and dead entries */
1757 } while (gi->sdp != gi->gl->gl_sbd || atomic_read(&gi->gl->gl_ref) == 0);
a947e033 1758
7c52b166
RP
1759 return 0;
1760}
1761
6802e340 1762static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
7c52b166 1763{
6802e340 1764 struct gfs2_glock_iter *gi = seq->private;
7c52b166
RP
1765 loff_t n = *pos;
1766
6802e340 1767 gi->hash = 0;
bc015cb8 1768 rcu_read_lock();
7c52b166 1769
6802e340 1770 do {
bc015cb8 1771 if (gfs2_glock_iter_next(gi))
7c52b166 1772 return NULL;
6802e340 1773 } while (n--);
7c52b166 1774
6802e340 1775 return gi->gl;
7c52b166
RP
1776}
1777
6802e340 1778static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
7c52b166
RP
1779 loff_t *pos)
1780{
6802e340 1781 struct gfs2_glock_iter *gi = seq->private;
7c52b166
RP
1782
1783 (*pos)++;
1784
bc015cb8 1785 if (gfs2_glock_iter_next(gi))
7c52b166 1786 return NULL;
7c52b166 1787
6802e340 1788 return gi->gl;
7c52b166
RP
1789}
1790
6802e340 1791static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
7c52b166 1792{
6802e340 1793 struct gfs2_glock_iter *gi = seq->private;
bc015cb8
SW
1794
1795 if (gi->gl)
1796 rcu_read_unlock();
1797 gi->gl = NULL;
7c52b166
RP
1798}
1799
6802e340 1800static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
7c52b166 1801{
6802e340 1802 return dump_glock(seq, iter_ptr);
7c52b166
RP
1803}
1804
4ef29002 1805static const struct seq_operations gfs2_glock_seq_ops = {
7c52b166
RP
1806 .start = gfs2_glock_seq_start,
1807 .next = gfs2_glock_seq_next,
1808 .stop = gfs2_glock_seq_stop,
1809 .show = gfs2_glock_seq_show,
1810};
1811
1812static int gfs2_debugfs_open(struct inode *inode, struct file *file)
1813{
6802e340
SW
1814 int ret = seq_open_private(file, &gfs2_glock_seq_ops,
1815 sizeof(struct gfs2_glock_iter));
1816 if (ret == 0) {
1817 struct seq_file *seq = file->private_data;
1818 struct gfs2_glock_iter *gi = seq->private;
1819 gi->sdp = inode->i_private;
1820 }
1821 return ret;
7c52b166
RP
1822}
1823
1824static const struct file_operations gfs2_debug_fops = {
1825 .owner = THIS_MODULE,
1826 .open = gfs2_debugfs_open,
1827 .read = seq_read,
1828 .llseek = seq_lseek,
6802e340 1829 .release = seq_release_private,
7c52b166
RP
1830};
1831
1832int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
1833{
5f882096
RP
1834 sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
1835 if (!sdp->debugfs_dir)
1836 return -ENOMEM;
1837 sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
1838 S_IFREG | S_IRUGO,
1839 sdp->debugfs_dir, sdp,
1840 &gfs2_debug_fops);
1841 if (!sdp->debugfs_dentry_glocks)
7c52b166
RP
1842 return -ENOMEM;
1843
1844 return 0;
1845}
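
/*
 * With debugfs mounted in the usual place, the file created above shows up
 * as /sys/kernel/debug/gfs2/<table-name>/glocks and can simply be read
 * with cat(1); each line is produced by __dump_glock()/dump_holder().
 */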
1846
1847void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
1848{
5f882096
RP
1849 if (sdp && sdp->debugfs_dir) {
1850 if (sdp->debugfs_dentry_glocks) {
1851 debugfs_remove(sdp->debugfs_dentry_glocks);
1852 sdp->debugfs_dentry_glocks = NULL;
1853 }
1854 debugfs_remove(sdp->debugfs_dir);
1855 sdp->debugfs_dir = NULL;
1856 }
7c52b166
RP
1857}
1858
1859int gfs2_register_debugfs(void)
1860{
1861 gfs2_root = debugfs_create_dir("gfs2", NULL);
1862 return gfs2_root ? 0 : -ENOMEM;
1863}
1864
1865void gfs2_unregister_debugfs(void)
1866{
1867 debugfs_remove(gfs2_root);
5f882096 1868 gfs2_root = NULL;
7c52b166 1869}