/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
#include <linux/jhash.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "lm.h"
#include "lops.h"
#include "meta_io.h"
#include "quota.h"
#include "super.h"
#include "util.h"

struct gfs2_gl_hash_bucket {
	struct hlist_head hb_list;
};

struct glock_iter {
	int hash;		/* hash bucket index */
	struct gfs2_sbd *sdp;	/* incore superblock */
	struct gfs2_glock *gl;	/* current glock struct */
	struct seq_file *seq;	/* sequence file for debugfs */
	char string[512];	/* scratch space */
};

typedef void (*glock_examiner) (struct gfs2_glock * gl);

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static void run_queue(struct gfs2_glock *gl);

static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
static struct task_struct *scand_process;
static unsigned int scand_secs = 5;
static struct workqueue_struct *glock_workqueue;

#define GFS2_GL_HASH_SHIFT      15
#define GFS2_GL_HASH_SIZE       (1 << GFS2_GL_HASH_SHIFT)
#define GFS2_GL_HASH_MASK       (GFS2_GL_HASH_SIZE - 1)

static struct gfs2_gl_hash_bucket gl_hash_table[GFS2_GL_HASH_SIZE];

/*
 * Despite what you might think, the numbers below are not arbitrary :-)
 * They are taken from the ipv4 routing hash code, which is well tested
 * and thus should be nearly optimal. Later on we might tweak the numbers
 * but for now this should be fine.
 *
 * The reason for putting the locks in a separate array from the list heads
 * is that we can have fewer locks than list heads and save memory. We use
 * the same hash function for both, but with a different hash mask.
 */
#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
    defined(CONFIG_PROVE_LOCKING)

#ifdef CONFIG_LOCKDEP
# define GL_HASH_LOCK_SZ        256
#else
# if NR_CPUS >= 32
#  define GL_HASH_LOCK_SZ       4096
# elif NR_CPUS >= 16
#  define GL_HASH_LOCK_SZ       2048
# elif NR_CPUS >= 8
#  define GL_HASH_LOCK_SZ       1024
# elif NR_CPUS >= 4
#  define GL_HASH_LOCK_SZ       512
# else
#  define GL_HASH_LOCK_SZ       256
# endif
#endif

/* We never want more locks than chains */
#if GFS2_GL_HASH_SIZE < GL_HASH_LOCK_SZ
# undef GL_HASH_LOCK_SZ
# define GL_HASH_LOCK_SZ GFS2_GL_HASH_SIZE
#endif

static rwlock_t gl_hash_locks[GL_HASH_LOCK_SZ];

static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return &gl_hash_locks[x & (GL_HASH_LOCK_SZ-1)];
}
#else /* not SMP, so no spinlocks required */
static inline rwlock_t *gl_lock_addr(unsigned int x)
{
	return NULL;
}
#endif

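/*
 * Note: on UP builds without spinlock debugging, gl_lock_addr() above
 * returns NULL; that is safe because in that configuration the
 * read_lock()/write_lock() wrappers never dereference their argument.
 */
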
/**
 * relaxed_state_ok - is a requested lock compatible with the current lock mode?
 * @actual: the current state of the lock
 * @requested: the lock state that was requested by the caller
 * @flags: the modifier flags passed in by the caller
 *
 * Returns: 1 if the locks are compatible, 0 otherwise
 */

static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
				   int flags)
{
	if (actual == requested)
		return 1;

	if (flags & GL_EXACT)
		return 0;

	if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
		return 1;

	if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
		return 1;

	return 0;
}

/**
 * gl_hash() - Turn glock number into hash bucket number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: The number of the corresponding hash bucket
 */

static unsigned int gl_hash(const struct gfs2_sbd *sdp,
			    const struct lm_lockname *name)
{
	unsigned int h;

	h = jhash(&name->ln_number, sizeof(u64), 0);
	h = jhash(&name->ln_type, sizeof(unsigned int), h);
	h = jhash(&sdp, sizeof(struct gfs2_sbd *), h);
	h &= GFS2_GL_HASH_MASK;

	return h;
}

/**
 * glock_free() - Perform a few checks and then release struct gfs2_glock
 * @gl: The glock to release
 *
 * Also calls lock module to release its internal structure for this glock.
 *
 */

static void glock_free(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	struct inode *aspace = gl->gl_aspace;

	gfs2_lm_put_lock(sdp, gl->gl_lock);

	if (aspace)
		gfs2_aspace_put(aspace);

	kmem_cache_free(gfs2_glock_cachep, gl);
}

/**
 * gfs2_glock_hold() - increment reference count on glock
 * @gl: The glock to hold
 *
 */

void gfs2_glock_hold(struct gfs2_glock *gl)
{
	atomic_inc(&gl->gl_ref);
}

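/*
 * gfs2_glock_put() below drops the final reference while holding the
 * bucket's write lock, so a concurrent search_bucket() cannot return a
 * glock whose reference count has already hit zero.
 */
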
/**
 * gfs2_glock_put() - Decrement reference count on glock
 * @gl: The glock to put
 *
 */

int gfs2_glock_put(struct gfs2_glock *gl)
{
	int rv = 0;
	struct gfs2_sbd *sdp = gl->gl_sbd;

	write_lock(gl_lock_addr(gl->gl_hash));
	if (atomic_dec_and_test(&gl->gl_ref)) {
		hlist_del(&gl->gl_list);
		write_unlock(gl_lock_addr(gl->gl_hash));
		gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
		gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
		gfs2_assert(sdp, list_empty(&gl->gl_holders));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
		gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
		glock_free(gl);
		rv = 1;
		goto out;
	}
	write_unlock(gl_lock_addr(gl->gl_hash));
out:
	return rv;
}

/**
 * search_bucket() - Find struct gfs2_glock by lock number
 * @hash: the bucket to search
 * @sdp: the filesystem the glock must belong to
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *search_bucket(unsigned int hash,
					const struct gfs2_sbd *sdp,
					const struct lm_lockname *name)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;

	hlist_for_each_entry(gl, h, &gl_hash_table[hash].hb_list, gl_list) {
		if (!lm_name_equal(&gl->gl_name, name))
			continue;
		if (gl->gl_sbd != sdp)
			continue;

		atomic_inc(&gl->gl_ref);

		return gl;
	}

	return NULL;
}

/**
 * gfs2_glock_find() - Find glock by lock number
 * @sdp: The GFS2 superblock
 * @name: The lock name
 *
 * Returns: NULL, or the struct gfs2_glock with the requested number
 */

static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
					  const struct lm_lockname *name)
{
	unsigned int hash = gl_hash(sdp, name);
	struct gfs2_glock *gl;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, name);
	read_unlock(gl_lock_addr(hash));

	return gl;
}

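/*
 * glock_work_func() is the delayed-work handler armed by gfs2_glock_dq()
 * and blocking_cb(): once any hold-time delay has expired, it turns a
 * pending demote into an active one and reruns the request queue.
 */
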
static void glock_work_func(struct work_struct *work)
{
	struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);

	spin_lock(&gl->gl_spin);
	if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
		set_bit(GLF_DEMOTE, &gl->gl_flags);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
	gfs2_glock_put(gl);
}

/**
 * gfs2_glock_get() - Get a glock, or create one if one doesn't exist
 * @sdp: The GFS2 superblock
 * @number: the lock number
 * @glops: The glock_operations to use
 * @create: If 0, don't create the glock if it doesn't exist
 * @glp: the glock is returned here
 *
 * This does not lock a glock, just finds/creates structures for one.
 *
 * Returns: errno
 */

int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
		   const struct gfs2_glock_operations *glops, int create,
		   struct gfs2_glock **glp)
{
	struct lm_lockname name = { .ln_number = number, .ln_type = glops->go_type };
	struct gfs2_glock *gl, *tmp;
	unsigned int hash = gl_hash(sdp, &name);
	int error;

	read_lock(gl_lock_addr(hash));
	gl = search_bucket(hash, sdp, &name);
	read_unlock(gl_lock_addr(hash));

	if (gl || !create) {
		*glp = gl;
		return 0;
	}

	gl = kmem_cache_alloc(gfs2_glock_cachep, GFP_KERNEL);
	if (!gl)
		return -ENOMEM;

	gl->gl_flags = 0;
	gl->gl_name = name;
	atomic_set(&gl->gl_ref, 1);
	gl->gl_state = LM_ST_UNLOCKED;
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	gl->gl_hash = hash;
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	gl->gl_ops = glops;
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	gl->gl_vn = 0;
	gl->gl_stamp = jiffies;
	gl->gl_tchange = jiffies;
	gl->gl_object = NULL;
	gl->gl_sbd = sdp;
	gl->gl_aspace = NULL;
	INIT_DELAYED_WORK(&gl->gl_work, glock_work_func);

	/* If this glock protects actual on-disk data or metadata blocks,
	   create a VFS inode to manage the pages/buffers holding them. */
	if (glops == &gfs2_inode_glops || glops == &gfs2_rgrp_glops) {
		gl->gl_aspace = gfs2_aspace_get(sdp);
		if (!gl->gl_aspace) {
			error = -ENOMEM;
			goto fail;
		}
	}

	error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
	if (error)
		goto fail_aspace;

	write_lock(gl_lock_addr(hash));
	tmp = search_bucket(hash, sdp, &name);
	if (tmp) {
		write_unlock(gl_lock_addr(hash));
		glock_free(gl);
		gl = tmp;
	} else {
		hlist_add_head(&gl->gl_list, &gl_hash_table[hash].hb_list);
		write_unlock(gl_lock_addr(hash));
	}

	*glp = gl;

	return 0;

fail_aspace:
	if (gl->gl_aspace)
		gfs2_aspace_put(gl->gl_aspace);
fail:
	kmem_cache_free(gfs2_glock_cachep, gl);
	return error;
}

/**
 * gfs2_holder_init - initialize a struct gfs2_holder in the default way
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 */

void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh)
{
	INIT_LIST_HEAD(&gh->gh_list);
	gh->gh_gl = gl;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
	gh->gh_owner_pid = current->pid;
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_error = 0;
	gh->gh_iflags = 0;
	gfs2_glock_hold(gl);
}

/**
 * gfs2_holder_reinit - reinitialize a struct gfs2_holder so we can requeue it
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Don't mess with the glock.
 *
 */

void gfs2_holder_reinit(unsigned int state, unsigned flags, struct gfs2_holder *gh)
{
	gh->gh_state = state;
	gh->gh_flags = flags;
	gh->gh_iflags = 0;
	gh->gh_ip = (unsigned long)__builtin_return_address(0);
}

/**
 * gfs2_holder_uninit - uninitialize a holder structure (drop glock reference)
 * @gh: the holder structure
 *
 */

void gfs2_holder_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_put(gh->gh_gl);
	gh->gh_gl = NULL;
	gh->gh_ip = 0;
}

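/*
 * Typical holder usage, as an illustrative sketch only (many callers use
 * the gfs2_glock_nq_init() wrapper from glock.h, which combines the
 * first two steps):
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	gfs2_holder_init(gl, LM_ST_SHARED, 0, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	if (!error) {
 *		... operate on the object protected by the glock ...
 *		gfs2_glock_dq(&gh);
 *	}
 *	gfs2_holder_uninit(&gh);
 */
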
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}

static int just_schedule(void *word)
{
	schedule();
	return 0;
}

static void wait_on_holder(struct gfs2_holder *gh)
{
	might_sleep();
	wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}

static void gfs2_demote_wake(struct gfs2_glock *gl)
{
	gl->gl_demote_state = LM_ST_EXCLUSIVE;
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
}

static void wait_on_demote(struct gfs2_glock *gl)
{
	might_sleep();
	wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}

/**
 * rq_mutex - process a mutex request in the queue
 * @gh: the glock holder
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_mutex(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	list_del_init(&gh->gh_list);
	/* gh->gh_error never examined. */
	set_bit(GLF_LOCK, &gl->gl_flags);
	clear_bit(HIF_WAIT, &gh->gh_iflags);
	smp_mb();
	wake_up_bit(&gh->gh_iflags, HIF_WAIT);

	return 1;
}

/**
 * rq_promote - process a promote request in the queue
 * @gh: the glock holder
 *
 * Acquire a new inter-node lock, or change a lock state to more restrictive.
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_promote(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
		if (list_empty(&gl->gl_holders)) {
			gl->gl_req_gh = gh;
			set_bit(GLF_LOCK, &gl->gl_flags);
			spin_unlock(&gl->gl_spin);
			gfs2_glock_xmote_th(gh->gh_gl, gh);
			spin_lock(&gl->gl_spin);
		}
		return 1;
	}

	if (list_empty(&gl->gl_holders)) {
		set_bit(HIF_FIRST, &gh->gh_iflags);
		set_bit(GLF_LOCK, &gl->gl_flags);
	} else {
		struct gfs2_holder *next_gh;
		if (gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
		next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
				     gh_list);
		if (next_gh->gh_state == LM_ST_EXCLUSIVE)
			return 1;
	}

	list_move_tail(&gh->gh_list, &gl->gl_holders);
	gh->gh_error = 0;
	set_bit(HIF_HOLDER, &gh->gh_iflags);

	gfs2_holder_wake(gh);

	return 0;
}

/**
 * rq_demote - process a demote request in the queue
 * @gl: the glock
 *
 * Returns: 1 if the queue is blocked
 */

static int rq_demote(struct gfs2_glock *gl)
{
	if (!list_empty(&gl->gl_holders))
		return 1;

	if (gl->gl_state == gl->gl_demote_state ||
	    gl->gl_state == LM_ST_UNLOCKED) {
		gfs2_demote_wake(gl);
		return 0;
	}

	set_bit(GLF_LOCK, &gl->gl_flags);
	set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

	if (gl->gl_demote_state == LM_ST_UNLOCKED ||
	    gl->gl_state != LM_ST_EXCLUSIVE) {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_drop_th(gl);
	} else {
		spin_unlock(&gl->gl_spin);
		gfs2_glock_xmote_th(gl, NULL);
	}

	spin_lock(&gl->gl_spin);
	clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);

	return 0;
}

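/*
 * run_queue() below serves requests in strict priority order: glmutex
 * requests on gl_waiters1 first, then any outstanding demote request,
 * and only then promote requests on gl_waiters3.
 */
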
/**
 * run_queue - process holder structures on a glock
 * @gl: the glock
 *
 */
static void run_queue(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	int blocked = 1;

	for (;;) {
		if (test_bit(GLF_LOCK, &gl->gl_flags))
			break;

		if (!list_empty(&gl->gl_waiters1)) {
			gh = list_entry(gl->gl_waiters1.next,
					struct gfs2_holder, gh_list);
			blocked = rq_mutex(gh);
		} else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
			blocked = rq_demote(gl);
			if (gl->gl_waiters2 && !blocked) {
				set_bit(GLF_DEMOTE, &gl->gl_flags);
				gl->gl_demote_state = LM_ST_UNLOCKED;
			}
			gl->gl_waiters2 = 0;
		} else if (!list_empty(&gl->gl_waiters3)) {
			gh = list_entry(gl->gl_waiters3.next,
					struct gfs2_holder, gh_list);
			blocked = rq_promote(gh);
		} else
			break;

		if (blocked)
			break;
	}
}

/**
 * gfs2_glmutex_lock - acquire a local lock on a glock
 * @gl: the glock
 *
 * Gives caller exclusive access to manipulate a glock structure.
 */

static void gfs2_glmutex_lock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		struct gfs2_holder gh;

		gfs2_holder_init(gl, 0, 0, &gh);
		set_bit(HIF_WAIT, &gh.gh_iflags);
		list_add_tail(&gh.gh_list, &gl->gl_waiters1);
		spin_unlock(&gl->gl_spin);
		wait_on_holder(&gh);
		gfs2_holder_uninit(&gh);
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
		spin_unlock(&gl->gl_spin);
	}
}

/**
 * gfs2_glmutex_trylock - try to acquire a local lock on a glock
 * @gl: the glock
 *
 * Returns: 1 if the glock is acquired
 */

static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
{
	int acquired = 1;

	spin_lock(&gl->gl_spin);
	if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
		acquired = 0;
	} else {
		gl->gl_owner_pid = current->pid;
		gl->gl_ip = (unsigned long)__builtin_return_address(0);
	}
	spin_unlock(&gl->gl_spin);

	return acquired;
}

/**
 * gfs2_glmutex_unlock - release a local lock on a glock
 * @gl: the glock
 *
 */

static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
{
	spin_lock(&gl->gl_spin);
	clear_bit(GLF_LOCK, &gl->gl_flags);
	gl->gl_owner_pid = 0;
	gl->gl_ip = 0;
	run_queue(gl);
	spin_unlock(&gl->gl_spin);
}

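/*
 * Demote requests may be deferred: when handle_callback() is passed a
 * nonzero delay it sets GLF_PENDING_DEMOTE instead of GLF_DEMOTE, and
 * glock_work_func() promotes the former to the latter once the delayed
 * work fires.
 */
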
/**
 * handle_callback - process a demote request
 * @gl: the glock
 * @state: the state the caller wants us to change to
 * @remote: 1 if the request came from another cluster node
 * @delay: if nonzero, defer the demote via GLF_PENDING_DEMOTE
 *
 * There are only two requests that we are going to see in actual
 * practice: LM_ST_SHARED and LM_ST_UNLOCKED
 */

static void handle_callback(struct gfs2_glock *gl, unsigned int state,
			    int remote, unsigned long delay)
{
	int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;

	spin_lock(&gl->gl_spin);
	set_bit(bit, &gl->gl_flags);
	if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
		gl->gl_demote_state = state;
		gl->gl_demote_time = jiffies;
		if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
		    gl->gl_object) {
			gfs2_glock_schedule_for_reclaim(gl);
			spin_unlock(&gl->gl_spin);
			return;
		}
	} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
		   gl->gl_demote_state != state) {
		if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
			gl->gl_waiters2 = 1;
		else
			gl->gl_demote_state = LM_ST_UNLOCKED;
	}
	spin_unlock(&gl->gl_spin);
}

/**
 * state_change - record that the glock is now in a different state
 * @gl: the glock
 * @new_state: the new state
 *
 */

static void state_change(struct gfs2_glock *gl, unsigned int new_state)
{
	int held1, held2;

	held1 = (gl->gl_state != LM_ST_UNLOCKED);
	held2 = (new_state != LM_ST_UNLOCKED);

	if (held1 != held2) {
		if (held2)
			gfs2_glock_hold(gl);
		else
			gfs2_glock_put(gl);
	}

	gl->gl_state = new_state;
	gl->gl_tchange = jiffies;
}

/**
 * xmote_bh - Called after the lock module is done acquiring a lock
 * @gl: The glock in question
 * @ret: the int returned from the lock module
 *
 */

static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;
	int prev_state = gl->gl_state;
	int op_done = 1;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));

	state_change(gl, ret & LM_OUT_ST_MASK);

	if (prev_state != LM_ST_UNLOCKED && !(ret & LM_OUT_CACHEABLE)) {
		if (glops->go_inval)
			glops->go_inval(gl, DIO_METADATA);
	} else if (gl->gl_state == LM_ST_DEFERRED) {
		/* We might not want to do this here.
		   Look at moving to the inode glops. */
		if (glops->go_inval)
			glops->go_inval(gl, 0);
	}

	/* Deal with each possible exit condition */

	if (!gh) {
		gl->gl_stamp = jiffies;
		if (ret & LM_OUT_CANCELED) {
			op_done = 0;
		} else {
			spin_lock(&gl->gl_spin);
			if (gl->gl_state != gl->gl_demote_state) {
				gl->gl_req_bh = NULL;
				spin_unlock(&gl->gl_spin);
				gfs2_glock_drop_th(gl);
				gfs2_glock_put(gl);
				return;
			}
			gfs2_demote_wake(gl);
			spin_unlock(&gl->gl_spin);
		}
	} else {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = -EIO;
		if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
			goto out;
		gh->gh_error = GLR_CANCELED;
		if (ret & LM_OUT_CANCELED)
			goto out;
		if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
			list_add_tail(&gh->gh_list, &gl->gl_holders);
			gh->gh_error = 0;
			set_bit(HIF_HOLDER, &gh->gh_iflags);
			set_bit(HIF_FIRST, &gh->gh_iflags);
			op_done = 0;
			goto out;
		}
		gh->gh_error = GLR_TRYFAILED;
		if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
			goto out;
		gh->gh_error = -EINVAL;
		if (gfs2_assert_withdraw(sdp, 0) == -1)
			fs_err(sdp, "ret = 0x%.8X\n", ret);
out:
		spin_unlock(&gl->gl_spin);
	}

	if (glops->go_xmote_bh)
		glops->go_xmote_bh(gl);

	if (op_done) {
		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		spin_unlock(&gl->gl_spin);
	}

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
 * @gl: The glock in question
 * @gh: the holder whose state is requested, or NULL for a demote request
 *
 */

static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int flags = gh ? gh->gh_flags : 0;
	unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
				 LM_FLAG_NOEXP | LM_FLAG_ANY |
				 LM_FLAG_PRIORITY);
	unsigned int lck_ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
	gfs2_assert_warn(sdp, state != gl->gl_state);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = xmote_bh;

	lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);

	if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
		return;

	if (lck_ret & LM_OUT_ASYNC)
		gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
	else
		xmote_bh(gl, lck_ret);
}

/**
 * drop_bh - Called after a lock module unlock completes
 * @gl: the glock
 * @ret: the return status
 *
 * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
 * Doesn't drop the reference on the glock the top half took out
 *
 */

static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	struct gfs2_holder *gh = gl->gl_req_gh;

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, !ret);

	state_change(gl, LM_ST_UNLOCKED);

	if (glops->go_inval)
		glops->go_inval(gl, DIO_METADATA);

	if (gh) {
		spin_lock(&gl->gl_spin);
		list_del_init(&gh->gh_list);
		gh->gh_error = 0;
		spin_unlock(&gl->gl_spin);
	}

	spin_lock(&gl->gl_spin);
	gfs2_demote_wake(gl);
	gl->gl_req_gh = NULL;
	gl->gl_req_bh = NULL;
	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_put(gl);

	if (gh)
		gfs2_holder_wake(gh);
}

/**
 * gfs2_glock_drop_th - call into the lock module to unlock a lock
 * @gl: the glock
 *
 */

static void gfs2_glock_drop_th(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned int ret;

	if (glops->go_xmote_th)
		glops->go_xmote_th(gl);

	gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
	gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
	gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);

	gfs2_glock_hold(gl);
	gl->gl_req_bh = drop_bh;

	ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);

	if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
		return;

	if (!ret)
		drop_bh(gl, ret);
	else
		gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
}

/**
 * do_cancels - cancel requests for locks stuck waiting on an expire flag
 * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
 *
 * Don't cancel GL_NOCANCEL requests.
 */

static void do_cancels(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;

	spin_lock(&gl->gl_spin);

	while (gl->gl_req_gh != gh &&
	       !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
	       !list_empty(&gh->gh_list)) {
		if (gl->gl_req_bh && !(gl->gl_req_gh &&
				       (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
			spin_unlock(&gl->gl_spin);
			gfs2_lm_cancel(gl->gl_sbd, gl->gl_lock);
			msleep(100);
			spin_lock(&gl->gl_spin);
		} else {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			spin_lock(&gl->gl_spin);
		}
	}

	spin_unlock(&gl->gl_spin);
}

/**
 * glock_wait_internal - wait on a glock acquisition
 * @gh: the glock holder
 *
 * Returns: 0 on success
 */

static int glock_wait_internal(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	const struct gfs2_glock_operations *glops = gl->gl_ops;

	if (test_bit(HIF_ABORTED, &gh->gh_iflags))
		return -EIO;

	if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
		spin_lock(&gl->gl_spin);
		if (gl->gl_req_gh != gh &&
		    !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
		    !list_empty(&gh->gh_list)) {
			list_del_init(&gh->gh_list);
			gh->gh_error = GLR_TRYFAILED;
			run_queue(gl);
			spin_unlock(&gl->gl_spin);
			return gh->gh_error;
		}
		spin_unlock(&gl->gl_spin);
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		do_cancels(gh);

	wait_on_holder(gh);
	if (gh->gh_error)
		return gh->gh_error;

	gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
	gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
						   gh->gh_flags));

	if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
		gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));

		if (glops->go_lock) {
			gh->gh_error = glops->go_lock(gh);
			if (gh->gh_error) {
				spin_lock(&gl->gl_spin);
				list_del_init(&gh->gh_list);
				spin_unlock(&gl->gl_spin);
			}
		}

		spin_lock(&gl->gl_spin);
		gl->gl_req_gh = NULL;
		gl->gl_req_bh = NULL;
		clear_bit(GLF_LOCK, &gl->gl_flags);
		run_queue(gl);
		spin_unlock(&gl->gl_spin);
	}

	return gh->gh_error;
}

static inline struct gfs2_holder *
find_holder_by_owner(struct list_head *head, pid_t pid)
{
	struct gfs2_holder *gh;

	list_for_each_entry(gh, head, gh_list) {
		if (gh->gh_owner_pid == pid)
			return gh;
	}

	return NULL;
}

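/*
 * print_dbg() routes diagnostic output either into the debugfs seq_file
 * (when called via the glock iterator) or to the console (when gi is
 * NULL, as from gfs2_dump_lockstate()).
 */
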
static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (gi) {
		/* Bound the formatting to the scratch buffer and emit the
		   result verbatim, so a '%' in the output cannot be
		   re-interpreted as a format specifier. */
		vsnprintf(gi->string, sizeof(gi->string), fmt, args);
		seq_puts(gi->seq, gi->string);
	} else
		vprintk(fmt, args);
	va_end(args);
}

/**
 * add_to_queue - Add a holder to the wait queue (but look for recursion)
 * @gh: the holder structure to add
 *
 */

static void add_to_queue(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_holder *existing;

	BUG_ON(!gh->gh_owner_pid);
	if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
		BUG();

	if (!(gh->gh_flags & GL_FLOCK)) {
		existing = find_holder_by_owner(&gl->gl_holders,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			printk(KERN_INFO "pid : %d\n", existing->gh_owner_pid);
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       existing->gh_gl->gl_name.ln_type,
			       existing->gh_gl->gl_state);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			printk(KERN_INFO "pid : %d\n", gh->gh_owner_pid);
			printk(KERN_INFO "lock type : %d lock state : %d\n",
			       gl->gl_name.ln_type, gl->gl_state);
			BUG();
		}

		existing = find_holder_by_owner(&gl->gl_waiters3,
						gh->gh_owner_pid);
		if (existing) {
			print_symbol(KERN_WARNING "original: %s\n",
				     existing->gh_ip);
			print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
			BUG();
		}
	}

	if (gh->gh_flags & LM_FLAG_PRIORITY)
		list_add(&gh->gh_list, &gl->gl_waiters3);
	else
		list_add_tail(&gh->gh_list, &gl->gl_waiters3);
}

/**
 * gfs2_glock_nq - enqueue a struct gfs2_holder onto a glock (acquire a glock)
 * @gh: the holder structure
 *
 * if (gh->gh_flags & GL_ASYNC), this never returns an error
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_nq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int error = 0;

restart:
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		set_bit(HIF_ABORTED, &gh->gh_iflags);
		return -EIO;
	}

	spin_lock(&gl->gl_spin);
	add_to_queue(gh);
	run_queue(gl);
	spin_unlock(&gl->gl_spin);

	if (!(gh->gh_flags & GL_ASYNC)) {
		error = glock_wait_internal(gh);
		if (error == GLR_CANCELED) {
			msleep(100);
			goto restart;
		}
	}

	return error;
}

/**
 * gfs2_glock_poll - poll to see if an async request has been completed
 * @gh: the holder
 *
 * Returns: 1 if the request is ready to be gfs2_glock_wait()ed on
 */

int gfs2_glock_poll(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	int ready = 0;

	spin_lock(&gl->gl_spin);

	if (test_bit(HIF_HOLDER, &gh->gh_iflags))
		ready = 1;
	else if (list_empty(&gh->gh_list)) {
		if (gh->gh_error == GLR_CANCELED) {
			spin_unlock(&gl->gl_spin);
			msleep(100);
			if (gfs2_glock_nq(gh))
				return 1;
			return 0;
		} else
			ready = 1;
	}

	spin_unlock(&gl->gl_spin);

	return ready;
}

/**
 * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
 * @gh: the holder structure
 *
 * Returns: 0, GLR_TRYFAILED, or errno on failure
 */

int gfs2_glock_wait(struct gfs2_holder *gh)
{
	int error;

	error = glock_wait_internal(gh);
	if (error == GLR_CANCELED) {
		msleep(100);
		gh->gh_flags &= ~GL_ASYNC;
		error = gfs2_glock_nq(gh);
	}

	return error;
}

/**
 * gfs2_glock_dq - dequeue a struct gfs2_holder from a glock (release a glock)
 * @gh: the glock holder
 *
 */

void gfs2_glock_dq(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	unsigned delay = 0;

	if (gh->gh_flags & GL_NOCACHE)
		handle_callback(gl, LM_ST_UNLOCKED, 0, 0);

	gfs2_glmutex_lock(gl);

	spin_lock(&gl->gl_spin);
	list_del_init(&gh->gh_list);

	if (list_empty(&gl->gl_holders)) {
		if (glops->go_unlock) {
			spin_unlock(&gl->gl_spin);
			glops->go_unlock(gh);
			spin_lock(&gl->gl_spin);
		}
		gl->gl_stamp = jiffies;
	}

	clear_bit(GLF_LOCK, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);

	gfs2_glock_hold(gl);
	if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
	    !test_bit(GLF_DEMOTE, &gl->gl_flags))
		delay = gl->gl_ops->go_min_hold_time;
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

void gfs2_glock_dq_wait(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	gfs2_glock_dq(gh);
	wait_on_demote(gl);
}

/**
 * gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
 * @gh: the holder structure
 *
 */

void gfs2_glock_dq_uninit(struct gfs2_holder *gh)
{
	gfs2_glock_dq(gh);
	gfs2_holder_uninit(gh);
}

/**
 * gfs2_glock_nq_num - acquire a glock based on lock number
 * @sdp: the filesystem
 * @number: the lock number
 * @glops: the glock operations for the type of glock
 * @state: the state to acquire the glock in
 * @flags: modifier flags for the acquisition
 * @gh: the struct gfs2_holder
 *
 * Returns: errno
 */

int gfs2_glock_nq_num(struct gfs2_sbd *sdp, u64 number,
		      const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh)
{
	struct gfs2_glock *gl;
	int error;

	error = gfs2_glock_get(sdp, number, glops, CREATE, &gl);
	if (!error) {
		error = gfs2_glock_nq_init(gl, state, flags, gh);
		gfs2_glock_put(gl);
	}

	return error;
}

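/*
 * When several glocks must be held at once they are always acquired in
 * ascending lock-number order (see glock_compare() and nq_m_sync()
 * below), so concurrent multi-lock acquirers cannot deadlock against
 * each other. Illustrative sketch only, with hypothetical glocks gl_a
 * and gl_b:
 *
 *	struct gfs2_holder ghs[2];
 *
 *	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
 *	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);
 *	error = gfs2_glock_nq_m(2, ghs);
 *	if (!error)
 *		gfs2_glock_dq_uninit_m(2, ghs);
 */
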
/**
 * glock_compare - Compare two struct gfs2_glock structures for sorting
 * @arg_a: the first structure
 * @arg_b: the second structure
 *
 */

static int glock_compare(const void *arg_a, const void *arg_b)
{
	const struct gfs2_holder *gh_a = *(const struct gfs2_holder **)arg_a;
	const struct gfs2_holder *gh_b = *(const struct gfs2_holder **)arg_b;
	const struct lm_lockname *a = &gh_a->gh_gl->gl_name;
	const struct lm_lockname *b = &gh_b->gh_gl->gl_name;

	if (a->ln_number > b->ln_number)
		return 1;
	if (a->ln_number < b->ln_number)
		return -1;
	BUG_ON(gh_a->gh_gl->gl_ops->go_type == gh_b->gh_gl->gl_ops->go_type);
	return 0;
}

/**
 * nq_m_sync - synchronously acquire more than one glock in deadlock free order
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
		     struct gfs2_holder **p)
{
	unsigned int x;
	int error = 0;

	for (x = 0; x < num_gh; x++)
		p[x] = &ghs[x];

	sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);

	for (x = 0; x < num_gh; x++) {
		p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);

		error = gfs2_glock_nq(p[x]);
		if (error) {
			while (x--)
				gfs2_glock_dq(p[x]);
			break;
		}
	}

	return error;
}

/**
 * gfs2_glock_nq_m - acquire multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 * Returns: 0 on success (all glocks acquired),
 *          errno on failure (no glocks acquired)
 */

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	struct gfs2_holder *tmp[4];
	struct gfs2_holder **pph = tmp;
	int error = 0;

	switch(num_gh) {
	case 0:
		return 0;
	case 1:
		ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
		return gfs2_glock_nq(ghs);
	default:
		if (num_gh <= 4)
			break;
		pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
		if (!pph)
			return -ENOMEM;
	}

	error = nq_m_sync(num_gh, ghs, pph);

	if (pph != tmp)
		kfree(pph);

	return error;
}

/**
 * gfs2_glock_dq_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq(&ghs[x]);
}

/**
 * gfs2_glock_dq_uninit_m - release multiple glocks
 * @num_gh: the number of structures
 * @ghs: an array of struct gfs2_holder structures
 *
 */

void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
	unsigned int x;

	for (x = 0; x < num_gh; x++)
		gfs2_glock_dq_uninit(&ghs[x]);
}

/**
 * gfs2_lvb_hold - attach a LVB to a glock
 * @gl: The glock in question
 *
 */

int gfs2_lvb_hold(struct gfs2_glock *gl)
{
	int error;

	gfs2_glmutex_lock(gl);

	if (!atomic_read(&gl->gl_lvb_count)) {
		error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
		if (error) {
			gfs2_glmutex_unlock(gl);
			return error;
		}
		gfs2_glock_hold(gl);
	}
	atomic_inc(&gl->gl_lvb_count);

	gfs2_glmutex_unlock(gl);

	return 0;
}

/**
 * gfs2_lvb_unhold - detach a LVB from a glock
 * @gl: The glock in question
 *
 */

void gfs2_lvb_unhold(struct gfs2_glock *gl)
{
	gfs2_glock_hold(gl);
	gfs2_glmutex_lock(gl);

	gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
	if (atomic_dec_and_test(&gl->gl_lvb_count)) {
		gfs2_lm_unhold_lvb(gl->gl_sbd, gl->gl_lock, gl->gl_lvb);
		gl->gl_lvb = NULL;
		gfs2_glock_put(gl);
	}

	gfs2_glmutex_unlock(gl);
	gfs2_glock_put(gl);
}

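/*
 * Remote demote requests are deferred until the glock has been held for
 * at least go_min_hold_time, which keeps a hot lock from ping-ponging
 * between cluster nodes on every access.
 */
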
static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
			unsigned int state)
{
	struct gfs2_glock *gl;
	unsigned long delay = 0;
	unsigned long holdtime;
	unsigned long now = jiffies;

	gl = gfs2_glock_find(sdp, name);
	if (!gl)
		return;

	holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
	if (time_before(now, holdtime))
		delay = holdtime - now;

	handle_callback(gl, state, 1, delay);
	if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
		gfs2_glock_put(gl);
}

/**
 * gfs2_glock_cb - Callback used by locking module
 * @cb_data: Pointer to the superblock
 * @type: Type of callback
 * @data: Type dependent data pointer
 *
 * Called by the locking module when it wants to tell us something.
 * Either we need to drop a lock, one of our ASYNC requests completed, or
 * a journal from another client needs to be recovered.
 */

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
{
	struct gfs2_sbd *sdp = cb_data;

	switch (type) {
	case LM_CB_NEED_E:
		blocking_cb(sdp, data, LM_ST_UNLOCKED);
		return;

	case LM_CB_NEED_D:
		blocking_cb(sdp, data, LM_ST_DEFERRED);
		return;

	case LM_CB_NEED_S:
		blocking_cb(sdp, data, LM_ST_SHARED);
		return;

	case LM_CB_ASYNC: {
		struct lm_async_cb *async = data;
		struct gfs2_glock *gl;

		down_read(&gfs2_umount_flush_sem);
		gl = gfs2_glock_find(sdp, &async->lc_name);
		if (gfs2_assert_warn(sdp, gl))
			return;
		if (!gfs2_assert_warn(sdp, gl->gl_req_bh))
			gl->gl_req_bh(gl, async->lc_ret);
		if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
			gfs2_glock_put(gl);
		up_read(&gfs2_umount_flush_sem);
		return;
	}

	case LM_CB_NEED_RECOVERY:
		gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
		if (sdp->sd_recoverd_process)
			wake_up_process(sdp->sd_recoverd_process);
		return;

	case LM_CB_DROPLOCKS:
		gfs2_gl_hash_clear(sdp, NO_WAIT);
		gfs2_quota_scan(sdp);
		return;

	default:
		gfs2_assert_warn(sdp, 0);
		return;
	}
}

/**
 * demote_ok - Check to see if it's ok to unlock a glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int demote_ok(struct gfs2_glock *gl)
{
	const struct gfs2_glock_operations *glops = gl->gl_ops;
	int demote = 1;

	if (test_bit(GLF_STICKY, &gl->gl_flags))
		demote = 0;
	else if (glops->go_demote_ok)
		demote = glops->go_demote_ok(gl);

	return demote;
}

/**
 * gfs2_glock_schedule_for_reclaim - Add a glock to the reclaim list
 * @gl: the glock
 *
 */

void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&gl->gl_reclaim)) {
		gfs2_glock_hold(gl);
		list_add(&gl->gl_reclaim, &sdp->sd_reclaim_list);
		atomic_inc(&sdp->sd_reclaim_count);
	}
	spin_unlock(&sdp->sd_reclaim_lock);

	wake_up(&sdp->sd_reclaim_wq);
}

/**
 * gfs2_reclaim_glock - process the next glock on the filesystem's reclaim list
 * @sdp: the filesystem
 *
 * Called from gfs2_glockd() glock reclaim daemon, or when promoting a
 * different glock and we notice that there are a lot of glocks in the
 * reclaim list.
 *
 */

void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_reclaim_lock);
	if (list_empty(&sdp->sd_reclaim_list)) {
		spin_unlock(&sdp->sd_reclaim_lock);
		return;
	}
	gl = list_entry(sdp->sd_reclaim_list.next,
			struct gfs2_glock, gl_reclaim);
	list_del_init(&gl->gl_reclaim);
	spin_unlock(&sdp->sd_reclaim_lock);

	atomic_dec(&sdp->sd_reclaim_count);
	atomic_inc(&sdp->sd_reclaimed);

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
		gfs2_glmutex_unlock(gl);
	}

	gfs2_glock_put(gl);
}

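/*
 * examine_bucket() keeps the glock most recently handed to the examiner
 * pinned while the bucket lock is dropped around the callback, so the
 * iteration cursor cannot be freed underneath us.
 */
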
/**
 * examine_bucket - Call a function for each glock in a hash bucket
 * @examiner: the function
 * @sdp: the filesystem
 * @hash: the hash bucket number
 *
 * Returns: 1 if the bucket has entries
 */

static int examine_bucket(glock_examiner examiner, struct gfs2_sbd *sdp,
			  unsigned int hash)
{
	struct gfs2_glock *gl, *prev = NULL;
	int has_entries = 0;
	struct hlist_head *head = &gl_hash_table[hash].hb_list;

	read_lock(gl_lock_addr(hash));
	/* Can't use hlist_for_each_entry - don't want prefetch here */
	if (hlist_empty(head))
		goto out;
	gl = list_entry(head->first, struct gfs2_glock, gl_list);
	while(1) {
		if (!sdp || gl->gl_sbd == sdp) {
			gfs2_glock_hold(gl);
			read_unlock(gl_lock_addr(hash));
			if (prev)
				gfs2_glock_put(prev);
			prev = gl;
			examiner(gl);
			has_entries = 1;
			read_lock(gl_lock_addr(hash));
		}
		if (gl->gl_list.next == NULL)
			break;
		gl = list_entry(gl->gl_list.next, struct gfs2_glock, gl_list);
	}
out:
	read_unlock(gl_lock_addr(hash));
	if (prev)
		gfs2_glock_put(prev);
	cond_resched();
	return has_entries;
}

/**
 * scan_glock - look at a glock and see if we can reclaim it
 * @gl: the glock to look at
 *
 */

static void scan_glock(struct gfs2_glock *gl)
{
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
		return;

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
			goto out_schedule;
		gfs2_glmutex_unlock(gl);
	}
	return;

out_schedule:
	gfs2_glmutex_unlock(gl);
	gfs2_glock_schedule_for_reclaim(gl);
}

/**
 * clear_glock - look at a glock and see if we can free it from glock cache
 * @gl: the glock to look at
 *
 */

static void clear_glock(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_sbd;
	int released;

	spin_lock(&sdp->sd_reclaim_lock);
	if (!list_empty(&gl->gl_reclaim)) {
		list_del_init(&gl->gl_reclaim);
		atomic_dec(&sdp->sd_reclaim_count);
		spin_unlock(&sdp->sd_reclaim_lock);
		released = gfs2_glock_put(gl);
		gfs2_assert(sdp, !released);
	} else {
		spin_unlock(&sdp->sd_reclaim_lock);
	}

	if (gfs2_glmutex_trylock(gl)) {
		if (list_empty(&gl->gl_holders) &&
		    gl->gl_state != LM_ST_UNLOCKED)
			handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
		gfs2_glmutex_unlock(gl);
	}
}

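/*
 * gfs2_gl_hash_clear() below simply retries until every bucket has
 * drained; if an unmount appears stuck it periodically dumps the full
 * lock state to help track down leaked glock references.
 */
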
/**
 * gfs2_gl_hash_clear - Empty out the glock hash table
 * @sdp: the filesystem
 * @wait: wait until it's all gone
 *
 * Called when unmounting the filesystem, or when inter-node lock manager
 * requests DROPLOCKS because it is running out of capacity.
 */

void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
{
	unsigned long t;
	unsigned int x;
	int cont;

	t = jiffies;

	for (;;) {
		cont = 0;
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {
			if (examine_bucket(clear_glock, sdp, x))
				cont = 1;
		}

		if (!wait || !cont)
			break;

		if (time_after_eq(jiffies,
				  t + gfs2_tune_get(sdp, gt_stall_secs) * HZ)) {
			fs_warn(sdp, "Unmount seems to be stalled. "
				     "Dumping lock state...\n");
			gfs2_dump_lockstate(sdp);
			t = jiffies;
		}

		down_write(&gfs2_umount_flush_sem);
		invalidate_inodes(sdp->sd_vfs);
		up_write(&gfs2_umount_flush_sem);
		msleep(10);
	}
}

/*
 * Diagnostic routines to help debug distributed deadlock
 */

static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
			      unsigned long address)
{
	char buffer[KSYM_SYMBOL_LEN];

	sprint_symbol(buffer, address);
	print_dbg(gi, fmt, buffer);
}

/**
 * dump_holder - print information about a glock holder
 * @gi: the glock iterator (NULL to print to the console)
 * @str: a string naming the type of holder
 * @gh: the glock holder
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_holder(struct glock_iter *gi, char *str,
		       struct gfs2_holder *gh)
{
	unsigned int x;
	struct task_struct *gh_owner;

	print_dbg(gi, " %s\n", str);
	if (gh->gh_owner_pid) {
		print_dbg(gi, " owner = %ld ", (long)gh->gh_owner_pid);
		gh_owner = find_task_by_pid(gh->gh_owner_pid);
		if (gh_owner)
			print_dbg(gi, "(%s)\n", gh_owner->comm);
		else
			print_dbg(gi, "(ended)\n");
	} else
		print_dbg(gi, " owner = -1\n");
	print_dbg(gi, " gh_state = %u\n", gh->gh_state);
	print_dbg(gi, " gh_flags =");
	for (x = 0; x < 32; x++)
		if (gh->gh_flags & (1 << x))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	print_dbg(gi, " error = %d\n", gh->gh_error);
	print_dbg(gi, " gh_iflags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &gh->gh_iflags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);

	return 0;
}

/**
 * dump_inode - print information about an inode
 * @gi: the glock iterator (NULL to print to the console)
 * @ip: the inode
 *
 * Returns: 0 on success, -ENOBUFS when we run out of space
 */

static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
{
	unsigned int x;

	print_dbg(gi, " Inode:\n");
	print_dbg(gi, " num = %llu/%llu\n",
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr);
	print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
	print_dbg(gi, " i_flags =");
	for (x = 0; x < 32; x++)
		if (test_bit(x, &ip->i_flags))
			print_dbg(gi, " %u", x);
	print_dbg(gi, " \n");
	return 0;
}

1850/**
1851 * dump_glock - print information about a glock
1852 * @gl: the glock
1853 * @count: where we are in the buffer
1854 *
1855 * Returns: 0 on success, -ENOBUFS when we run out of space
1856 */
1857
7c52b166 1858static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
b3b94faa
DT
1859{
1860 struct gfs2_holder *gh;
1861 unsigned int x;
1862 int error = -ENOBUFS;
04b933f2 1863 struct task_struct *gl_owner;
b3b94faa
DT
1864
1865 spin_lock(&gl->gl_spin);
1866
a947e033 1867 print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
7c52b166
RP
1868 (unsigned long long)gl->gl_name.ln_number);
1869 print_dbg(gi, " gl_flags =");
85d1da67 1870 for (x = 0; x < 32; x++) {
b3b94faa 1871 if (test_bit(x, &gl->gl_flags))
7c52b166
RP
1872 print_dbg(gi, " %u", x);
1873 }
04b933f2
RP
1874 if (!test_bit(GLF_LOCK, &gl->gl_flags))
1875 print_dbg(gi, " (unlocked)");
7c52b166
RP
1876 print_dbg(gi, " \n");
1877 print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
1878 print_dbg(gi, " gl_state = %u\n", gl->gl_state);
04b933f2
RP
1879 if (gl->gl_owner_pid) {
1880 gl_owner = find_task_by_pid(gl->gl_owner_pid);
1881 if (gl_owner)
1882 print_dbg(gi, " gl_owner = pid %d (%s)\n",
1883 gl->gl_owner_pid, gl_owner->comm);
1884 else
1885 print_dbg(gi, " gl_owner = %d (ended)\n",
1886 gl->gl_owner_pid);
1887 } else
1888 print_dbg(gi, " gl_owner = -1\n");
7c52b166
RP
1889 print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
1890 print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
1891 print_dbg(gi, " req_bh = %s\n", (gl->gl_req_bh) ? "yes" : "no");
1892 print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
1893 print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
7c52b166
RP
1894 print_dbg(gi, " reclaim = %s\n",
1895 (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
b3b94faa 1896 if (gl->gl_aspace)
7c52b166
RP
1897 print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
1898 gl->gl_aspace->i_mapping->nrpages);
b3b94faa 1899 else
7c52b166
RP
1900 print_dbg(gi, " aspace = no\n");
1901 print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
b3b94faa 1902 if (gl->gl_req_gh) {
7c52b166 1903 error = dump_holder(gi, "Request", gl->gl_req_gh);
b3b94faa
DT
1904 if (error)
1905 goto out;
1906 }
1907 list_for_each_entry(gh, &gl->gl_holders, gh_list) {
7c52b166 1908 error = dump_holder(gi, "Holder", gh);
b3b94faa
DT
1909 if (error)
1910 goto out;
1911 }
1912 list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
7c52b166 1913 error = dump_holder(gi, "Waiter1", gh);
b3b94faa
DT
1914 if (error)
1915 goto out;
1916 }
b3b94faa 1917 list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
7c52b166 1918 error = dump_holder(gi, "Waiter3", gh);
b3b94faa
DT
1919 if (error)
1920 goto out;
1921 }
3b8249f6
SW
1922 if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
1923 print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
cd81a4ba
RP
1924 gl->gl_demote_state, (unsigned long long)
1925 (jiffies - gl->gl_demote_time)*(1000000/HZ));
3b8249f6 1926 }
	if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
		if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
		    list_empty(&gl->gl_holders)) {
			error = dump_inode(gi, gl->gl_object);
			if (error)
				goto out;
		} else {
			error = -ENOBUFS;
			print_dbg(gi, "  Inode: busy\n");
		}
	}

	error = 0;

out:
	spin_unlock(&gl->gl_spin);
	return error;
}

/**
 * gfs2_dump_lockstate - dump the current lockstate to the console
 * @sdp: the filesystem
 *
 */

static int gfs2_dump_lockstate(struct gfs2_sbd *sdp)
{
	struct gfs2_glock *gl;
	struct hlist_node *h;
	unsigned int x;
	int error = 0;

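	/*
	 * Walk each hash bucket under its read lock, dumping every glock
	 * that belongs to this superblock.
	 */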
	for (x = 0; x < GFS2_GL_HASH_SIZE; x++) {

		read_lock(gl_lock_addr(x));

		hlist_for_each_entry(gl, h, &gl_hash_table[x].hb_list, gl_list) {
			if (gl->gl_sbd != sdp)
				continue;

			error = dump_glock(NULL, gl);
			if (error)
				break;
		}

		read_unlock(gl_lock_addr(x));

		if (error)
			break;
	}

	return error;
}

/**
 * gfs2_scand - Look for cached glocks and inodes to toss from memory
 * @data: unused; a single scanner thread services all GFS2 mounts
 *
 * One of these daemons runs, finding candidates to add to sd_reclaim_list.
 * See gfs2_glockd()
 */

static int gfs2_scand(void *data)
{
	unsigned x;
	unsigned delay;

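	/*
	 * Sweep all hash buckets for reclaim candidates, then sleep for
	 * scand_secs (clamped to at least one second), cooperating with
	 * the freezer across suspend/resume.
	 */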
	while (!kthread_should_stop()) {
		for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
			examine_bucket(scan_glock, NULL, x);
		if (freezing(current))
			refrigerator();
		delay = scand_secs;
		if (delay < 1)
			delay = 1;
		schedule_timeout_interruptible(delay * HZ);
	}

	return 0;
}


int __init gfs2_glock_init(void)
{
	unsigned i;
	for (i = 0; i < GFS2_GL_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&gl_hash_table[i].hb_list);
	}
#ifdef GL_HASH_LOCK_SZ
	for (i = 0; i < GL_HASH_LOCK_SZ; i++) {
		rwlock_init(&gl_hash_locks[i]);
	}
#endif

	scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
	if (IS_ERR(scand_process))
		return PTR_ERR(scand_process);

	/*
	 * create_workqueue() returns NULL on failure rather than an
	 * ERR_PTR, so test for that and return -ENOMEM ourselves.
	 */
	glock_workqueue = create_workqueue("glock_workqueue");
	if (!glock_workqueue) {
		kthread_stop(scand_process);
		return -ENOMEM;
	}

	return 0;
}

void gfs2_glock_exit(void)
{
	destroy_workqueue(glock_workqueue);
	kthread_stop(scand_process);
}

module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");

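/*
 * Being S_IWUSR, scand_secs can be tuned at runtime; assuming sysfs is
 * mounted in the usual place, something like:
 *   echo 10 > /sys/module/gfs2/parameters/scand_secs
 */
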
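/*
 * Advance the iterator: grab a reference on the successor glock under
 * the bucket read lock before dropping the current one, walking on to
 * later buckets as each chain is exhausted. Glocks belonging to other
 * superblocks are skipped via the restart path; returns 1 once the
 * whole table has been traversed.
 */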
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
	struct gfs2_glock *gl;

restart:
	read_lock(gl_lock_addr(gi->hash));
	gl = gi->gl;
	if (gl) {
		gi->gl = hlist_entry(gl->gl_list.next,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
	}
	read_unlock(gl_lock_addr(gi->hash));
	if (gl)
		gfs2_glock_put(gl);
	if (gl && gi->gl == NULL)
		gi->hash++;
	while (gi->gl == NULL) {
		if (gi->hash >= GFS2_GL_HASH_SIZE)
			return 1;
		read_lock(gl_lock_addr(gi->hash));
		gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
				     struct gfs2_glock, gl_list);
		if (gi->gl)
			gfs2_glock_hold(gi->gl);
		read_unlock(gl_lock_addr(gi->hash));
		gi->hash++;
	}

	if (gi->sdp != gi->gl->gl_sbd)
		goto restart;

	return 0;
}

static void gfs2_glock_iter_free(struct glock_iter *gi)
{
	if (gi->gl)
		gfs2_glock_put(gi->gl);
	kfree(gi);
}

static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
{
	struct glock_iter *gi;

	gi = kmalloc(sizeof(*gi), GFP_KERNEL);
	if (!gi)
		return NULL;

	gi->sdp = sdp;
	gi->hash = 0;
	gi->seq = NULL;
	gi->gl = NULL;
	memset(gi->string, 0, sizeof(gi->string));

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi;
}

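/*
 * seq_file callbacks: ->start allocates a fresh iterator and advances
 * it *pos entries, so resuming a partially read dump costs a rescan of
 * the entries already consumed.
 */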
static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
{
	struct glock_iter *gi;
	loff_t n = *pos;

	gi = gfs2_glock_iter_init(file->private);
	if (!gi)
		return NULL;

	while (n--) {
		if (gfs2_glock_iter_next(gi)) {
			gfs2_glock_iter_free(gi);
			return NULL;
		}
	}

	return gi;
}

static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
				 loff_t *pos)
{
	struct glock_iter *gi = iter_ptr;

	(*pos)++;

	if (gfs2_glock_iter_next(gi)) {
		gfs2_glock_iter_free(gi);
		return NULL;
	}

	return gi;
}

static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
	struct glock_iter *gi = iter_ptr;
	if (gi)
		gfs2_glock_iter_free(gi);
}

static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
{
	struct glock_iter *gi = iter_ptr;

	/* Attach the seq_file so print_dbg() output lands in it */
	gi->seq = file;
	dump_glock(gi, gi->gl);

	return 0;
}

static const struct seq_operations gfs2_glock_seq_ops = {
	.start = gfs2_glock_seq_start,
	.next  = gfs2_glock_seq_next,
	.stop  = gfs2_glock_seq_stop,
	.show  = gfs2_glock_seq_show,
};

static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int ret;

	ret = seq_open(file, &gfs2_glock_seq_ops);
	if (ret)
		return ret;

	/* i_private carries the sdp set up in gfs2_create_debugfs_file() */
	seq = file->private_data;
	seq->private = inode->i_private;

	return 0;
}

static const struct file_operations gfs2_debug_fops = {
	.owner   = THIS_MODULE,
	.open    = gfs2_debugfs_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

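/*
 * Per-mount debugfs entry. Assuming debugfs is mounted at the usual
 * /sys/kernel/debug, the glock dump for a filesystem can be read with:
 *   cat /sys/kernel/debug/gfs2/<table_name>/glocks
 */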
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
{
	sdp->debugfs_dir = debugfs_create_dir(sdp->sd_table_name, gfs2_root);
	if (!sdp->debugfs_dir)
		return -ENOMEM;
	sdp->debugfs_dentry_glocks = debugfs_create_file("glocks",
							 S_IFREG | S_IRUGO,
							 sdp->debugfs_dir, sdp,
							 &gfs2_debug_fops);
	if (!sdp->debugfs_dentry_glocks)
		return -ENOMEM;

	return 0;
}

void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp)
{
	if (sdp && sdp->debugfs_dir) {
		if (sdp->debugfs_dentry_glocks) {
			debugfs_remove(sdp->debugfs_dentry_glocks);
			sdp->debugfs_dentry_glocks = NULL;
		}
		debugfs_remove(sdp->debugfs_dir);
		sdp->debugfs_dir = NULL;
	}
}

int gfs2_register_debugfs(void)
{
	gfs2_root = debugfs_create_dir("gfs2", NULL);
	return gfs2_root ? 0 : -ENOMEM;
}

void gfs2_unregister_debugfs(void)
{
	debugfs_remove(gfs2_root);
	gfs2_root = NULL;
}