fs/dlm/lockspace.c
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
** of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"
#include "user.h"

static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct * scand_task;


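/* sysfs "control" attribute: dlm_controld writes 0 to stop a lockspace and 1
   to start it again around recovery (see dlm_ls_stop()/dlm_ls_start()) */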
static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n = simple_strtol(buf, NULL, 0);

	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}

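/* sysfs "event_done" attribute: dlm_controld writes the result of the group
   join/leave it performed, which wakes the waiter in do_uevent() below */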
static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
	return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);
	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

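/* per-lockspace sysfs attributes, exposed under /sys/kernel/dlm/<ls_name>/
   once the lockspace kobject is added to dlm_kset in new_lockspace() */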
struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
	.attr = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
	.attr = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show = dlm_id_show,
	.store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}

static struct sysfs_ops dlm_attr_ops = {
	.show = dlm_attr_show,
	.store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops = &dlm_attr_ops,
	.release = lockspace_kobj_release,
};

static struct kset *dlm_kset;

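/* Send an ONLINE/OFFLINE uevent asking dlm_controld to join or leave the
   lockspace group, then sleep until it reports the result through the
   "event_done" sysfs file (dlm_event_store() above). */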
static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

	if (error)
		goto out;

	error = ls->ls_uevent_result;
 out:
	if (error)
		log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
			  error, ls->ls_uevent_result);
	return error;
}


int __init dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: can not create kset\n", __func__);
		return -ENOMEM;
	}
	return 0;
}

void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}

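/* dlm_scand: kernel thread that periodically picks a lockspace whose scan
   interval has elapsed and scans it for unused rsbs and lock timeouts */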
static struct dlm_ls *find_ls_to_scan(void)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (time_after_eq(jiffies, ls->ls_scan_time +
					   dlm_config.ci_scan_secs * HZ)) {
			spin_unlock(&lslist_lock);
			return ls;
		}
	}
	spin_unlock(&lslist_lock);
	return NULL;
}

static int dlm_scand(void *data)
{
	struct dlm_ls *ls;
	int timeout_jiffies = dlm_config.ci_scan_secs * HZ;

	while (!kthread_should_stop()) {
		ls = find_ls_to_scan();
		if (ls) {
			if (dlm_lock_recovery_try(ls)) {
				ls->ls_scan_time = jiffies;
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			} else {
				ls->ls_scan_time += HZ;
			}
		} else {
			schedule_timeout_interruptible(timeout_jiffies);
		}
	}
	return 0;
}

static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}

static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}

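/* Lockspace lookup helpers: each successful find bumps ls_count, so callers
   must drop the reference with dlm_put_lockspace() when they are done. */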
struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}

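/* Wait for ls_count to drop to zero, then unlink the lockspace from lslist. */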
static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			WARN_ON(ls->ls_create_count != 0);
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}

static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}

static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}

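/* Create a lockspace (or reuse an existing one of the same name): allocate
   and initialize the dlm_ls and its rsb/lkb/dir tables, start dlm_recoverd,
   register the sysfs kobject, then ask dlm_controld to join the lockspace
   group and wait for the initial member list. */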
static int new_lockspace(const char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	if (!dlm_user_daemon_available()) {
		module_put(THIS_MODULE);
		return -EUNATCH;
	}

	error = 0;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		WARN_ON(ls->ls_create_count <= 0);
		if (ls->ls_namelen != namelen)
			continue;
		if (memcmp(ls->ls_name, name, namelen))
			continue;
		if (flags & DLM_LSFL_NEWEXCL) {
			error = -EEXIST;
			break;
		}
		ls->ls_create_count++;
		*lockspace = ls;
		error = 1;
		break;
	}
	spin_unlock(&lslist_lock);

	if (error)
		goto out;

	error = -ENOMEM;

	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_NOFS);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;
	ls->ls_scan_time = jiffies;

	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have some flags set */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
				    DLM_LSFL_NEWEXCL));

	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_NOFS);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		spin_lock_init(&ls->ls_rsbtbl[i].lock);
	}

	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_NOFS);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_NOFS);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		spin_lock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_members_done);
	ls->ls_members_result = -1;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_NOFS);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	ls->ls_create_count = 1;
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	ls->ls_kobj.kset = dlm_kset;
	error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
				     "%s", ls->ls_name);
	if (error)
		goto out_stop;
	kobject_uevent(&ls->ls_kobj, KOBJ_ADD);

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */

	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	wait_for_completion(&ls->ls_members_done);
	error = ls->ls_members_result;
	if (error)
		goto out_members;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");
	*lockspace = ls;
	return 0;

 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_put(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}

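/* Exported entry point: starts the shared dlm threads on first use, then
   creates (or attaches to) the named lockspace. */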
int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
		      uint32_t flags, int lvblen)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
	if (!error)
		ls_count++;
	if (error > 0)
		error = 0;
	if (!ls_count)
		threads_stop();
 out:
	mutex_unlock(&ls_lock);
	return error;
}

/* Return 1 if the lockspace still has active remote locks,
 * 2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}

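/* Tear down a lockspace: drop the create count, deregister the per-lockspace
   device (dlm_device_deregister), notify dlm_controld, stop dlm_recoverd, and
   free every lkb, rsb and table still attached to the lockspace. */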
static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i, busy, rv;

	busy = lockspace_busy(ls);

	spin_lock(&lslist_lock);
	if (ls->ls_create_count == 1) {
		if (busy > force)
			rv = -EBUSY;
		else {
			/* remove_lockspace takes ls off lslist */
			ls->ls_create_count = 0;
			rv = 0;
		}
	} else if (ls->ls_create_count > 1) {
		rv = --ls->ls_create_count;
	} else {
		rv = -EINVAL;
	}
	spin_unlock(&lslist_lock);

	if (rv) {
		log_debug(ls, "release_lockspace no remove %d", rv);
		return rv;
	}

	dlm_device_deregister(ls);

	if (force < 3 && dlm_user_daemon_available())
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				dlm_free_lvb(lkb->lkb_lvbptr);

			dlm_free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			dlm_free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists.
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	log_debug(ls, "release_lockspace final free");
	kobject_put(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is released */

	module_put(THIS_MODULE);
	return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 * 0 - don't destroy lockspace if it has any LKBs
 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 * 2 - destroy lockspace regardless of LKBs
 * 3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;
	int error;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);

	mutex_lock(&ls_lock);
	error = release_lockspace(ls, force);
	if (!error)
		ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	return error;
}

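/* Called when the userland control daemon is no longer available: stop every
   lockspace that is still running. */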
void dlm_stop_lockspaces(void)
{
	struct dlm_ls *ls;

 restart:
	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
			continue;
		spin_unlock(&lslist_lock);
		log_error(ls, "no userland control daemon, stopping lockspace");
		dlm_ls_stop(ls);
		goto restart;
	}
	spin_unlock(&lslist_lock);
}