/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "recoverd.h"
18#include "ast.h"
19#include "dir.h"
20#include "lowcomms.h"
21#include "config.h"
22#include "memory.h"
23#include "lock.h"
c56b39cd 24#include "recover.h"
2896ee37 25#include "requestqueue.h"
#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
/* Debugfs disabled: stub out the debug-file hooks. */
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif

/* Number of live lockspaces; read/written under ls_lock. */
static int ls_count;
static struct mutex ls_lock;
/* Global list of all lockspaces; guarded by lslist_lock. */
static struct list_head lslist;
static spinlock_t lslist_lock;
/* The dlm_scand kthread, started by dlm_scand_start(). */
static struct task_struct * scand_task;

42static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
43{
44 ssize_t ret = len;
45 int n = simple_strtol(buf, NULL, 0);
46
e2de7f56
PC
47 ls = dlm_find_lockspace_local(ls->ls_local_handle);
48 if (!ls)
49 return -EINVAL;
50
e7fd4179
DT
51 switch (n) {
52 case 0:
53 dlm_ls_stop(ls);
54 break;
55 case 1:
56 dlm_ls_start(ls);
57 break;
58 default:
59 ret = -EINVAL;
60 }
e2de7f56 61 dlm_put_lockspace(ls);
e7fd4179
DT
62 return ret;
63}
64
65static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
66{
67 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
68 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
69 wake_up(&ls->ls_uevent_wait);
70 return len;
71}
72
73static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
74{
a1d144c7 75 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
e7fd4179
DT
76}
77
78static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
79{
80 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
81 return len;
82}
83
c56b39cd
DT
84static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
85{
86 uint32_t status = dlm_recover_status(ls);
a1d144c7 87 return snprintf(buf, PAGE_SIZE, "%x\n", status);
c56b39cd
DT
88}
89
faa0f267
DT
90static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
91{
a1d144c7 92 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
faa0f267
DT
93}
94
/*
 * A dlm sysfs attribute: the generic struct attribute plus show/store
 * callbacks that receive the owning lockspace directly.  Either
 * callback may be NULL (see dlm_attr_show/dlm_attr_store).
 */
struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};
100
/* "control": write-only; 0 stops the lockspace, 1 starts it. */
static struct dlm_attr dlm_attr_control = {
	.attr = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

/* "event_done": write-only; userspace reports uevent results here. */
static struct dlm_attr dlm_attr_event = {
	.attr = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

/* "id": read/write global lockspace id. */
static struct dlm_attr dlm_attr_id = {
	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show = dlm_id_show,
	.store = dlm_id_store
};

/* "recover_status": read-only recovery status bits. */
static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};

/* "recover_nodeid": read-only; value of ls_recover_nodeid. */
static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};

/* NULL-terminated list wired into dlm_ktype.default_attrs. */
static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};
135
136static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
137 char *buf)
138{
139 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
140 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
141 return a->show ? a->show(ls, buf) : 0;
142}
143
144static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
145 const char *buf, size_t len)
146{
147 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
148 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
149 return a->store ? a->store(ls, buf, len) : len;
150}
151
ba542e3b
PC
152static void lockspace_kobj_release(struct kobject *k)
153{
154 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
155 kfree(ls);
156}
157
/* Route generic sysfs show/store through the dlm_attr callbacks above. */
static struct sysfs_ops dlm_attr_ops = {
	.show = dlm_attr_show,
	.store = dlm_attr_store,
};

/* kobject type for a lockspace; release() frees the dlm_ls itself. */
static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops = &dlm_attr_ops,
	.release = lockspace_kobj_release,
};

/* The "dlm" kset; registered in dlm_lockspace_init(). */
static struct kset dlm_kset = {
	.kobj = {.name = "dlm",},
	.ktype = &dlm_ktype,
};
173
174static int kobject_setup(struct dlm_ls *ls)
175{
176 char lsname[DLM_LOCKSPACE_LEN];
177 int error;
178
179 memset(lsname, 0, DLM_LOCKSPACE_LEN);
180 snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);
181
182 error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
183 if (error)
184 return error;
185
186 ls->ls_kobj.kset = &dlm_kset;
187 ls->ls_kobj.ktype = &dlm_ktype;
188 return 0;
189}
190
191static int do_uevent(struct dlm_ls *ls, int in)
192{
193 int error;
194
195 if (in)
196 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
197 else
198 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
199
200 error = wait_event_interruptible(ls->ls_uevent_wait,
201 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
202 if (error)
203 goto out;
204
205 error = ls->ls_uevent_result;
206 out:
207 return error;
208}
209
210
211int dlm_lockspace_init(void)
212{
213 int error;
214
215 ls_count = 0;
90135925 216 mutex_init(&ls_lock);
e7fd4179
DT
217 INIT_LIST_HEAD(&lslist);
218 spin_lock_init(&lslist_lock);
219
823bccfc 220 kobj_set_kset_s(&dlm_kset, kernel_subsys);
e7fd4179
DT
221 error = kset_register(&dlm_kset);
222 if (error)
223 printk("dlm_lockspace_init: cannot register kset %d\n", error);
224 return error;
225}
226
/* Module-exit hook: remove the "dlm" kset registered at init time. */
void dlm_lockspace_exit(void)
{
	kset_unregister(&dlm_kset);
}
231
/*
 * Kernel thread: periodically walk every lockspace and scan its rsb
 * tables (dlm_scan_rsbs), sleeping ci_scan_secs between passes.
 */
static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		/* NOTE(review): lslist is traversed here without taking
		   lslist_lock; this relies on lockspace removal being
		   held off elsewhere — verify against remove_lockspace()
		   and release_lockspace() teardown ordering. */
		list_for_each_entry(ls, &lslist, ls_list)
			dlm_scan_rsbs(ls);
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}
243
244static int dlm_scand_start(void)
245{
246 struct task_struct *p;
247 int error = 0;
248
249 p = kthread_run(dlm_scand, NULL, "dlm_scand");
250 if (IS_ERR(p))
251 error = PTR_ERR(p);
252 else
253 scand_task = p;
254 return error;
255}
256
257static void dlm_scand_stop(void)
258{
259 kthread_stop(scand_task);
260}
261
262static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
263{
264 struct dlm_ls *ls;
265
266 spin_lock(&lslist_lock);
267
268 list_for_each_entry(ls, &lslist, ls_list) {
269 if (ls->ls_namelen == namelen &&
270 memcmp(ls->ls_name, name, namelen) == 0)
271 goto out;
272 }
273 ls = NULL;
274 out:
275 spin_unlock(&lslist_lock);
276 return ls;
277}
278
279struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
280{
281 struct dlm_ls *ls;
282
283 spin_lock(&lslist_lock);
284
285 list_for_each_entry(ls, &lslist, ls_list) {
286 if (ls->ls_global_id == id) {
287 ls->ls_count++;
288 goto out;
289 }
290 }
291 ls = NULL;
292 out:
293 spin_unlock(&lslist_lock);
294 return ls;
295}
296
597d0cae 297struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
e7fd4179 298{
597d0cae 299 struct dlm_ls *ls;
e7fd4179
DT
300
301 spin_lock(&lslist_lock);
597d0cae
DT
302 list_for_each_entry(ls, &lslist, ls_list) {
303 if (ls->ls_local_handle == lockspace) {
304 ls->ls_count++;
305 goto out;
306 }
307 }
308 ls = NULL;
309 out:
310 spin_unlock(&lslist_lock);
311 return ls;
312}
313
314struct dlm_ls *dlm_find_lockspace_device(int minor)
315{
316 struct dlm_ls *ls;
317
318 spin_lock(&lslist_lock);
319 list_for_each_entry(ls, &lslist, ls_list) {
320 if (ls->ls_device.minor == minor) {
321 ls->ls_count++;
322 goto out;
323 }
324 }
325 ls = NULL;
326 out:
e7fd4179
DT
327 spin_unlock(&lslist_lock);
328 return ls;
329}
330
/* Drop a reference taken by one of the dlm_find_lockspace_*() lookups. */
void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}
337
338static void remove_lockspace(struct dlm_ls *ls)
339{
340 for (;;) {
341 spin_lock(&lslist_lock);
342 if (ls->ls_count == 0) {
343 list_del(&ls->ls_list);
344 spin_unlock(&lslist_lock);
345 return;
346 }
347 spin_unlock(&lslist_lock);
348 ssleep(1);
349 }
350}
351
/*
 * Start the daemon threads shared by all lockspaces: the AST delivery
 * thread, the rsb scanner, and the lowcomms messaging layer.  On any
 * failure, the threads already started are stopped again.
 */
static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		return error;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		dlm_astd_stop();
		return error;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		dlm_scand_stop();
		dlm_astd_stop();
		return error;
	}

	return 0;
}
385
/* Stop the shared daemon threads started by threads_start(). */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
392
/*
 * Create and initialize a lockspace: allocate the dlm_ls (with the name
 * appended), set up the rsb/lkb/dir hash tables and all internal state,
 * add it to lslist, start its recovery daemon, create its debugfs file,
 * register its sysfs kobject, and finally announce it to userspace.
 * Returns 0 with *lockspace set, -EEXIST if the name is already in use,
 * or a negative errno.  Called with ls_lock held (see dlm_new_lockspace).
 */
static int new_lockspace(char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error = -ENOMEM;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	/* LVB length must be a non-zero multiple of 8 */
	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	ls = dlm_find_lockspace_name(name, namelen);
	if (ls) {
		*lockspace = ls;
		module_put(THIS_MODULE);
		return -EEXIST;
	}

	/* namelen extra bytes for the name copied below */
	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_exflags = flags;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;

	/* Resource (rsb) hash table */
	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		rwlock_init(&ls->ls_rsbtbl[i].lock);
	}

	/* Lock (lkb) hash table; counter seeds lkb id generation */
	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	/* Resource directory hash table */
	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		rwlock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	/* random starting sequence number for rcom messages */
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	/* local handle is the ls pointer itself; see dlm_find_lockspace_local */
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	/* hold lock requests until the first recovery completes */
	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_rcomfree;
	}

	dlm_create_debug_file(ls);

	error = kobject_setup(ls);
	if (error)
		goto out_del;

	error = kobject_register(&ls->ls_kobj);
	if (error)
		goto out_del;

	error = do_uevent(ls, 1);
	if (error)
		goto out_unreg;

	*lockspace = ls;
	return 0;

	/* NOTE(review): once kobject_register() has succeeded,
	   kobject_unregister() drops the last reference and
	   lockspace_kobj_release() kfrees ls — so falling through to
	   the kfree(ls) at out_lsfree looks like a double free on the
	   out_unreg path.  Verify against the kobject refcounting rules
	   for this kernel version. */
 out_unreg:
	kobject_unregister(&ls->ls_kobj);
 out_del:
	dlm_delete_debug_file(ls);
	dlm_recoverd_stop(ls);
 out_rcomfree:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}
558
559int dlm_new_lockspace(char *name, int namelen, void **lockspace,
560 uint32_t flags, int lvblen)
561{
562 int error = 0;
563
90135925 564 mutex_lock(&ls_lock);
e7fd4179
DT
565 if (!ls_count)
566 error = threads_start();
567 if (error)
568 goto out;
569
570 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
571 if (!error)
572 ls_count++;
573 out:
90135925 574 mutex_unlock(&ls_lock);
e7fd4179
DT
575 return error;
576}
577
578/* Return 1 if the lockspace still has active remote locks,
579 * 2 if the lockspace still has active local locks.
580 */
581static int lockspace_busy(struct dlm_ls *ls)
582{
583 int i, lkb_found = 0;
584 struct dlm_lkb *lkb;
585
586 /* NOTE: We check the lockidtbl here rather than the resource table.
587 This is because there may be LKBs queued as ASTs that have been
588 unlinked from their RSBs and are pending deletion once the AST has
589 been delivered */
590
591 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
592 read_lock(&ls->ls_lkbtbl[i].lock);
593 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
594 lkb_found = 1;
595 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
596 lkb_idtbl_list) {
597 if (!lkb->lkb_nodeid) {
598 read_unlock(&ls->ls_lkbtbl[i].lock);
599 return 2;
600 }
601 }
602 }
603 read_unlock(&ls->ls_lkbtbl[i].lock);
604 }
605 return lkb_found;
606}
607
/*
 * Tear a lockspace down and free everything it owns.  The ordering
 * matters: the lockspace is first removed from lslist (after waiting
 * for ls_count to drain) and its debugfs file deleted, so neither
 * dlm_scand nor a debugfs reader can reach the rsb lists while they
 * are being freed below.  Returns -EBUSY if the lockspace is busier
 * than `force` allows (see dlm_release_lockspace for force values).
 */
static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i;
	int busy = lockspace_busy(ls);

	if (busy > force)
		return -EBUSY;

	/* force == 3 (forced shutdown) skips the offline uevent */
	if (force < 3)
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	/* waits for ls_count == 0, then unlinks from lslist */
	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	/* keep the AST thread away while lkbs are being torn down */
	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			/* master-copy lkbs own their lvb buffer */
			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				free_lvb(lkb->lkb_lvbptr);

			free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}

		/* tossed (unused, pending-free) rsbs live on a second list */
		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	kobject_unregister(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is done with */

	/* last lockspace gone -> stop the shared daemon threads */
	mutex_lock(&ls_lock);
	ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	module_put(THIS_MODULE);
	return 0;
}
710
711/*
712 * Called when a system has released all its locks and is not going to use the
713 * lockspace any longer. We free everything we're managing for this lockspace.
714 * Remaining nodes will go through the recovery process as if we'd died. The
715 * lockspace must continue to function as usual, participating in recoveries,
716 * until this returns.
717 *
718 * Force has 4 possible values:
719 * 0 - don't destroy locksapce if it has any LKBs
720 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
721 * 2 - destroy lockspace regardless of LKBs
722 * 3 - destroy lockspace as part of a forced shutdown
723 */
724
725int dlm_release_lockspace(void *lockspace, int force)
726{
727 struct dlm_ls *ls;
728
729 ls = dlm_find_lockspace_local(lockspace);
730 if (!ls)
731 return -EINVAL;
732 dlm_put_lockspace(ls);
733 return release_lockspace(ls, force);
734}
735
This page took 0.128159 seconds and 5 git commands to generate.