kobject: convert kernel_kset to be a kobject
[deliverable/linux.git] / fs/dlm/lockspace.c
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "ast.h"
#include "dir.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
#include "lock.h"
#include "recover.h"
#include "requestqueue.h"

#ifdef CONFIG_DLM_DEBUG
int dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
#else
static inline int dlm_create_debug_file(struct dlm_ls *ls) { return 0; }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
#endif

static int ls_count;
static struct mutex ls_lock;
static struct list_head lslist;
static spinlock_t lslist_lock;
static struct task_struct *scand_task;


static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ssize_t ret = len;
	int n = simple_strtol(buf, NULL, 0);

	ls = dlm_find_lockspace_local(ls->ls_local_handle);
	if (!ls)
		return -EINVAL;

	switch (n) {
	case 0:
		dlm_ls_stop(ls);
		break;
	case 1:
		dlm_ls_start(ls);
		break;
	default:
		ret = -EINVAL;
	}
	dlm_put_lockspace(ls);
	return ret;
}

static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
	set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
	wake_up(&ls->ls_uevent_wait);
	return len;
}

static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
}

static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
{
	ls->ls_global_id = simple_strtoul(buf, NULL, 0);
	return len;
}

static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
{
	uint32_t status = dlm_recover_status(ls);
	return snprintf(buf, PAGE_SIZE, "%x\n", status);
}

static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
}

struct dlm_attr {
	struct attribute attr;
	ssize_t (*show)(struct dlm_ls *, char *);
	ssize_t (*store)(struct dlm_ls *, const char *, size_t);
};

static struct dlm_attr dlm_attr_control = {
	.attr = {.name = "control", .mode = S_IWUSR},
	.store = dlm_control_store
};

static struct dlm_attr dlm_attr_event = {
	.attr = {.name = "event_done", .mode = S_IWUSR},
	.store = dlm_event_store
};

static struct dlm_attr dlm_attr_id = {
	.attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
	.show = dlm_id_show,
	.store = dlm_id_store
};

static struct dlm_attr dlm_attr_recover_status = {
	.attr = {.name = "recover_status", .mode = S_IRUGO},
	.show = dlm_recover_status_show
};

static struct dlm_attr dlm_attr_recover_nodeid = {
	.attr = {.name = "recover_nodeid", .mode = S_IRUGO},
	.show = dlm_recover_nodeid_show
};

static struct attribute *dlm_attrs[] = {
	&dlm_attr_control.attr,
	&dlm_attr_event.attr,
	&dlm_attr_id.attr,
	&dlm_attr_recover_status.attr,
	&dlm_attr_recover_nodeid.attr,
	NULL,
};

static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
			     char *buf)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->show ? a->show(ls, buf) : 0;
}

static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
			      const char *buf, size_t len)
{
	struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
	struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
	return a->store ? a->store(ls, buf, len) : len;
}

static void lockspace_kobj_release(struct kobject *k)
{
	struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
	kfree(ls);
}

static struct sysfs_ops dlm_attr_ops = {
	.show = dlm_attr_show,
	.store = dlm_attr_store,
};

static struct kobj_type dlm_ktype = {
	.default_attrs = dlm_attrs,
	.sysfs_ops = &dlm_attr_ops,
	.release = lockspace_kobj_release,
};
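
/*
 * For reference, a minimal sketch of the resulting sysfs layout (the
 * lockspace name "example" is hypothetical): each registered lockspace
 * kobject appears under the "dlm" kset as
 *
 *	/sys/kernel/dlm/example/control		(write 0 to stop, 1 to start)
 *	/sys/kernel/dlm/example/event_done	(userspace writes join/leave result)
 *	/sys/kernel/dlm/example/id		(global lockspace id, read/write)
 *	/sys/kernel/dlm/example/recover_status	(read-only)
 *	/sys/kernel/dlm/example/recover_nodeid	(read-only)
 *
 * Reads and writes on these files are dispatched through dlm_attr_ops to
 * the show/store callbacks above.
 */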

static struct kset *dlm_kset;

static int kobject_setup(struct dlm_ls *ls)
{
	char lsname[DLM_LOCKSPACE_LEN];
	int error;

	memset(lsname, 0, DLM_LOCKSPACE_LEN);
	snprintf(lsname, DLM_LOCKSPACE_LEN, "%s", ls->ls_name);

	error = kobject_set_name(&ls->ls_kobj, "%s", lsname);
	if (error)
		return error;

	ls->ls_kobj.kset = dlm_kset;
	ls->ls_kobj.ktype = &dlm_ktype;
	return 0;
}

static int do_uevent(struct dlm_ls *ls, int in)
{
	int error;

	if (in)
		kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
	else
		kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);

	log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");

	/* dlm_controld will see the uevent, do the necessary group management
	   and then write to sysfs to wake us */

	error = wait_event_interruptible(ls->ls_uevent_wait,
			test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));

	log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);

	if (error)
		goto out;

	error = ls->ls_uevent_result;
 out:
	if (error)
		log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
			  error, ls->ls_uevent_result);
	return error;
}
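
/*
 * A hedged sketch of the handshake above from userspace's side (the
 * lockspace name is hypothetical): dlm_controld receives the ONLINE or
 * OFFLINE uevent, performs the group join/leave, then reports the result
 * by writing it to the event_done attribute, e.g.
 *
 *	echo 0 > /sys/kernel/dlm/example/event_done
 *
 * That write lands in dlm_event_store(), which records the result in
 * ls_uevent_result, sets LSFL_UEVENT_WAIT and wakes the
 * wait_event_interruptible() above.
 */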

int dlm_lockspace_init(void)
{
	ls_count = 0;
	mutex_init(&ls_lock);
	INIT_LIST_HEAD(&lslist);
	spin_lock_init(&lslist_lock);

	dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
	if (!dlm_kset) {
		printk(KERN_WARNING "%s: can not create kset\n", __FUNCTION__);
		return -ENOMEM;
	}
	return 0;
}
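
/*
 * Registering the kset with kernel_kobj (the /sys/kernel kobject) as its
 * parent is what creates the /sys/kernel/dlm directory that the
 * per-lockspace kobjects hang off. A minimal sketch of the same pattern
 * for an unrelated, hypothetical module:
 *
 *	static struct kset *example_kset;
 *
 *	example_kset = kset_create_and_add("example", NULL, kernel_kobj);
 *	if (!example_kset)
 *		return -ENOMEM;		// else appears as /sys/kernel/example
 */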

void dlm_lockspace_exit(void)
{
	kset_unregister(dlm_kset);
}

static int dlm_scand(void *data)
{
	struct dlm_ls *ls;

	while (!kthread_should_stop()) {
		list_for_each_entry(ls, &lslist, ls_list) {
			if (dlm_lock_recovery_try(ls)) {
				dlm_scan_rsbs(ls);
				dlm_scan_timeout(ls);
				dlm_unlock_recovery(ls);
			}
		}
		schedule_timeout_interruptible(dlm_config.ci_scan_secs * HZ);
	}
	return 0;
}

static int dlm_scand_start(void)
{
	struct task_struct *p;
	int error = 0;

	p = kthread_run(dlm_scand, NULL, "dlm_scand");
	if (IS_ERR(p))
		error = PTR_ERR(p);
	else
		scand_task = p;
	return error;
}

static void dlm_scand_stop(void)
{
	kthread_stop(scand_task);
}

static struct dlm_ls *dlm_find_lockspace_name(char *name, int namelen)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_namelen == namelen &&
		    memcmp(ls->ls_name, name, namelen) == 0)
			goto out;
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);

	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_global_id == id) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_local_handle == lockspace) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

struct dlm_ls *dlm_find_lockspace_device(int minor)
{
	struct dlm_ls *ls;

	spin_lock(&lslist_lock);
	list_for_each_entry(ls, &lslist, ls_list) {
		if (ls->ls_device.minor == minor) {
			ls->ls_count++;
			goto out;
		}
	}
	ls = NULL;
 out:
	spin_unlock(&lslist_lock);
	return ls;
}

void dlm_put_lockspace(struct dlm_ls *ls)
{
	spin_lock(&lslist_lock);
	ls->ls_count--;
	spin_unlock(&lslist_lock);
}
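
/*
 * The find/put pairs above implement a simple reference count: each
 * successful dlm_find_lockspace_*() bumps ls_count under lslist_lock, and
 * remove_lockspace() below waits for it to drop to zero before unlinking
 * the lockspace. A hedged usage sketch (the id value is hypothetical):
 *
 *	struct dlm_ls *ls = dlm_find_lockspace_global(id);
 *	if (ls) {
 *		// use ls; it cannot be removed while the count is held
 *		dlm_put_lockspace(ls);
 *	}
 */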

static void remove_lockspace(struct dlm_ls *ls)
{
	for (;;) {
		spin_lock(&lslist_lock);
		if (ls->ls_count == 0) {
			list_del(&ls->ls_list);
			spin_unlock(&lslist_lock);
			return;
		}
		spin_unlock(&lslist_lock);
		ssleep(1);
	}
}

static int threads_start(void)
{
	int error;

	/* Thread which processes lock requests for all lockspaces */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspaces */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}

static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}

static int new_lockspace(char *name, int namelen, void **lockspace,
			 uint32_t flags, int lvblen)
{
	struct dlm_ls *ls;
	int i, size, error = -ENOMEM;
	int do_unreg = 0;

	if (namelen > DLM_LOCKSPACE_LEN)
		return -EINVAL;

	if (!lvblen || (lvblen % 8))
		return -EINVAL;

	if (!try_module_get(THIS_MODULE))
		return -EINVAL;

	ls = dlm_find_lockspace_name(name, namelen);
	if (ls) {
		*lockspace = ls;
		module_put(THIS_MODULE);
		return -EEXIST;
	}

	ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
	if (!ls)
		goto out;
	memcpy(ls->ls_name, name, namelen);
	ls->ls_namelen = namelen;
	ls->ls_lvblen = lvblen;
	ls->ls_count = 0;
	ls->ls_flags = 0;

	if (flags & DLM_LSFL_TIMEWARN)
		set_bit(LSFL_TIMEWARN, &ls->ls_flags);

	if (flags & DLM_LSFL_FS)
		ls->ls_allocation = GFP_NOFS;
	else
		ls->ls_allocation = GFP_KERNEL;

	/* ls_exflags are forced to match among nodes, and we don't
	   need to require all nodes to have TIMEWARN or FS set */
	ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS));

	size = dlm_config.ci_rsbtbl_size;
	ls->ls_rsbtbl_size = size;

	ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
	if (!ls->ls_rsbtbl)
		goto out_lsfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
		INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
		rwlock_init(&ls->ls_rsbtbl[i].lock);
	}

	size = dlm_config.ci_lkbtbl_size;
	ls->ls_lkbtbl_size = size;

	ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
	if (!ls->ls_lkbtbl)
		goto out_rsbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
		rwlock_init(&ls->ls_lkbtbl[i].lock);
		ls->ls_lkbtbl[i].counter = 1;
	}

	size = dlm_config.ci_dirtbl_size;
	ls->ls_dirtbl_size = size;

	ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
	if (!ls->ls_dirtbl)
		goto out_lkbfree;
	for (i = 0; i < size; i++) {
		INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
		rwlock_init(&ls->ls_dirtbl[i].lock);
	}

	INIT_LIST_HEAD(&ls->ls_waiters);
	mutex_init(&ls->ls_waiters_mutex);
	INIT_LIST_HEAD(&ls->ls_orphans);
	mutex_init(&ls->ls_orphans_mutex);
	INIT_LIST_HEAD(&ls->ls_timeout);
	mutex_init(&ls->ls_timeout_mutex);

	INIT_LIST_HEAD(&ls->ls_nodes);
	INIT_LIST_HEAD(&ls->ls_nodes_gone);
	ls->ls_num_nodes = 0;
	ls->ls_low_nodeid = 0;
	ls->ls_total_weight = 0;
	ls->ls_node_array = NULL;

	memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
	ls->ls_stub_rsb.res_ls = ls;

	ls->ls_debug_rsb_dentry = NULL;
	ls->ls_debug_waiters_dentry = NULL;

	init_waitqueue_head(&ls->ls_uevent_wait);
	ls->ls_uevent_result = 0;
	init_completion(&ls->ls_members_done);
	ls->ls_members_result = -1;

	ls->ls_recoverd_task = NULL;
	mutex_init(&ls->ls_recoverd_active);
	spin_lock_init(&ls->ls_recover_lock);
	spin_lock_init(&ls->ls_rcom_spin);
	get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
	ls->ls_recover_status = 0;
	ls->ls_recover_seq = 0;
	ls->ls_recover_args = NULL;
	init_rwsem(&ls->ls_in_recovery);
	init_rwsem(&ls->ls_recv_active);
	INIT_LIST_HEAD(&ls->ls_requestqueue);
	mutex_init(&ls->ls_requestqueue_mutex);
	mutex_init(&ls->ls_clear_proc_locks);

	ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
	if (!ls->ls_recover_buf)
		goto out_dirfree;

	INIT_LIST_HEAD(&ls->ls_recover_list);
	spin_lock_init(&ls->ls_recover_list_lock);
	ls->ls_recover_list_count = 0;
	ls->ls_local_handle = ls;
	init_waitqueue_head(&ls->ls_wait_general);
	INIT_LIST_HEAD(&ls->ls_root_list);
	init_rwsem(&ls->ls_root_sem);

	down_write(&ls->ls_in_recovery);

	spin_lock(&lslist_lock);
	list_add(&ls->ls_list, &lslist);
	spin_unlock(&lslist_lock);

	/* needs to find ls in lslist */
	error = dlm_recoverd_start(ls);
	if (error) {
		log_error(ls, "can't start dlm_recoverd %d", error);
		goto out_delist;
	}

	error = kobject_setup(ls);
	if (error)
		goto out_stop;

	error = kobject_register(&ls->ls_kobj);
	if (error)
		goto out_stop;

	/* let kobject handle freeing of ls if there's an error */
	do_unreg = 1;

	/* This uevent triggers dlm_controld in userspace to add us to the
	   group of nodes that are members of this lockspace (managed by the
	   cluster infrastructure.)  Once it's done that, it tells us who the
	   current lockspace members are (via configfs) and then tells the
	   lockspace to start running (via sysfs) in dlm_ls_start(). */

	error = do_uevent(ls, 1);
	if (error)
		goto out_stop;

	wait_for_completion(&ls->ls_members_done);
	error = ls->ls_members_result;
	if (error)
		goto out_members;

	dlm_create_debug_file(ls);

	log_debug(ls, "join complete");

	*lockspace = ls;
	return 0;

 out_members:
	do_uevent(ls, 0);
	dlm_clear_members(ls);
	kfree(ls->ls_node_array);
 out_stop:
	dlm_recoverd_stop(ls);
 out_delist:
	spin_lock(&lslist_lock);
	list_del(&ls->ls_list);
	spin_unlock(&lslist_lock);
	kfree(ls->ls_recover_buf);
 out_dirfree:
	kfree(ls->ls_dirtbl);
 out_lkbfree:
	kfree(ls->ls_lkbtbl);
 out_rsbfree:
	kfree(ls->ls_rsbtbl);
 out_lsfree:
	if (do_unreg)
		kobject_unregister(&ls->ls_kobj);
	else
		kfree(ls);
 out:
	module_put(THIS_MODULE);
	return error;
}

int dlm_new_lockspace(char *name, int namelen, void **lockspace,
		      uint32_t flags, int lvblen)
{
	int error = 0;

	mutex_lock(&ls_lock);
	if (!ls_count)
		error = threads_start();
	if (error)
		goto out;

	error = new_lockspace(name, namelen, lockspace, flags, lvblen);
	if (!error)
		ls_count++;
	else if (!ls_count)
		threads_stop();
 out:
	mutex_unlock(&ls_lock);
	return error;
}
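
/*
 * A minimal, hedged sketch of how a caller (e.g. a cluster filesystem)
 * might create a lockspace; the name and LVB length here are hypothetical,
 * but lvblen must be a nonzero multiple of 8 and the name must fit in
 * DLM_LOCKSPACE_LEN:
 *
 *	void *ls;
 *	int error;
 *
 *	error = dlm_new_lockspace("example", strlen("example"), &ls,
 *				  DLM_LSFL_FS, 32);
 *	if (error && error != -EEXIST)
 *		return error;	// on -EEXIST, ls points at the existing space
 */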

/* Return 1 if the lockspace still has active remote locks,
 * 2 if the lockspace still has active local locks.
 */
static int lockspace_busy(struct dlm_ls *ls)
{
	int i, lkb_found = 0;
	struct dlm_lkb *lkb;

	/* NOTE: We check the lockidtbl here rather than the resource table.
	   This is because there may be LKBs queued as ASTs that have been
	   unlinked from their RSBs and are pending deletion once the AST has
	   been delivered */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		read_lock(&ls->ls_lkbtbl[i].lock);
		if (!list_empty(&ls->ls_lkbtbl[i].list)) {
			lkb_found = 1;
			list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
					    lkb_idtbl_list) {
				if (!lkb->lkb_nodeid) {
					read_unlock(&ls->ls_lkbtbl[i].lock);
					return 2;
				}
			}
		}
		read_unlock(&ls->ls_lkbtbl[i].lock);
	}
	return lkb_found;
}

static int release_lockspace(struct dlm_ls *ls, int force)
{
	struct dlm_lkb *lkb;
	struct dlm_rsb *rsb;
	struct list_head *head;
	int i;
	int busy = lockspace_busy(ls);

	if (busy > force)
		return -EBUSY;

	if (force < 3)
		do_uevent(ls, 0);

	dlm_recoverd_stop(ls);

	remove_lockspace(ls);

	dlm_delete_debug_file(ls);

	dlm_astd_suspend();

	kfree(ls->ls_recover_buf);

	/*
	 * Free direntry structs.
	 */

	dlm_dir_clear(ls);
	kfree(ls->ls_dirtbl);

	/*
	 * Free all lkb's on lkbtbl[] lists.
	 */

	for (i = 0; i < ls->ls_lkbtbl_size; i++) {
		head = &ls->ls_lkbtbl[i].list;
		while (!list_empty(head)) {
			lkb = list_entry(head->next, struct dlm_lkb,
					 lkb_idtbl_list);

			list_del(&lkb->lkb_idtbl_list);

			dlm_del_ast(lkb);

			if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
				free_lvb(lkb->lkb_lvbptr);

			free_lkb(lkb);
		}
	}
	dlm_astd_resume();

	kfree(ls->ls_lkbtbl);

	/*
	 * Free all rsb's on rsbtbl[] lists
	 */

	for (i = 0; i < ls->ls_rsbtbl_size; i++) {
		head = &ls->ls_rsbtbl[i].list;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);

			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}

		head = &ls->ls_rsbtbl[i].toss;
		while (!list_empty(head)) {
			rsb = list_entry(head->next, struct dlm_rsb,
					 res_hashchain);
			list_del(&rsb->res_hashchain);
			free_rsb(rsb);
		}
	}

	kfree(ls->ls_rsbtbl);

	/*
	 * Free structures on any other lists
	 */

	dlm_purge_requestqueue(ls);
	kfree(ls->ls_recover_args);
	dlm_clear_free_entries(ls);
	dlm_clear_members(ls);
	dlm_clear_members_gone(ls);
	kfree(ls->ls_node_array);
	kobject_unregister(&ls->ls_kobj);
	/* The ls structure will be freed when the kobject is released */

	mutex_lock(&ls_lock);
	ls_count--;
	if (!ls_count)
		threads_stop();
	mutex_unlock(&ls_lock);

	module_put(THIS_MODULE);
	return 0;
}

/*
 * Called when a system has released all its locks and is not going to use the
 * lockspace any longer.  We free everything we're managing for this lockspace.
 * Remaining nodes will go through the recovery process as if we'd died.  The
 * lockspace must continue to function as usual, participating in recoveries,
 * until this returns.
 *
 * Force has 4 possible values:
 *   0 - don't destroy lockspace if it has any LKBs
 *   1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
 *   2 - destroy lockspace regardless of LKBs
 *   3 - destroy lockspace as part of a forced shutdown
 */

int dlm_release_lockspace(void *lockspace, int force)
{
	struct dlm_ls *ls;

	ls = dlm_find_lockspace_local(lockspace);
	if (!ls)
		return -EINVAL;
	dlm_put_lockspace(ls);
	return release_lockspace(ls, force);
}

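/*
 * A hedged usage sketch of the force levels above: a normal teardown
 * passes 0 and backs off if locks remain, while a forced teardown might
 * pass 2 (or 3 during a forced shutdown):
 *
 *	error = dlm_release_lockspace(ls, 0);
 *	if (error == -EBUSY) {
 *		// locks still held; release them, or force the teardown:
 *		error = dlm_release_lockspace(ls, 2);
 *	}
 */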