vfs: fix inode_init_always calling convention
[deliverable/linux.git] / fs / dlm / lockspace.c
CommitLineData
e7fd4179
DT
1/******************************************************************************
2*******************************************************************************
3**
4** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
0f8e0d9a 5** Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
e7fd4179
DT
6**
7** This copyrighted material is made available to anyone wishing to use,
8** modify, copy, or redistribute it subject to the terms and conditions
9** of the GNU General Public License v.2.
10**
11*******************************************************************************
12******************************************************************************/
13
14#include "dlm_internal.h"
15#include "lockspace.h"
16#include "member.h"
17#include "recoverd.h"
18#include "ast.h"
19#include "dir.h"
20#include "lowcomms.h"
21#include "config.h"
22#include "memory.h"
23#include "lock.h"
c56b39cd 24#include "recover.h"
2896ee37 25#include "requestqueue.h"
0f8e0d9a 26#include "user.h"
e7fd4179 27
e7fd4179 28static int ls_count;
90135925 29static struct mutex ls_lock;
e7fd4179
DT
30static struct list_head lslist;
31static spinlock_t lslist_lock;
32static struct task_struct * scand_task;
33
34
35static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
36{
37 ssize_t ret = len;
38 int n = simple_strtol(buf, NULL, 0);
39
e2de7f56
PC
40 ls = dlm_find_lockspace_local(ls->ls_local_handle);
41 if (!ls)
42 return -EINVAL;
43
e7fd4179
DT
44 switch (n) {
45 case 0:
46 dlm_ls_stop(ls);
47 break;
48 case 1:
49 dlm_ls_start(ls);
50 break;
51 default:
52 ret = -EINVAL;
53 }
e2de7f56 54 dlm_put_lockspace(ls);
e7fd4179
DT
55 return ret;
56}
57
58static ssize_t dlm_event_store(struct dlm_ls *ls, const char *buf, size_t len)
59{
60 ls->ls_uevent_result = simple_strtol(buf, NULL, 0);
61 set_bit(LSFL_UEVENT_WAIT, &ls->ls_flags);
62 wake_up(&ls->ls_uevent_wait);
63 return len;
64}
65
66static ssize_t dlm_id_show(struct dlm_ls *ls, char *buf)
67{
a1d144c7 68 return snprintf(buf, PAGE_SIZE, "%u\n", ls->ls_global_id);
e7fd4179
DT
69}
70
71static ssize_t dlm_id_store(struct dlm_ls *ls, const char *buf, size_t len)
72{
73 ls->ls_global_id = simple_strtoul(buf, NULL, 0);
74 return len;
75}
76
c56b39cd
DT
77static ssize_t dlm_recover_status_show(struct dlm_ls *ls, char *buf)
78{
79 uint32_t status = dlm_recover_status(ls);
a1d144c7 80 return snprintf(buf, PAGE_SIZE, "%x\n", status);
c56b39cd
DT
81}
82
faa0f267
DT
83static ssize_t dlm_recover_nodeid_show(struct dlm_ls *ls, char *buf)
84{
a1d144c7 85 return snprintf(buf, PAGE_SIZE, "%d\n", ls->ls_recover_nodeid);
faa0f267
DT
86}
87
e7fd4179
DT
88struct dlm_attr {
89 struct attribute attr;
90 ssize_t (*show)(struct dlm_ls *, char *);
91 ssize_t (*store)(struct dlm_ls *, const char *, size_t);
92};
93
94static struct dlm_attr dlm_attr_control = {
95 .attr = {.name = "control", .mode = S_IWUSR},
96 .store = dlm_control_store
97};
98
99static struct dlm_attr dlm_attr_event = {
100 .attr = {.name = "event_done", .mode = S_IWUSR},
101 .store = dlm_event_store
102};
103
104static struct dlm_attr dlm_attr_id = {
105 .attr = {.name = "id", .mode = S_IRUGO | S_IWUSR},
106 .show = dlm_id_show,
107 .store = dlm_id_store
108};
109
c56b39cd
DT
110static struct dlm_attr dlm_attr_recover_status = {
111 .attr = {.name = "recover_status", .mode = S_IRUGO},
112 .show = dlm_recover_status_show
113};
114
faa0f267
DT
115static struct dlm_attr dlm_attr_recover_nodeid = {
116 .attr = {.name = "recover_nodeid", .mode = S_IRUGO},
117 .show = dlm_recover_nodeid_show
118};
119
e7fd4179
DT
120static struct attribute *dlm_attrs[] = {
121 &dlm_attr_control.attr,
122 &dlm_attr_event.attr,
123 &dlm_attr_id.attr,
c56b39cd 124 &dlm_attr_recover_status.attr,
faa0f267 125 &dlm_attr_recover_nodeid.attr,
e7fd4179
DT
126 NULL,
127};
128
129static ssize_t dlm_attr_show(struct kobject *kobj, struct attribute *attr,
130 char *buf)
131{
132 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
133 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
134 return a->show ? a->show(ls, buf) : 0;
135}
136
137static ssize_t dlm_attr_store(struct kobject *kobj, struct attribute *attr,
138 const char *buf, size_t len)
139{
140 struct dlm_ls *ls = container_of(kobj, struct dlm_ls, ls_kobj);
141 struct dlm_attr *a = container_of(attr, struct dlm_attr, attr);
142 return a->store ? a->store(ls, buf, len) : len;
143}
144
ba542e3b
PC
145static void lockspace_kobj_release(struct kobject *k)
146{
147 struct dlm_ls *ls = container_of(k, struct dlm_ls, ls_kobj);
148 kfree(ls);
149}
150
e7fd4179
DT
151static struct sysfs_ops dlm_attr_ops = {
152 .show = dlm_attr_show,
153 .store = dlm_attr_store,
154};
155
156static struct kobj_type dlm_ktype = {
157 .default_attrs = dlm_attrs,
158 .sysfs_ops = &dlm_attr_ops,
ba542e3b 159 .release = lockspace_kobj_release,
e7fd4179
DT
160};
161
d405936b 162static struct kset *dlm_kset;
e7fd4179 163
e7fd4179
DT
164static int do_uevent(struct dlm_ls *ls, int in)
165{
166 int error;
167
168 if (in)
169 kobject_uevent(&ls->ls_kobj, KOBJ_ONLINE);
170 else
171 kobject_uevent(&ls->ls_kobj, KOBJ_OFFLINE);
172
8b0e7b2c
DT
173 log_debug(ls, "%s the lockspace group...", in ? "joining" : "leaving");
174
175 /* dlm_controld will see the uevent, do the necessary group management
176 and then write to sysfs to wake us */
177
e7fd4179
DT
178 error = wait_event_interruptible(ls->ls_uevent_wait,
179 test_and_clear_bit(LSFL_UEVENT_WAIT, &ls->ls_flags));
8b0e7b2c
DT
180
181 log_debug(ls, "group event done %d %d", error, ls->ls_uevent_result);
182
e7fd4179
DT
183 if (error)
184 goto out;
185
186 error = ls->ls_uevent_result;
187 out:
8b0e7b2c
DT
188 if (error)
189 log_error(ls, "group %s failed %d %d", in ? "join" : "leave",
190 error, ls->ls_uevent_result);
e7fd4179
DT
191 return error;
192}
193
194
30727174 195int __init dlm_lockspace_init(void)
e7fd4179 196{
e7fd4179 197 ls_count = 0;
90135925 198 mutex_init(&ls_lock);
e7fd4179
DT
199 INIT_LIST_HEAD(&lslist);
200 spin_lock_init(&lslist_lock);
201
0ff21e46 202 dlm_kset = kset_create_and_add("dlm", NULL, kernel_kobj);
d405936b 203 if (!dlm_kset) {
8e24eea7 204 printk(KERN_WARNING "%s: can not create kset\n", __func__);
d405936b
GKH
205 return -ENOMEM;
206 }
207 return 0;
e7fd4179
DT
208}
209
210void dlm_lockspace_exit(void)
211{
d405936b 212 kset_unregister(dlm_kset);
e7fd4179
DT
213}
214
c1dcf65f
DT
215static struct dlm_ls *find_ls_to_scan(void)
216{
217 struct dlm_ls *ls;
218
219 spin_lock(&lslist_lock);
220 list_for_each_entry(ls, &lslist, ls_list) {
221 if (time_after_eq(jiffies, ls->ls_scan_time +
222 dlm_config.ci_scan_secs * HZ)) {
223 spin_unlock(&lslist_lock);
224 return ls;
225 }
226 }
227 spin_unlock(&lslist_lock);
228 return NULL;
229}
230
e7fd4179
DT
231static int dlm_scand(void *data)
232{
233 struct dlm_ls *ls;
c1dcf65f 234 int timeout_jiffies = dlm_config.ci_scan_secs * HZ;
e7fd4179
DT
235
236 while (!kthread_should_stop()) {
c1dcf65f
DT
237 ls = find_ls_to_scan();
238 if (ls) {
85e86edf 239 if (dlm_lock_recovery_try(ls)) {
c1dcf65f 240 ls->ls_scan_time = jiffies;
85e86edf 241 dlm_scan_rsbs(ls);
3ae1acf9 242 dlm_scan_timeout(ls);
85e86edf 243 dlm_unlock_recovery(ls);
c1dcf65f
DT
244 } else {
245 ls->ls_scan_time += HZ;
85e86edf 246 }
c1dcf65f
DT
247 } else {
248 schedule_timeout_interruptible(timeout_jiffies);
85e86edf 249 }
e7fd4179
DT
250 }
251 return 0;
252}
253
254static int dlm_scand_start(void)
255{
256 struct task_struct *p;
257 int error = 0;
258
259 p = kthread_run(dlm_scand, NULL, "dlm_scand");
260 if (IS_ERR(p))
261 error = PTR_ERR(p);
262 else
263 scand_task = p;
264 return error;
265}
266
267static void dlm_scand_stop(void)
268{
269 kthread_stop(scand_task);
270}
271
e7fd4179
DT
272struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
273{
274 struct dlm_ls *ls;
275
276 spin_lock(&lslist_lock);
277
278 list_for_each_entry(ls, &lslist, ls_list) {
279 if (ls->ls_global_id == id) {
280 ls->ls_count++;
281 goto out;
282 }
283 }
284 ls = NULL;
285 out:
286 spin_unlock(&lslist_lock);
287 return ls;
288}
289
597d0cae 290struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
e7fd4179 291{
597d0cae 292 struct dlm_ls *ls;
e7fd4179
DT
293
294 spin_lock(&lslist_lock);
597d0cae
DT
295 list_for_each_entry(ls, &lslist, ls_list) {
296 if (ls->ls_local_handle == lockspace) {
297 ls->ls_count++;
298 goto out;
299 }
300 }
301 ls = NULL;
302 out:
303 spin_unlock(&lslist_lock);
304 return ls;
305}
306
307struct dlm_ls *dlm_find_lockspace_device(int minor)
308{
309 struct dlm_ls *ls;
310
311 spin_lock(&lslist_lock);
312 list_for_each_entry(ls, &lslist, ls_list) {
313 if (ls->ls_device.minor == minor) {
314 ls->ls_count++;
315 goto out;
316 }
317 }
318 ls = NULL;
319 out:
e7fd4179
DT
320 spin_unlock(&lslist_lock);
321 return ls;
322}
323
324void dlm_put_lockspace(struct dlm_ls *ls)
325{
326 spin_lock(&lslist_lock);
327 ls->ls_count--;
328 spin_unlock(&lslist_lock);
329}
330
331static void remove_lockspace(struct dlm_ls *ls)
332{
333 for (;;) {
334 spin_lock(&lslist_lock);
335 if (ls->ls_count == 0) {
0f8e0d9a 336 WARN_ON(ls->ls_create_count != 0);
e7fd4179
DT
337 list_del(&ls->ls_list);
338 spin_unlock(&lslist_lock);
339 return;
340 }
341 spin_unlock(&lslist_lock);
342 ssleep(1);
343 }
344}
345
/* Start the module-wide service threads (ast delivery, scanner, lowcomms)
   needed before the first lockspace exists.  On failure, already-started
   threads are unwound in reverse order via the goto chain. */
static int threads_start(void)
{
	int error;

	/* Thread which process lock requests for all lockspace's */
	error = dlm_astd_start();
	if (error) {
		log_print("cannot start dlm_astd thread %d", error);
		goto fail;
	}

	error = dlm_scand_start();
	if (error) {
		log_print("cannot start dlm_scand thread %d", error);
		goto astd_fail;
	}

	/* Thread for sending/receiving messages for all lockspace's */
	error = dlm_lowcomms_start();
	if (error) {
		log_print("cannot start dlm lowcomms %d", error);
		goto scand_fail;
	}

	return 0;

 scand_fail:
	dlm_scand_stop();
 astd_fail:
	dlm_astd_stop();
 fail:
	return error;
}
379
/* Stop the module-wide service threads once the last lockspace is gone. */
static void threads_stop(void)
{
	dlm_scand_stop();
	dlm_lowcomms_stop();
	dlm_astd_stop();
}
386
08ce4c91 387static int new_lockspace(const char *name, int namelen, void **lockspace,
e7fd4179
DT
388 uint32_t flags, int lvblen)
389{
390 struct dlm_ls *ls;
0f8e0d9a 391 int i, size, error;
79d72b54 392 int do_unreg = 0;
e7fd4179
DT
393
394 if (namelen > DLM_LOCKSPACE_LEN)
395 return -EINVAL;
396
397 if (!lvblen || (lvblen % 8))
398 return -EINVAL;
399
400 if (!try_module_get(THIS_MODULE))
401 return -EINVAL;
402
dc68c7ed
DT
403 if (!dlm_user_daemon_available()) {
404 module_put(THIS_MODULE);
405 return -EUNATCH;
406 }
407
0f8e0d9a
DT
408 error = 0;
409
410 spin_lock(&lslist_lock);
411 list_for_each_entry(ls, &lslist, ls_list) {
412 WARN_ON(ls->ls_create_count <= 0);
413 if (ls->ls_namelen != namelen)
414 continue;
415 if (memcmp(ls->ls_name, name, namelen))
416 continue;
417 if (flags & DLM_LSFL_NEWEXCL) {
418 error = -EEXIST;
419 break;
420 }
421 ls->ls_create_count++;
8511a272
DT
422 *lockspace = ls;
423 error = 1;
0f8e0d9a 424 break;
e7fd4179 425 }
0f8e0d9a
DT
426 spin_unlock(&lslist_lock);
427
0f8e0d9a 428 if (error)
8511a272 429 goto out;
0f8e0d9a
DT
430
431 error = -ENOMEM;
e7fd4179 432
90135925 433 ls = kzalloc(sizeof(struct dlm_ls) + namelen, GFP_KERNEL);
e7fd4179
DT
434 if (!ls)
435 goto out;
e7fd4179
DT
436 memcpy(ls->ls_name, name, namelen);
437 ls->ls_namelen = namelen;
e7fd4179
DT
438 ls->ls_lvblen = lvblen;
439 ls->ls_count = 0;
440 ls->ls_flags = 0;
c1dcf65f 441 ls->ls_scan_time = jiffies;
e7fd4179 442
3ae1acf9
DT
443 if (flags & DLM_LSFL_TIMEWARN)
444 set_bit(LSFL_TIMEWARN, &ls->ls_flags);
3ae1acf9 445
44f487a5
PC
446 if (flags & DLM_LSFL_FS)
447 ls->ls_allocation = GFP_NOFS;
448 else
449 ls->ls_allocation = GFP_KERNEL;
450
fad59c13 451 /* ls_exflags are forced to match among nodes, and we don't
0f8e0d9a
DT
452 need to require all nodes to have some flags set */
453 ls->ls_exflags = (flags & ~(DLM_LSFL_TIMEWARN | DLM_LSFL_FS |
454 DLM_LSFL_NEWEXCL));
fad59c13 455
68c817a1 456 size = dlm_config.ci_rsbtbl_size;
e7fd4179
DT
457 ls->ls_rsbtbl_size = size;
458
459 ls->ls_rsbtbl = kmalloc(sizeof(struct dlm_rsbtable) * size, GFP_KERNEL);
460 if (!ls->ls_rsbtbl)
461 goto out_lsfree;
462 for (i = 0; i < size; i++) {
463 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].list);
464 INIT_LIST_HEAD(&ls->ls_rsbtbl[i].toss);
c7be761a 465 spin_lock_init(&ls->ls_rsbtbl[i].lock);
e7fd4179
DT
466 }
467
68c817a1 468 size = dlm_config.ci_lkbtbl_size;
e7fd4179
DT
469 ls->ls_lkbtbl_size = size;
470
471 ls->ls_lkbtbl = kmalloc(sizeof(struct dlm_lkbtable) * size, GFP_KERNEL);
472 if (!ls->ls_lkbtbl)
473 goto out_rsbfree;
474 for (i = 0; i < size; i++) {
475 INIT_LIST_HEAD(&ls->ls_lkbtbl[i].list);
476 rwlock_init(&ls->ls_lkbtbl[i].lock);
477 ls->ls_lkbtbl[i].counter = 1;
478 }
479
68c817a1 480 size = dlm_config.ci_dirtbl_size;
e7fd4179
DT
481 ls->ls_dirtbl_size = size;
482
483 ls->ls_dirtbl = kmalloc(sizeof(struct dlm_dirtable) * size, GFP_KERNEL);
484 if (!ls->ls_dirtbl)
485 goto out_lkbfree;
486 for (i = 0; i < size; i++) {
487 INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
305a47b1 488 spin_lock_init(&ls->ls_dirtbl[i].lock);
e7fd4179
DT
489 }
490
491 INIT_LIST_HEAD(&ls->ls_waiters);
90135925 492 mutex_init(&ls->ls_waiters_mutex);
ef0c2bb0
DT
493 INIT_LIST_HEAD(&ls->ls_orphans);
494 mutex_init(&ls->ls_orphans_mutex);
3ae1acf9
DT
495 INIT_LIST_HEAD(&ls->ls_timeout);
496 mutex_init(&ls->ls_timeout_mutex);
e7fd4179
DT
497
498 INIT_LIST_HEAD(&ls->ls_nodes);
499 INIT_LIST_HEAD(&ls->ls_nodes_gone);
500 ls->ls_num_nodes = 0;
501 ls->ls_low_nodeid = 0;
502 ls->ls_total_weight = 0;
503 ls->ls_node_array = NULL;
504
505 memset(&ls->ls_stub_rsb, 0, sizeof(struct dlm_rsb));
506 ls->ls_stub_rsb.res_ls = ls;
507
5de6319b
DT
508 ls->ls_debug_rsb_dentry = NULL;
509 ls->ls_debug_waiters_dentry = NULL;
e7fd4179
DT
510
511 init_waitqueue_head(&ls->ls_uevent_wait);
512 ls->ls_uevent_result = 0;
8b0e7b2c
DT
513 init_completion(&ls->ls_members_done);
514 ls->ls_members_result = -1;
e7fd4179
DT
515
516 ls->ls_recoverd_task = NULL;
90135925 517 mutex_init(&ls->ls_recoverd_active);
e7fd4179 518 spin_lock_init(&ls->ls_recover_lock);
98f176fb
DT
519 spin_lock_init(&ls->ls_rcom_spin);
520 get_random_bytes(&ls->ls_rcom_seq, sizeof(uint64_t));
e7fd4179
DT
521 ls->ls_recover_status = 0;
522 ls->ls_recover_seq = 0;
523 ls->ls_recover_args = NULL;
524 init_rwsem(&ls->ls_in_recovery);
c36258b5 525 init_rwsem(&ls->ls_recv_active);
e7fd4179 526 INIT_LIST_HEAD(&ls->ls_requestqueue);
90135925 527 mutex_init(&ls->ls_requestqueue_mutex);
597d0cae 528 mutex_init(&ls->ls_clear_proc_locks);
e7fd4179 529
68c817a1 530 ls->ls_recover_buf = kmalloc(dlm_config.ci_buffer_size, GFP_KERNEL);
e7fd4179
DT
531 if (!ls->ls_recover_buf)
532 goto out_dirfree;
533
534 INIT_LIST_HEAD(&ls->ls_recover_list);
535 spin_lock_init(&ls->ls_recover_list_lock);
536 ls->ls_recover_list_count = 0;
597d0cae 537 ls->ls_local_handle = ls;
e7fd4179
DT
538 init_waitqueue_head(&ls->ls_wait_general);
539 INIT_LIST_HEAD(&ls->ls_root_list);
540 init_rwsem(&ls->ls_root_sem);
541
542 down_write(&ls->ls_in_recovery);
543
5f88f1ea 544 spin_lock(&lslist_lock);
0f8e0d9a 545 ls->ls_create_count = 1;
5f88f1ea
DT
546 list_add(&ls->ls_list, &lslist);
547 spin_unlock(&lslist_lock);
548
549 /* needs to find ls in lslist */
e7fd4179
DT
550 error = dlm_recoverd_start(ls);
551 if (error) {
552 log_error(ls, "can't start dlm_recoverd %d", error);
79d72b54 553 goto out_delist;
e7fd4179
DT
554 }
555
901195ed
GKH
556 ls->ls_kobj.kset = dlm_kset;
557 error = kobject_init_and_add(&ls->ls_kobj, &dlm_ktype, NULL,
558 "%s", ls->ls_name);
e7fd4179 559 if (error)
79d72b54 560 goto out_stop;
901195ed 561 kobject_uevent(&ls->ls_kobj, KOBJ_ADD);
79d72b54
DT
562
563 /* let kobject handle freeing of ls if there's an error */
564 do_unreg = 1;
e7fd4179 565
8b0e7b2c
DT
566 /* This uevent triggers dlm_controld in userspace to add us to the
567 group of nodes that are members of this lockspace (managed by the
568 cluster infrastructure.) Once it's done that, it tells us who the
569 current lockspace members are (via configfs) and then tells the
570 lockspace to start running (via sysfs) in dlm_ls_start(). */
571
e7fd4179
DT
572 error = do_uevent(ls, 1);
573 if (error)
79d72b54
DT
574 goto out_stop;
575
8b0e7b2c
DT
576 wait_for_completion(&ls->ls_members_done);
577 error = ls->ls_members_result;
578 if (error)
579 goto out_members;
580
79d72b54
DT
581 dlm_create_debug_file(ls);
582
583 log_debug(ls, "join complete");
e7fd4179
DT
584 *lockspace = ls;
585 return 0;
586
8b0e7b2c
DT
587 out_members:
588 do_uevent(ls, 0);
589 dlm_clear_members(ls);
590 kfree(ls->ls_node_array);
79d72b54 591 out_stop:
5f88f1ea 592 dlm_recoverd_stop(ls);
79d72b54 593 out_delist:
e7fd4179
DT
594 spin_lock(&lslist_lock);
595 list_del(&ls->ls_list);
596 spin_unlock(&lslist_lock);
e7fd4179
DT
597 kfree(ls->ls_recover_buf);
598 out_dirfree:
599 kfree(ls->ls_dirtbl);
600 out_lkbfree:
601 kfree(ls->ls_lkbtbl);
602 out_rsbfree:
603 kfree(ls->ls_rsbtbl);
604 out_lsfree:
79d72b54 605 if (do_unreg)
197b12d6 606 kobject_put(&ls->ls_kobj);
79d72b54
DT
607 else
608 kfree(ls);
e7fd4179
DT
609 out:
610 module_put(THIS_MODULE);
611 return error;
612}
613
08ce4c91 614int dlm_new_lockspace(const char *name, int namelen, void **lockspace,
e7fd4179
DT
615 uint32_t flags, int lvblen)
616{
617 int error = 0;
618
90135925 619 mutex_lock(&ls_lock);
e7fd4179
DT
620 if (!ls_count)
621 error = threads_start();
622 if (error)
623 goto out;
624
625 error = new_lockspace(name, namelen, lockspace, flags, lvblen);
626 if (!error)
627 ls_count++;
8511a272
DT
628 if (error > 0)
629 error = 0;
630 if (!ls_count)
8b0e7b2c 631 threads_stop();
e7fd4179 632 out:
90135925 633 mutex_unlock(&ls_lock);
e7fd4179
DT
634 return error;
635}
636
637/* Return 1 if the lockspace still has active remote locks,
638 * 2 if the lockspace still has active local locks.
639 */
640static int lockspace_busy(struct dlm_ls *ls)
641{
642 int i, lkb_found = 0;
643 struct dlm_lkb *lkb;
644
645 /* NOTE: We check the lockidtbl here rather than the resource table.
646 This is because there may be LKBs queued as ASTs that have been
647 unlinked from their RSBs and are pending deletion once the AST has
648 been delivered */
649
650 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
651 read_lock(&ls->ls_lkbtbl[i].lock);
652 if (!list_empty(&ls->ls_lkbtbl[i].list)) {
653 lkb_found = 1;
654 list_for_each_entry(lkb, &ls->ls_lkbtbl[i].list,
655 lkb_idtbl_list) {
656 if (!lkb->lkb_nodeid) {
657 read_unlock(&ls->ls_lkbtbl[i].lock);
658 return 2;
659 }
660 }
661 }
662 read_unlock(&ls->ls_lkbtbl[i].lock);
663 }
664 return lkb_found;
665}
666
667static int release_lockspace(struct dlm_ls *ls, int force)
668{
669 struct dlm_lkb *lkb;
670 struct dlm_rsb *rsb;
671 struct list_head *head;
0f8e0d9a
DT
672 int i, busy, rv;
673
674 busy = lockspace_busy(ls);
675
676 spin_lock(&lslist_lock);
677 if (ls->ls_create_count == 1) {
678 if (busy > force)
679 rv = -EBUSY;
680 else {
681 /* remove_lockspace takes ls off lslist */
682 ls->ls_create_count = 0;
683 rv = 0;
684 }
685 } else if (ls->ls_create_count > 1) {
686 rv = --ls->ls_create_count;
687 } else {
688 rv = -EINVAL;
689 }
690 spin_unlock(&lslist_lock);
691
692 if (rv) {
693 log_debug(ls, "release_lockspace no remove %d", rv);
694 return rv;
695 }
e7fd4179 696
0f8e0d9a 697 dlm_device_deregister(ls);
e7fd4179 698
dc68c7ed 699 if (force < 3 && dlm_user_daemon_available())
e7fd4179
DT
700 do_uevent(ls, 0);
701
702 dlm_recoverd_stop(ls);
703
704 remove_lockspace(ls);
705
706 dlm_delete_debug_file(ls);
707
708 dlm_astd_suspend();
709
710 kfree(ls->ls_recover_buf);
711
712 /*
713 * Free direntry structs.
714 */
715
716 dlm_dir_clear(ls);
717 kfree(ls->ls_dirtbl);
718
719 /*
720 * Free all lkb's on lkbtbl[] lists.
721 */
722
723 for (i = 0; i < ls->ls_lkbtbl_size; i++) {
724 head = &ls->ls_lkbtbl[i].list;
725 while (!list_empty(head)) {
726 lkb = list_entry(head->next, struct dlm_lkb,
727 lkb_idtbl_list);
728
729 list_del(&lkb->lkb_idtbl_list);
730
731 dlm_del_ast(lkb);
732
733 if (lkb->lkb_lvbptr && lkb->lkb_flags & DLM_IFL_MSTCPY)
52bda2b5 734 dlm_free_lvb(lkb->lkb_lvbptr);
e7fd4179 735
52bda2b5 736 dlm_free_lkb(lkb);
e7fd4179
DT
737 }
738 }
739 dlm_astd_resume();
740
741 kfree(ls->ls_lkbtbl);
742
743 /*
744 * Free all rsb's on rsbtbl[] lists
745 */
746
747 for (i = 0; i < ls->ls_rsbtbl_size; i++) {
748 head = &ls->ls_rsbtbl[i].list;
749 while (!list_empty(head)) {
750 rsb = list_entry(head->next, struct dlm_rsb,
751 res_hashchain);
752
753 list_del(&rsb->res_hashchain);
52bda2b5 754 dlm_free_rsb(rsb);
e7fd4179
DT
755 }
756
757 head = &ls->ls_rsbtbl[i].toss;
758 while (!list_empty(head)) {
759 rsb = list_entry(head->next, struct dlm_rsb,
760 res_hashchain);
761 list_del(&rsb->res_hashchain);
52bda2b5 762 dlm_free_rsb(rsb);
e7fd4179
DT
763 }
764 }
765
766 kfree(ls->ls_rsbtbl);
767
768 /*
769 * Free structures on any other lists
770 */
771
2896ee37 772 dlm_purge_requestqueue(ls);
e7fd4179
DT
773 kfree(ls->ls_recover_args);
774 dlm_clear_free_entries(ls);
775 dlm_clear_members(ls);
776 dlm_clear_members_gone(ls);
777 kfree(ls->ls_node_array);
0f8e0d9a 778 log_debug(ls, "release_lockspace final free");
197b12d6 779 kobject_put(&ls->ls_kobj);
79d72b54 780 /* The ls structure will be freed when the kobject is done with */
e7fd4179 781
e7fd4179
DT
782 module_put(THIS_MODULE);
783 return 0;
784}
785
786/*
787 * Called when a system has released all its locks and is not going to use the
788 * lockspace any longer. We free everything we're managing for this lockspace.
789 * Remaining nodes will go through the recovery process as if we'd died. The
790 * lockspace must continue to function as usual, participating in recoveries,
791 * until this returns.
792 *
793 * Force has 4 possible values:
794 * 0 - don't destroy locksapce if it has any LKBs
795 * 1 - destroy lockspace if it has remote LKBs but not if it has local LKBs
796 * 2 - destroy lockspace regardless of LKBs
797 * 3 - destroy lockspace as part of a forced shutdown
798 */
799
800int dlm_release_lockspace(void *lockspace, int force)
801{
802 struct dlm_ls *ls;
0f8e0d9a 803 int error;
e7fd4179
DT
804
805 ls = dlm_find_lockspace_local(lockspace);
806 if (!ls)
807 return -EINVAL;
808 dlm_put_lockspace(ls);
0f8e0d9a
DT
809
810 mutex_lock(&ls_lock);
811 error = release_lockspace(ls, force);
812 if (!error)
813 ls_count--;
278afcbf 814 if (!ls_count)
0f8e0d9a
DT
815 threads_stop();
816 mutex_unlock(&ls_lock);
817
818 return error;
e7fd4179
DT
819}
820
dc68c7ed
DT
821void dlm_stop_lockspaces(void)
822{
823 struct dlm_ls *ls;
824
825 restart:
826 spin_lock(&lslist_lock);
827 list_for_each_entry(ls, &lslist, ls_list) {
828 if (!test_bit(LSFL_RUNNING, &ls->ls_flags))
829 continue;
830 spin_unlock(&lslist_lock);
831 log_error(ls, "no userland control daemon, stopping lockspace");
832 dlm_ls_stop(ls);
833 goto restart;
834 }
835 spin_unlock(&lslist_lock);
836}
837
This page took 0.295811 seconds and 5 git commands to generate.