/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmdomain.c
 *
 * defines domain join / leave apis
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 *
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/err.h>

#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"

#include "dlmdomain.h"

#include "dlmver.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_DOMAIN)
#include "cluster/masklog.h"

/*
 * ocfs2 node maps are arrays of long ints, which makes it unsafe to send
 * them directly across the wire due to endianness issues. To work around
 * this, we convert the long ints to byte arrays. The following three
 * routines are helpers to set/test/copy bits within those byte arrays.
 */
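/* For example, bit 10 lives in byte 1 (10 >> 3) of the byte map and is
 * addressed there by the mask 1 << (10 & 7) == 0x04. */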
static inline void byte_set_bit(u8 nr, u8 map[])
{
        map[nr >> 3] |= (1UL << (nr & 7));
}

static inline int byte_test_bit(u8 nr, u8 map[])
{
        return ((1UL << (nr & 7)) & (map[nr >> 3])) != 0;
}

static inline void byte_copymap(u8 dmap[], unsigned long smap[],
                                unsigned int sz)
{
        unsigned int nn;

        if (!sz)
                return;

        memset(dmap, 0, ((sz + 7) >> 3));
        for (nn = 0 ; nn < sz; nn++)
                if (test_bit(nn, smap))
                        byte_set_bit(nn, dmap);
}

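/* The lockres hash table is stored as a vector of individually allocated
 * pages (DLM_HASH_PAGES of them), each holding DLM_BUCKETS_PER_PAGE hlist
 * heads, rather than as one large contiguous allocation. */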
static void dlm_free_pagevec(void **vec, int pages)
{
        while (pages--)
                free_page((unsigned long)vec[pages]);
        kfree(vec);
}

static void **dlm_alloc_pagevec(int pages)
{
        void **vec = kmalloc(pages * sizeof(void *), GFP_KERNEL);
        int i;

        if (!vec)
                return NULL;

        for (i = 0; i < pages; i++)
                if (!(vec[i] = (void *)__get_free_page(GFP_KERNEL)))
                        goto out_free;

        mlog(0, "Allocated DLM hash pagevec; %d pages (%lu expected), %lu buckets per page\n",
             pages, (unsigned long)DLM_HASH_PAGES,
             (unsigned long)DLM_BUCKETS_PER_PAGE);
        return vec;
out_free:
        dlm_free_pagevec(vec, i);
        return NULL;
}

/*
 *
 * spinlock lock ordering: if multiple locks are needed, obey this ordering:
 *    dlm_domain_lock
 *    struct dlm_ctxt->spinlock
 *    struct dlm_lock_resource->spinlock
 *    struct dlm_ctxt->master_lock
 *    struct dlm_ctxt->ast_lock
 *    dlm_master_list_entry->spinlock
 *    dlm_lock->spinlock
 *
 */

DEFINE_SPINLOCK(dlm_domain_lock);
LIST_HEAD(dlm_domains);
static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events);

#define DLM_DOMAIN_BACKOFF_MS 200

static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data);
static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
                                     void **ret_data);
static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data);
static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data);

static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm);

void __dlm_unhash_lockres(struct dlm_lock_resource *lockres)
{
        hlist_del_init(&lockres->hash_node);
        dlm_lockres_put(lockres);
}

void __dlm_insert_lockres(struct dlm_ctxt *dlm,
                          struct dlm_lock_resource *res)
{
        struct hlist_head *bucket;
        struct qstr *q;

        assert_spin_locked(&dlm->spinlock);

        q = &res->lockname;
        bucket = dlm_lockres_hash(dlm, q->hash);

        /* get a reference for our hashtable */
        dlm_lockres_get(res);

        hlist_add_head(&res->hash_node, bucket);
}

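/* Look up a lock resource by name and precomputed hash. The caller must
 * hold dlm->spinlock; on success an extra reference is taken on the
 * resource before it is returned. */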
struct dlm_lock_resource * __dlm_lookup_lockres_full(struct dlm_ctxt *dlm,
                                                     const char *name,
                                                     unsigned int len,
                                                     unsigned int hash)
{
        struct hlist_head *bucket;
        struct hlist_node *list;

        mlog_entry("%.*s\n", len, name);

        assert_spin_locked(&dlm->spinlock);

        bucket = dlm_lockres_hash(dlm, hash);

        hlist_for_each(list, bucket) {
                struct dlm_lock_resource *res = hlist_entry(list,
                        struct dlm_lock_resource, hash_node);
                if (res->lockname.name[0] != name[0])
                        continue;
                if (unlikely(res->lockname.len != len))
                        continue;
                if (memcmp(res->lockname.name + 1, name + 1, len - 1))
                        continue;
                dlm_lockres_get(res);
                return res;
        }
        return NULL;
}

/* intended to be called by functions which do not care about lock
 * resources which are being purged (most net _handler functions).
 * this will return NULL for any lock resource which is found but
 * currently in the process of dropping its mastery reference.
 * use __dlm_lookup_lockres_full when you need the lock resource
 * regardless (e.g. dlm_get_lock_resource) */
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                                const char *name,
                                                unsigned int len,
                                                unsigned int hash)
{
        struct dlm_lock_resource *res = NULL;

        mlog_entry("%.*s\n", len, name);

        assert_spin_locked(&dlm->spinlock);

        res = __dlm_lookup_lockres_full(dlm, name, len, hash);
        if (res) {
                spin_lock(&res->spinlock);
                if (res->state & DLM_LOCK_RES_DROPPING_REF) {
                        spin_unlock(&res->spinlock);
                        dlm_lockres_put(res);
                        return NULL;
                }
                spin_unlock(&res->spinlock);
        }

        return res;
}

struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
                                              const char *name,
                                              unsigned int len)
{
        struct dlm_lock_resource *res;
        unsigned int hash = dlm_lockid_hash(name, len);

        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, len, hash);
        spin_unlock(&dlm->spinlock);
        return res;
}

static struct dlm_ctxt * __dlm_lookup_domain_full(const char *domain, int len)
{
        struct dlm_ctxt *tmp = NULL;
        struct list_head *iter;

        assert_spin_locked(&dlm_domain_lock);

        /* tmp->name here is always NULL terminated,
         * but domain may not be! */
        list_for_each(iter, &dlm_domains) {
                tmp = list_entry(iter, struct dlm_ctxt, list);
                if (strlen(tmp->name) == len &&
                    memcmp(tmp->name, domain, len) == 0)
                        break;
                tmp = NULL;
        }

        return tmp;
}

/* For null terminated domain strings ONLY */
static struct dlm_ctxt * __dlm_lookup_domain(const char *domain)
{
        assert_spin_locked(&dlm_domain_lock);

        return __dlm_lookup_domain_full(domain, strlen(domain));
}


/* returns true on one of two conditions:
 * 1) the domain does not exist
 * 2) the domain exists and its state is "joined" */
static int dlm_wait_on_domain_helper(const char *domain)
{
        int ret = 0;
        struct dlm_ctxt *tmp = NULL;

        spin_lock(&dlm_domain_lock);

        tmp = __dlm_lookup_domain(domain);
        if (!tmp)
                ret = 1;
        else if (tmp->dlm_state == DLM_CTXT_JOINED)
                ret = 1;

        spin_unlock(&dlm_domain_lock);
        return ret;
}

static void dlm_free_ctxt_mem(struct dlm_ctxt *dlm)
{
        if (dlm->lockres_hash)
                dlm_free_pagevec((void **)dlm->lockres_hash, DLM_HASH_PAGES);

        if (dlm->name)
                kfree(dlm->name);

        kfree(dlm);
}

/* A little strange - this function will be called while holding
 * dlm_domain_lock and is expected to be holding it on the way out. We
 * will however drop and reacquire it multiple times */
static void dlm_ctxt_release(struct kref *kref)
{
        struct dlm_ctxt *dlm;

        dlm = container_of(kref, struct dlm_ctxt, dlm_refs);

        BUG_ON(dlm->num_joins);
        BUG_ON(dlm->dlm_state == DLM_CTXT_JOINED);

        /* we may still be in the list if we hit an error during join. */
        list_del_init(&dlm->list);

        spin_unlock(&dlm_domain_lock);

        mlog(0, "freeing memory from domain %s\n", dlm->name);

        wake_up(&dlm_domain_events);

        dlm_free_ctxt_mem(dlm);

        spin_lock(&dlm_domain_lock);
}

void dlm_put(struct dlm_ctxt *dlm)
{
        spin_lock(&dlm_domain_lock);
        kref_put(&dlm->dlm_refs, dlm_ctxt_release);
        spin_unlock(&dlm_domain_lock);
}

static void __dlm_get(struct dlm_ctxt *dlm)
{
        kref_get(&dlm->dlm_refs);
}

/* given a questionable reference to a dlm object, gets a reference if
 * it can find it in the list, otherwise returns NULL in which case
 * you shouldn't trust your pointer. */
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm)
{
        struct list_head *iter;
        struct dlm_ctxt *target = NULL;

        spin_lock(&dlm_domain_lock);

        list_for_each(iter, &dlm_domains) {
                target = list_entry(iter, struct dlm_ctxt, list);

                if (target == dlm) {
                        __dlm_get(target);
                        break;
                }

                target = NULL;
        }

        spin_unlock(&dlm_domain_lock);

        return target;
}

int dlm_domain_fully_joined(struct dlm_ctxt *dlm)
{
        int ret;

        spin_lock(&dlm_domain_lock);
        ret = (dlm->dlm_state == DLM_CTXT_JOINED) ||
                (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN);
        spin_unlock(&dlm_domain_lock);

        return ret;
}

static void dlm_destroy_dlm_worker(struct dlm_ctxt *dlm)
{
        if (dlm->dlm_worker) {
                flush_workqueue(dlm->dlm_worker);
                destroy_workqueue(dlm->dlm_worker);
                dlm->dlm_worker = NULL;
        }
}

static void dlm_complete_dlm_shutdown(struct dlm_ctxt *dlm)
{
        dlm_unregister_domain_handlers(dlm);
        dlm_complete_thread(dlm);
        dlm_complete_recovery_thread(dlm);
        dlm_destroy_dlm_worker(dlm);

        /* We've left the domain. Now we can take ourselves out of the
         * list and allow the kref stuff to help us free the
         * memory. */
        spin_lock(&dlm_domain_lock);
        list_del_init(&dlm->list);
        spin_unlock(&dlm_domain_lock);

        /* Wake up anyone waiting for us to remove this domain */
        wake_up(&dlm_domain_events);
}

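/* Walk every hash bucket and migrate away (or let the dlm thread purge)
 * each lock resource we still know about. Returns -EAGAIN if any resources
 * were still hashed on this pass so that the caller keeps iterating. */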
static int dlm_migrate_all_locks(struct dlm_ctxt *dlm)
{
        int i, num, n, ret = 0;
        struct dlm_lock_resource *res;
        struct hlist_node *iter;
        struct hlist_head *bucket;
        int dropped;

        mlog(0, "Migrating locks from domain %s\n", dlm->name);

        num = 0;
        spin_lock(&dlm->spinlock);
        for (i = 0; i < DLM_HASH_BUCKETS; i++) {
redo_bucket:
                n = 0;
                bucket = dlm_lockres_hash(dlm, i);
                iter = bucket->first;
                while (iter) {
                        n++;
                        res = hlist_entry(iter, struct dlm_lock_resource,
                                          hash_node);
                        dlm_lockres_get(res);
                        /* migrate, if necessary.  this will drop the dlm
                         * spinlock and retake it if it does migration. */
                        dropped = dlm_empty_lockres(dlm, res);

                        spin_lock(&res->spinlock);
                        __dlm_lockres_calc_usage(dlm, res);
                        iter = res->hash_node.next;
                        spin_unlock(&res->spinlock);

                        dlm_lockres_put(res);

                        cond_resched_lock(&dlm->spinlock);

                        if (dropped)
                                goto redo_bucket;
                }
                num += n;
                mlog(0, "%s: touched %d lockreses in bucket %d "
                     "(tot=%d)\n", dlm->name, n, i, num);
        }
        spin_unlock(&dlm->spinlock);
        wake_up(&dlm->dlm_thread_wq);

        /* let the dlm thread take care of purging, keep scanning until
         * nothing remains in the hash */
        if (num) {
                mlog(0, "%s: %d lock resources in hash last pass\n",
                     dlm->name, num);
                ret = -EAGAIN;
        }
        mlog(0, "DONE Migrating locks from domain %s\n", dlm->name);
        return ret;
}

static int dlm_no_joining_node(struct dlm_ctxt *dlm)
{
        int ret;

        spin_lock(&dlm->spinlock);
        ret = dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN;
        spin_unlock(&dlm->spinlock);

        return ret;
}

static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm)
{
        /* Yikes, a double spinlock! I need domain_lock for the dlm
         * state and the dlm spinlock for join state... Sorry! */
again:
        spin_lock(&dlm_domain_lock);
        spin_lock(&dlm->spinlock);

        if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "Node %d is joining, we wait on it.\n",
                     dlm->joining_node);
                spin_unlock(&dlm->spinlock);
                spin_unlock(&dlm_domain_lock);

                wait_event(dlm->dlm_join_events, dlm_no_joining_node(dlm));
                goto again;
        }

        dlm->dlm_state = DLM_CTXT_LEAVING;
        spin_unlock(&dlm->spinlock);
        spin_unlock(&dlm_domain_lock);
}

static void __dlm_print_nodes(struct dlm_ctxt *dlm)
{
        int node = -1;

        assert_spin_locked(&dlm->spinlock);

        printk(KERN_INFO "ocfs2_dlm: Nodes in domain (\"%s\"): ", dlm->name);

        while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                printk("%d ", node);
        }
        printk("\n");
}

static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data)
{
        struct dlm_ctxt *dlm = data;
        unsigned int node;
        struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf;

        mlog_entry("%p %u %p", msg, len, data);

        if (!dlm_grab(dlm))
                return 0;

        node = exit_msg->node_idx;

        printk(KERN_INFO "ocfs2_dlm: Node %u leaves domain %s\n", node, dlm->name);

        spin_lock(&dlm->spinlock);
        clear_bit(node, dlm->domain_map);
        __dlm_print_nodes(dlm);

        /* notify anything attached to the heartbeat events */
        dlm_hb_event_notify_attached(dlm, node, 0);

        spin_unlock(&dlm->spinlock);

        dlm_put(dlm);

        return 0;
}

static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm,
                                    unsigned int node)
{
        int status;
        struct dlm_exit_domain leave_msg;

        mlog(0, "Asking node %u if we can leave the domain %s me = %u\n",
             node, dlm->name, dlm->node_num);

        memset(&leave_msg, 0, sizeof(leave_msg));
        leave_msg.node_idx = dlm->node_num;

        status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key,
                                    &leave_msg, sizeof(leave_msg), node,
                                    NULL);

        mlog(0, "status return %d from o2net_send_message\n", status);

        return status;
}


static void dlm_leave_domain(struct dlm_ctxt *dlm)
{
        int node, clear_node, status;

        /* At this point we've migrated away all our locks and won't
         * accept mastership of new ones. The dlm is responsible for
         * almost nothing now. We make sure not to confuse any joining
         * nodes and then commence shutdown procedure. */

        spin_lock(&dlm->spinlock);
        /* Clear ourselves from the domain map */
        clear_bit(dlm->node_num, dlm->domain_map);
        while ((node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES,
                                     0)) < O2NM_MAX_NODES) {
                /* Drop the dlm spinlock. This is safe wrt the domain_map.
                 * -nodes cannot be added now as the
                 *   query_join_handlers knows to respond with OK_NO_MAP
                 * -we catch the right network errors if a node is
                 *   removed from the map while we're sending him the
                 *   exit message. */
                spin_unlock(&dlm->spinlock);

                clear_node = 1;

                status = dlm_send_one_domain_exit(dlm, node);
                if (status < 0 &&
                    status != -ENOPROTOOPT &&
                    status != -ENOTCONN) {
                        mlog(ML_NOTICE, "Error %d sending domain exit message "
                             "to node %d\n", status, node);

                        /* Not sure what to do here but let's sleep for
                         * a bit in case this was a transient
                         * error... */
                        msleep(DLM_DOMAIN_BACKOFF_MS);
                        clear_node = 0;
                }

                spin_lock(&dlm->spinlock);
                /* If we're not clearing the node bit then we intend
                 * to loop back around to try again. */
                if (clear_node)
                        clear_bit(node, dlm->domain_map);
        }
        spin_unlock(&dlm->spinlock);
}

int dlm_joined(struct dlm_ctxt *dlm)
{
        int ret = 0;

        spin_lock(&dlm_domain_lock);

        if (dlm->dlm_state == DLM_CTXT_JOINED)
                ret = 1;

        spin_unlock(&dlm_domain_lock);

        return ret;
}

int dlm_shutting_down(struct dlm_ctxt *dlm)
{
        int ret = 0;

        spin_lock(&dlm_domain_lock);

        if (dlm->dlm_state == DLM_CTXT_IN_SHUTDOWN)
                ret = 1;

        spin_unlock(&dlm_domain_lock);

        return ret;
}

void dlm_unregister_domain(struct dlm_ctxt *dlm)
{
        int leave = 0;

        spin_lock(&dlm_domain_lock);
        BUG_ON(dlm->dlm_state != DLM_CTXT_JOINED);
        BUG_ON(!dlm->num_joins);

        dlm->num_joins--;
        if (!dlm->num_joins) {
                /* We mark it "in shutdown" now so new register
                 * requests wait until we've completely left the
                 * domain. Don't use DLM_CTXT_LEAVING yet as we still
                 * want new domain joins to communicate with us at
                 * least until we've completed migration of our
                 * resources. */
                dlm->dlm_state = DLM_CTXT_IN_SHUTDOWN;
                leave = 1;
        }
        spin_unlock(&dlm_domain_lock);

        if (leave) {
                mlog(0, "shutting down domain %s\n", dlm->name);

                /* We changed dlm state, notify the thread */
                dlm_kick_thread(dlm, NULL);

                while (dlm_migrate_all_locks(dlm)) {
                        mlog(0, "%s: more migration to do\n", dlm->name);
                }
                dlm_mark_domain_leaving(dlm);
                dlm_leave_domain(dlm);
                dlm_complete_dlm_shutdown(dlm);
        }
        dlm_put(dlm);
}
EXPORT_SYMBOL_GPL(dlm_unregister_domain);

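/* Handler for DLM_QUERY_JOIN_MSG. Decides whether the querying node may
 * join this domain: disallow if it is not yet heartbeating to us, if its
 * view of the node map disagrees with ours, or while recovery is active;
 * otherwise record it as the current joining node and answer JOIN_OK. */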
static int dlm_query_join_handler(struct o2net_msg *msg, u32 len, void *data,
                                  void **ret_data)
{
        struct dlm_query_join_request *query;
        enum dlm_query_join_response response;
        struct dlm_ctxt *dlm = NULL;
        u8 nodenum;

        query = (struct dlm_query_join_request *) msg->buf;

        mlog(0, "node %u wants to join domain %s\n", query->node_idx,
             query->domain);

        /*
         * If heartbeat doesn't consider the node live, tell it
         * to back off and try again.  This gives heartbeat a chance
         * to catch up.
         */
        if (!o2hb_check_node_heartbeating(query->node_idx)) {
                mlog(0, "node %u is not in our live map yet\n",
                     query->node_idx);

                response = JOIN_DISALLOW;
                goto respond;
        }

        response = JOIN_OK_NO_MAP;

        spin_lock(&dlm_domain_lock);
        dlm = __dlm_lookup_domain_full(query->domain, query->name_len);
        if (!dlm)
                goto unlock_respond;

        /*
         * There is a small window where the joining node may not yet see
         * the node(s) that just left but are still part of the cluster.
         * DISALLOW the join request if the joining node has a different
         * node map.
         */
        nodenum = 0;
        while (nodenum < O2NM_MAX_NODES) {
                if (test_bit(nodenum, dlm->domain_map)) {
                        if (!byte_test_bit(nodenum, query->node_map)) {
                                mlog(0, "disallow join as node %u does not "
                                     "have node %u in its nodemap\n",
                                     query->node_idx, nodenum);
                                response = JOIN_DISALLOW;
                                goto unlock_respond;
                        }
                }
                nodenum++;
        }

        /* Once the dlm ctxt is marked as leaving then we don't want
         * to be put in someone's domain map.
         * Also, explicitly disallow joining at certain troublesome
         * times (ie. during recovery). */
        if (dlm && dlm->dlm_state != DLM_CTXT_LEAVING) {
                int bit = query->node_idx;
                spin_lock(&dlm->spinlock);

                if (dlm->dlm_state == DLM_CTXT_NEW &&
                    dlm->joining_node == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        /* If this is a brand new context and we
                         * haven't started our join process yet, then
                         * the other node won the race. */
                        response = JOIN_OK_NO_MAP;
                } else if (dlm->joining_node != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        /* Disallow parallel joins. */
                        response = JOIN_DISALLOW;
                } else if (dlm->reco.state & DLM_RECO_STATE_ACTIVE) {
                        mlog(0, "node %u trying to join, but recovery "
                             "is ongoing.\n", bit);
                        response = JOIN_DISALLOW;
                } else if (test_bit(bit, dlm->recovery_map)) {
                        mlog(0, "node %u trying to join, but it "
                             "still needs recovery.\n", bit);
                        response = JOIN_DISALLOW;
                } else if (test_bit(bit, dlm->domain_map)) {
                        mlog(0, "node %u trying to join, but it "
                             "is still in the domain! needs recovery?\n",
                             bit);
                        response = JOIN_DISALLOW;
                } else {
                        /* Alright we're fully a part of this domain
                         * so we keep some state as to who's joining
                         * and indicate to him what needs to be fixed
                         * up. */
                        response = JOIN_OK;
                        __dlm_set_joining_node(dlm, query->node_idx);
                }

                spin_unlock(&dlm->spinlock);
        }
unlock_respond:
        spin_unlock(&dlm_domain_lock);

respond:
        mlog(0, "We respond with %u\n", response);

        return response;
}

static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data,
                                     void **ret_data)
{
        struct dlm_assert_joined *assert;
        struct dlm_ctxt *dlm = NULL;

        assert = (struct dlm_assert_joined *) msg->buf;

        mlog(0, "node %u asserts join on domain %s\n", assert->node_idx,
             assert->domain);

        spin_lock(&dlm_domain_lock);
        dlm = __dlm_lookup_domain_full(assert->domain, assert->name_len);
        /* XXX should we consider no dlm ctxt an error? */
        if (dlm) {
                spin_lock(&dlm->spinlock);

                /* Alright, this node has officially joined our
                 * domain. Set him in the map and clean up our
                 * leftover join state. */
                BUG_ON(dlm->joining_node != assert->node_idx);
                set_bit(assert->node_idx, dlm->domain_map);
                __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);

                printk(KERN_INFO "ocfs2_dlm: Node %u joins domain %s\n",
                       assert->node_idx, dlm->name);
                __dlm_print_nodes(dlm);

                /* notify anything attached to the heartbeat events */
                dlm_hb_event_notify_attached(dlm, assert->node_idx, 1);

                spin_unlock(&dlm->spinlock);
        }
        spin_unlock(&dlm_domain_lock);

        return 0;
}

static int dlm_cancel_join_handler(struct o2net_msg *msg, u32 len, void *data,
                                   void **ret_data)
{
        struct dlm_cancel_join *cancel;
        struct dlm_ctxt *dlm = NULL;

        cancel = (struct dlm_cancel_join *) msg->buf;

        mlog(0, "node %u cancels join on domain %s\n", cancel->node_idx,
             cancel->domain);

        spin_lock(&dlm_domain_lock);
        dlm = __dlm_lookup_domain_full(cancel->domain, cancel->name_len);

        if (dlm) {
                spin_lock(&dlm->spinlock);

                /* Yikes, this guy wants to cancel his join. No
                 * problem, we simply cleanup our join state. */
                BUG_ON(dlm->joining_node != cancel->node_idx);
                __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);

                spin_unlock(&dlm->spinlock);
        }
        spin_unlock(&dlm_domain_lock);

        return 0;
}

static int dlm_send_one_join_cancel(struct dlm_ctxt *dlm,
                                    unsigned int node)
{
        int status;
        struct dlm_cancel_join cancel_msg;

        memset(&cancel_msg, 0, sizeof(cancel_msg));
        cancel_msg.node_idx = dlm->node_num;
        cancel_msg.name_len = strlen(dlm->name);
        memcpy(cancel_msg.domain, dlm->name, cancel_msg.name_len);

        status = o2net_send_message(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
                                    &cancel_msg, sizeof(cancel_msg), node,
                                    NULL);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

bail:
        return status;
}

/* map_size should be in bytes. */
static int dlm_send_join_cancels(struct dlm_ctxt *dlm,
                                 unsigned long *node_map,
                                 unsigned int map_size)
{
        int status, tmpstat;
        unsigned int node;

        if (map_size != (BITS_TO_LONGS(O2NM_MAX_NODES) *
                         sizeof(unsigned long))) {
                mlog(ML_ERROR,
                     "map_size %u != BITS_TO_LONGS(O2NM_MAX_NODES) %u\n",
                     map_size, BITS_TO_LONGS(O2NM_MAX_NODES));
                return -EINVAL;
        }

        status = 0;
        node = -1;
        while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                if (node == dlm->node_num)
                        continue;

                tmpstat = dlm_send_one_join_cancel(dlm, node);
                if (tmpstat) {
                        mlog(ML_ERROR, "Error return %d cancelling join on "
                             "node %d\n", tmpstat, node);
                        if (!status)
                                status = tmpstat;
                }
        }

        if (status)
                mlog_errno(status);
        return status;
}

static int dlm_request_join(struct dlm_ctxt *dlm,
                            int node,
                            enum dlm_query_join_response *response)
{
        int status, retval;
        struct dlm_query_join_request join_msg;

        mlog(0, "querying node %d\n", node);

        memset(&join_msg, 0, sizeof(join_msg));
        join_msg.node_idx = dlm->node_num;
        join_msg.name_len = strlen(dlm->name);
        memcpy(join_msg.domain, dlm->name, join_msg.name_len);

        /* copy live node map to join message */
        byte_copymap(join_msg.node_map, dlm->live_nodes_map, O2NM_MAX_NODES);

        status = o2net_send_message(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY, &join_msg,
                                    sizeof(join_msg), node, &retval);
        if (status < 0 && status != -ENOPROTOOPT) {
                mlog_errno(status);
                goto bail;
        }

        /* -ENOPROTOOPT from the net code means the other side isn't
           listening for our message type -- that's fine, it means
           his dlm isn't up, so we can consider him a 'yes' but not
           joined into the domain.  */
        if (status == -ENOPROTOOPT) {
                status = 0;
                *response = JOIN_OK_NO_MAP;
        } else if (retval == JOIN_DISALLOW ||
                   retval == JOIN_OK ||
                   retval == JOIN_OK_NO_MAP) {
                *response = retval;
        } else {
                status = -EINVAL;
                mlog(ML_ERROR, "invalid response %d from node %u\n", retval,
                     node);
        }

        mlog(0, "status %d, node %d response is %d\n", status, node,
             *response);

bail:
        return status;
}

static int dlm_send_one_join_assert(struct dlm_ctxt *dlm,
                                    unsigned int node)
{
        int status;
        struct dlm_assert_joined assert_msg;

        mlog(0, "Sending join assert to node %u\n", node);

        memset(&assert_msg, 0, sizeof(assert_msg));
        assert_msg.node_idx = dlm->node_num;
        assert_msg.name_len = strlen(dlm->name);
        memcpy(assert_msg.domain, dlm->name, assert_msg.name_len);

        status = o2net_send_message(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
                                    &assert_msg, sizeof(assert_msg), node,
                                    NULL);
        if (status < 0)
                mlog_errno(status);

        return status;
}

static void dlm_send_join_asserts(struct dlm_ctxt *dlm,
                                  unsigned long *node_map)
{
        int status, node, live;

        status = 0;
        node = -1;
        while ((node = find_next_bit(node_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                if (node == dlm->node_num)
                        continue;

                do {
                        /* It is very important that this message be
                         * received so we spin until either the node
                         * has died or it gets the message. */
                        status = dlm_send_one_join_assert(dlm, node);

                        spin_lock(&dlm->spinlock);
                        live = test_bit(node, dlm->live_nodes_map);
                        spin_unlock(&dlm->spinlock);

                        if (status) {
                                mlog(ML_ERROR, "Error return %d asserting "
                                     "join on node %d\n", status, node);

                                /* give us some time between errors... */
                                if (live)
                                        msleep(DLM_DOMAIN_BACKOFF_MS);
                        }
                } while (status && live);
        }
}

struct domain_join_ctxt {
        unsigned long live_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
        unsigned long yes_resp_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

static int dlm_should_restart_join(struct dlm_ctxt *dlm,
                                   struct domain_join_ctxt *ctxt,
                                   enum dlm_query_join_response response)
{
        int ret;

        if (response == JOIN_DISALLOW) {
                mlog(0, "Latest response of disallow -- should restart\n");
                return 1;
        }

        spin_lock(&dlm->spinlock);
        /* For now, we restart the process if the node maps have
         * changed at all */
        ret = memcmp(ctxt->live_map, dlm->live_nodes_map,
                     sizeof(dlm->live_nodes_map));
        spin_unlock(&dlm->spinlock);

        if (ret)
                mlog(0, "Node maps changed -- should restart\n");

        return ret;
}

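/* One pass of the join protocol: snapshot the live node map, send a join
 * query to every other live node, and restart (-EAGAIN) if any node
 * disallows us or the live map changes underneath us. On success the yes
 * responders become our domain map and each of them gets a join assert. */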
static int dlm_try_to_join_domain(struct dlm_ctxt *dlm)
{
        int status = 0, tmpstat, node;
        struct domain_join_ctxt *ctxt;
        enum dlm_query_join_response response;

        mlog_entry("%p", dlm);

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (!ctxt) {
                status = -ENOMEM;
                mlog_errno(status);
                goto bail;
        }

        /* group sem locking should work for us here -- we're already
         * registered for heartbeat events so filling this should be
         * atomic wrt getting those handlers called. */
        o2hb_fill_node_map(dlm->live_nodes_map, sizeof(dlm->live_nodes_map));

        spin_lock(&dlm->spinlock);
        memcpy(ctxt->live_map, dlm->live_nodes_map, sizeof(ctxt->live_map));

        __dlm_set_joining_node(dlm, dlm->node_num);

        spin_unlock(&dlm->spinlock);

        node = -1;
        while ((node = find_next_bit(ctxt->live_map, O2NM_MAX_NODES,
                                     node + 1)) < O2NM_MAX_NODES) {
                if (node == dlm->node_num)
                        continue;

                status = dlm_request_join(dlm, node, &response);
                if (status < 0) {
                        mlog_errno(status);
                        goto bail;
                }

                /* Ok, either we got a response or the node doesn't have a
                 * dlm up. */
                if (response == JOIN_OK)
                        set_bit(node, ctxt->yes_resp_map);

                if (dlm_should_restart_join(dlm, ctxt, response)) {
                        status = -EAGAIN;
                        goto bail;
                }
        }

        mlog(0, "Yay, done querying nodes!\n");

        /* Yay, everyone agrees we can join the domain. My domain is
         * comprised of all nodes who were put in the
         * yes_resp_map. Copy that into our domain map and send a join
         * assert message to clean up everyone else's state. */
        spin_lock(&dlm->spinlock);
        memcpy(dlm->domain_map, ctxt->yes_resp_map,
               sizeof(ctxt->yes_resp_map));
        set_bit(dlm->node_num, dlm->domain_map);
        spin_unlock(&dlm->spinlock);

        dlm_send_join_asserts(dlm, ctxt->yes_resp_map);

        /* Joined state *must* be set before the joining node
         * information, otherwise the query_join handler may read no
         * current joiner but a state of NEW and tell joining nodes
         * we're not in the domain. */
        spin_lock(&dlm_domain_lock);
        dlm->dlm_state = DLM_CTXT_JOINED;
        dlm->num_joins++;
        spin_unlock(&dlm_domain_lock);

bail:
        spin_lock(&dlm->spinlock);
        __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
        if (!status)
                __dlm_print_nodes(dlm);
        spin_unlock(&dlm->spinlock);

        if (ctxt) {
                /* Do we need to send a cancel message to any nodes? */
                if (status < 0) {
                        tmpstat = dlm_send_join_cancels(dlm,
                                                        ctxt->yes_resp_map,
                                                        sizeof(ctxt->yes_resp_map));
                        if (tmpstat < 0)
                                mlog_errno(tmpstat);
                }
                kfree(ctxt);
        }

        mlog(0, "returning %d\n", status);
        return status;
}

static void dlm_unregister_domain_handlers(struct dlm_ctxt *dlm)
{
        o2hb_unregister_callback(&dlm->dlm_hb_up);
        o2hb_unregister_callback(&dlm->dlm_hb_down);
        o2net_unregister_handler_list(&dlm->dlm_domain_handlers);
}

static int dlm_register_domain_handlers(struct dlm_ctxt *dlm)
{
        int status;

        mlog(0, "registering handlers.\n");

        o2hb_setup_callback(&dlm->dlm_hb_down, O2HB_NODE_DOWN_CB,
                            dlm_hb_node_down_cb, dlm, DLM_HB_NODE_DOWN_PRI);
        status = o2hb_register_callback(&dlm->dlm_hb_down);
        if (status)
                goto bail;

        o2hb_setup_callback(&dlm->dlm_hb_up, O2HB_NODE_UP_CB,
                            dlm_hb_node_up_cb, dlm, DLM_HB_NODE_UP_PRI);
        status = o2hb_register_callback(&dlm->dlm_hb_up);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_MASTER_REQUEST_MSG, dlm->key,
                                        sizeof(struct dlm_master_request),
                                        dlm_master_request_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_ASSERT_MASTER_MSG, dlm->key,
                                        sizeof(struct dlm_assert_master),
                                        dlm_assert_master_handler,
                                        dlm, dlm_assert_master_post_handler,
                                        &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_CREATE_LOCK_MSG, dlm->key,
                                        sizeof(struct dlm_create_lock),
                                        dlm_create_lock_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_CONVERT_LOCK_MSG, dlm->key,
                                        DLM_CONVERT_LOCK_MAX_LEN,
                                        dlm_convert_lock_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_UNLOCK_LOCK_MSG, dlm->key,
                                        DLM_UNLOCK_LOCK_MAX_LEN,
                                        dlm_unlock_lock_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_PROXY_AST_MSG, dlm->key,
                                        DLM_PROXY_AST_MAX_LEN,
                                        dlm_proxy_ast_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_EXIT_DOMAIN_MSG, dlm->key,
                                        sizeof(struct dlm_exit_domain),
                                        dlm_exit_domain_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_DEREF_LOCKRES_MSG, dlm->key,
                                        sizeof(struct dlm_deref_lockres),
                                        dlm_deref_lockres_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_MIGRATE_REQUEST_MSG, dlm->key,
                                        sizeof(struct dlm_migrate_request),
                                        dlm_migrate_request_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_MIG_LOCKRES_MSG, dlm->key,
                                        DLM_MIG_LOCKRES_MAX_LEN,
                                        dlm_mig_lockres_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_MASTER_REQUERY_MSG, dlm->key,
                                        sizeof(struct dlm_master_requery),
                                        dlm_master_requery_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_LOCK_REQUEST_MSG, dlm->key,
                                        sizeof(struct dlm_lock_request),
                                        dlm_request_all_locks_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_RECO_DATA_DONE_MSG, dlm->key,
                                        sizeof(struct dlm_reco_data_done),
                                        dlm_reco_data_done_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_BEGIN_RECO_MSG, dlm->key,
                                        sizeof(struct dlm_begin_reco),
                                        dlm_begin_reco_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_FINALIZE_RECO_MSG, dlm->key,
                                        sizeof(struct dlm_finalize_reco),
                                        dlm_finalize_reco_handler,
                                        dlm, NULL, &dlm->dlm_domain_handlers);
        if (status)
                goto bail;

bail:
        if (status)
                dlm_unregister_domain_handlers(dlm);

        return status;
}

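/* Bring a freshly allocated dlm_ctxt into its domain: register handlers,
 * start the dlm and recovery threads and the work queue, then retry
 * dlm_try_to_join_domain() with a small randomized backoff until it
 * succeeds or the join times out. */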
static int dlm_join_domain(struct dlm_ctxt *dlm)
{
        int status;
        unsigned int backoff;
        unsigned int total_backoff = 0;

        BUG_ON(!dlm);

        mlog(0, "Join domain %s\n", dlm->name);

        status = dlm_register_domain_handlers(dlm);
        if (status) {
                mlog_errno(status);
                goto bail;
        }

        status = dlm_launch_thread(dlm);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = dlm_launch_recovery_thread(dlm);
        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        dlm->dlm_worker = create_singlethread_workqueue("dlm_wq");
        if (!dlm->dlm_worker) {
                status = -ENOMEM;
                mlog_errno(status);
                goto bail;
        }

        do {
                status = dlm_try_to_join_domain(dlm);

                /* If we're racing another node to the join, then we
                 * need to back off temporarily and let them
                 * complete. */
#define DLM_JOIN_TIMEOUT_MSECS 90000
                if (status == -EAGAIN) {
                        if (signal_pending(current)) {
                                status = -ERESTARTSYS;
                                goto bail;
                        }

                        if (total_backoff >
                            msecs_to_jiffies(DLM_JOIN_TIMEOUT_MSECS)) {
                                status = -ERESTARTSYS;
                                mlog(ML_NOTICE, "Timed out joining dlm domain "
                                     "%s after %u msecs\n", dlm->name,
                                     jiffies_to_msecs(total_backoff));
                                goto bail;
                        }

                        /*
                         * <chip> After you!
                         * <dale> No, after you!
                         * <chip> I insist!
                         * <dale> But you first!
                         * ...
                         */
                        backoff = (unsigned int)(jiffies & 0x3);
                        backoff *= DLM_DOMAIN_BACKOFF_MS;
                        total_backoff += backoff;
                        mlog(0, "backoff %d\n", backoff);
                        msleep(backoff);
                }
        } while (status == -EAGAIN);

        if (status < 0) {
                mlog_errno(status);
                goto bail;
        }

        status = 0;
bail:
        wake_up(&dlm_domain_events);

        if (status) {
                dlm_unregister_domain_handlers(dlm);
                dlm_complete_thread(dlm);
                dlm_complete_recovery_thread(dlm);
                dlm_destroy_dlm_worker(dlm);
        }

        return status;
}

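/* Allocate and initialize a new dlm_ctxt (name, paged lockres hash, locks,
 * lists and wait queues). Returns NULL if any allocation fails. */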
static struct dlm_ctxt *dlm_alloc_ctxt(const char *domain,
                                       u32 key)
{
        int i;
        struct dlm_ctxt *dlm = NULL;

        dlm = kzalloc(sizeof(*dlm), GFP_KERNEL);
        if (!dlm) {
                mlog_errno(-ENOMEM);
                goto leave;
        }

        dlm->name = kmalloc(strlen(domain) + 1, GFP_KERNEL);
        if (dlm->name == NULL) {
                mlog_errno(-ENOMEM);
                kfree(dlm);
                dlm = NULL;
                goto leave;
        }

        dlm->lockres_hash = (struct hlist_head **)dlm_alloc_pagevec(DLM_HASH_PAGES);
        if (!dlm->lockres_hash) {
                mlog_errno(-ENOMEM);
                kfree(dlm->name);
                kfree(dlm);
                dlm = NULL;
                goto leave;
        }

        for (i = 0; i < DLM_HASH_BUCKETS; i++)
                INIT_HLIST_HEAD(dlm_lockres_hash(dlm, i));

        strcpy(dlm->name, domain);
        dlm->key = key;
        dlm->node_num = o2nm_this_node();

        spin_lock_init(&dlm->spinlock);
        spin_lock_init(&dlm->master_lock);
        spin_lock_init(&dlm->ast_lock);
        INIT_LIST_HEAD(&dlm->list);
        INIT_LIST_HEAD(&dlm->dirty_list);
        INIT_LIST_HEAD(&dlm->reco.resources);
        INIT_LIST_HEAD(&dlm->reco.received);
        INIT_LIST_HEAD(&dlm->reco.node_data);
        INIT_LIST_HEAD(&dlm->purge_list);
        INIT_LIST_HEAD(&dlm->dlm_domain_handlers);
        dlm->reco.state = 0;

        INIT_LIST_HEAD(&dlm->pending_asts);
        INIT_LIST_HEAD(&dlm->pending_basts);

        mlog(0, "dlm->recovery_map=%p, &(dlm->recovery_map[0])=%p\n",
             dlm->recovery_map, &(dlm->recovery_map[0]));

        memset(dlm->recovery_map, 0, sizeof(dlm->recovery_map));
        memset(dlm->live_nodes_map, 0, sizeof(dlm->live_nodes_map));
        memset(dlm->domain_map, 0, sizeof(dlm->domain_map));

        dlm->dlm_thread_task = NULL;
        dlm->dlm_reco_thread_task = NULL;
        dlm->dlm_worker = NULL;
        init_waitqueue_head(&dlm->dlm_thread_wq);
        init_waitqueue_head(&dlm->dlm_reco_thread_wq);
        init_waitqueue_head(&dlm->reco.event);
        init_waitqueue_head(&dlm->ast_wq);
        init_waitqueue_head(&dlm->migration_wq);
        INIT_LIST_HEAD(&dlm->master_list);
        INIT_LIST_HEAD(&dlm->mle_hb_events);

        dlm->joining_node = DLM_LOCK_RES_OWNER_UNKNOWN;
        init_waitqueue_head(&dlm->dlm_join_events);

        dlm->reco.new_master = O2NM_INVALID_NODE_NUM;
        dlm->reco.dead_node = O2NM_INVALID_NODE_NUM;
        atomic_set(&dlm->local_resources, 0);
        atomic_set(&dlm->remote_resources, 0);
        atomic_set(&dlm->unknown_resources, 0);

        spin_lock_init(&dlm->work_lock);
        INIT_LIST_HEAD(&dlm->work_list);
        INIT_WORK(&dlm->dispatched_work, dlm_dispatch_work);

        kref_init(&dlm->dlm_refs);
        dlm->dlm_state = DLM_CTXT_NEW;

        INIT_LIST_HEAD(&dlm->dlm_eviction_callbacks);

        mlog(0, "context init: refcount %u\n",
             atomic_read(&dlm->dlm_refs.refcount));

leave:
        return dlm;
}

/*
 * dlm_register_domain: one-time setup per "domain"
 */
struct dlm_ctxt * dlm_register_domain(const char *domain,
                                      u32 key)
{
        int ret;
        struct dlm_ctxt *dlm = NULL;
        struct dlm_ctxt *new_ctxt = NULL;

        if (strlen(domain) > O2NM_MAX_NAME_LEN) {
                ret = -ENAMETOOLONG;
                mlog(ML_ERROR, "domain name length too long\n");
                goto leave;
        }

        if (!o2hb_check_local_node_heartbeating()) {
                mlog(ML_ERROR, "the local node has not been configured, or is "
                     "not heartbeating\n");
                ret = -EPROTO;
                goto leave;
        }

        mlog(0, "register called for domain \"%s\"\n", domain);

retry:
        dlm = NULL;
        if (signal_pending(current)) {
                ret = -ERESTARTSYS;
                mlog_errno(ret);
                goto leave;
        }

        spin_lock(&dlm_domain_lock);

        dlm = __dlm_lookup_domain(domain);
        if (dlm) {
                if (dlm->dlm_state != DLM_CTXT_JOINED) {
                        spin_unlock(&dlm_domain_lock);

                        mlog(0, "This ctxt is not joined yet!\n");
                        wait_event_interruptible(dlm_domain_events,
                                                 dlm_wait_on_domain_helper(
                                                         domain));
                        goto retry;
                }

                __dlm_get(dlm);
                dlm->num_joins++;

                spin_unlock(&dlm_domain_lock);

                ret = 0;
                goto leave;
        }

        /* doesn't exist */
        if (!new_ctxt) {
                spin_unlock(&dlm_domain_lock);

                new_ctxt = dlm_alloc_ctxt(domain, key);
                if (new_ctxt)
                        goto retry;

                ret = -ENOMEM;
                mlog_errno(ret);
                goto leave;
        }

        /* a little variable switch-a-roo here... */
        dlm = new_ctxt;
        new_ctxt = NULL;

        /* add the new domain */
        list_add_tail(&dlm->list, &dlm_domains);
        spin_unlock(&dlm_domain_lock);

        ret = dlm_join_domain(dlm);
        if (ret) {
                mlog_errno(ret);
                dlm_put(dlm);
                goto leave;
        }

        ret = 0;
leave:
        if (new_ctxt)
                dlm_free_ctxt_mem(new_ctxt);

        if (ret < 0)
                dlm = ERR_PTR(ret);

        return dlm;
}
EXPORT_SYMBOL_GPL(dlm_register_domain);

static LIST_HEAD(dlm_join_handlers);

static void dlm_unregister_net_handlers(void)
{
        o2net_unregister_handler_list(&dlm_join_handlers);
}

static int dlm_register_net_handlers(void)
{
        int status = 0;

        status = o2net_register_handler(DLM_QUERY_JOIN_MSG, DLM_MOD_KEY,
                                        sizeof(struct dlm_query_join_request),
                                        dlm_query_join_handler,
                                        NULL, NULL, &dlm_join_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_ASSERT_JOINED_MSG, DLM_MOD_KEY,
                                        sizeof(struct dlm_assert_joined),
                                        dlm_assert_joined_handler,
                                        NULL, NULL, &dlm_join_handlers);
        if (status)
                goto bail;

        status = o2net_register_handler(DLM_CANCEL_JOIN_MSG, DLM_MOD_KEY,
                                        sizeof(struct dlm_cancel_join),
                                        dlm_cancel_join_handler,
                                        NULL, NULL, &dlm_join_handlers);

bail:
        if (status < 0)
                dlm_unregister_net_handlers();

        return status;
}

/* Domain eviction callback handling.
 *
 * The file system requires notification of node death *before* the
 * dlm completes its recovery work, otherwise it may be able to
 * acquire locks on resources requiring recovery. Since the dlm can
 * evict a node from its domain *before* heartbeat fires, a similar
 * mechanism is required. */

/* Eviction is not expected to happen often, so a per-domain lock is
 * not necessary. Eviction callbacks are allowed to sleep for short
 * periods of time. */
static DECLARE_RWSEM(dlm_callback_sem);

void dlm_fire_domain_eviction_callbacks(struct dlm_ctxt *dlm,
                                        int node_num)
{
        struct list_head *iter;
        struct dlm_eviction_cb *cb;

        down_read(&dlm_callback_sem);
        list_for_each(iter, &dlm->dlm_eviction_callbacks) {
                cb = list_entry(iter, struct dlm_eviction_cb, ec_item);

                cb->ec_func(node_num, cb->ec_data);
        }
        up_read(&dlm_callback_sem);
}

void dlm_setup_eviction_cb(struct dlm_eviction_cb *cb,
                           dlm_eviction_func *f,
                           void *data)
{
        INIT_LIST_HEAD(&cb->ec_item);
        cb->ec_func = f;
        cb->ec_data = data;
}
EXPORT_SYMBOL_GPL(dlm_setup_eviction_cb);

void dlm_register_eviction_cb(struct dlm_ctxt *dlm,
                              struct dlm_eviction_cb *cb)
{
        down_write(&dlm_callback_sem);
        list_add_tail(&cb->ec_item, &dlm->dlm_eviction_callbacks);
        up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_register_eviction_cb);

void dlm_unregister_eviction_cb(struct dlm_eviction_cb *cb)
{
        down_write(&dlm_callback_sem);
        list_del_init(&cb->ec_item);
        up_write(&dlm_callback_sem);
}
EXPORT_SYMBOL_GPL(dlm_unregister_eviction_cb);

static int __init dlm_init(void)
{
        int status;

        dlm_print_version();

        status = dlm_init_mle_cache();
        if (status)
                return -1;

        status = dlm_register_net_handlers();
        if (status) {
                dlm_destroy_mle_cache();
                return -1;
        }

        return 0;
}

static void __exit dlm_exit(void)
{
        dlm_unregister_net_handlers();
        dlm_destroy_mle_cache();
}

MODULE_AUTHOR("Oracle");
MODULE_LICENSE("GPL");

module_init(dlm_init);
module_exit(dlm_exit);