/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmmod.c
 *
 * standalone DLM module
 *
 * Copyright (C) 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */


#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/random.h>
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/spinlock.h>
#include <linux/delay.h>


#include "cluster/heartbeat.h"
#include "cluster/nodemanager.h"
#include "cluster/tcp.h"

#include "dlmapi.h"
#include "dlmcommon.h"
#include "dlmdomain.h"
#include "dlmdebug.h"

#define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_MASTER)
#include "cluster/masklog.h"

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node,
                              int idx);
static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node,
                            int idx);

static void dlm_assert_master_worker(struct dlm_work_item *item, void *data);
static int dlm_do_assert_master(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res,
                                void *nodemap, u32 flags);
static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data);

static inline int dlm_mle_equal(struct dlm_ctxt *dlm,
                                struct dlm_master_list_entry *mle,
                                const char *name,
                                unsigned int namelen)
{
        if (dlm != mle->dlm)
                return 0;

        if (namelen != mle->mnamelen ||
            memcmp(name, mle->mname, namelen) != 0)
                return 0;

        return 1;
}

static struct kmem_cache *dlm_lockres_cache = NULL;
static struct kmem_cache *dlm_lockname_cache = NULL;
static struct kmem_cache *dlm_mle_cache = NULL;

static void dlm_mle_release(struct kref *kref);
static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen);
static void dlm_put_mle(struct dlm_master_list_entry *mle);
static void __dlm_put_mle(struct dlm_master_list_entry *mle);
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen);

static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to);


static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked);
static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked);
static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                                 struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle,
                                 struct dlm_master_list_entry **oldmle,
                                 const char *name, unsigned int namelen,
                                 u8 new_master, u8 master);

static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res);
static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res);
static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm,
                                      struct dlm_lock_resource *res,
                                      u8 target);
static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm,
                                       struct dlm_lock_resource *res);


int dlm_is_host_down(int errno)
{
        switch (errno) {
        case -EBADF:
        case -ECONNREFUSED:
        case -ENOTCONN:
        case -ECONNRESET:
        case -EPIPE:
        case -EHOSTDOWN:
        case -EHOSTUNREACH:
        case -ETIMEDOUT:
        case -ECONNABORTED:
        case -ENETDOWN:
        case -ENETUNREACH:
        case -ENETRESET:
        case -ESHUTDOWN:
        case -ENOPROTOOPT:
        case -EINVAL:   /* if returned from our tcp code,
                           this means there is no socket */
                return 1;
        }
        return 0;
}
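
/*
 * Illustrative call pattern (a sketch of how senders in this file use the
 * helper; see dlm_do_master_request() below for the real thing):
 *
 *      ret = o2net_send_message(type, key, &msg, sizeof(msg), to, &status);
 *      if (ret < 0 && dlm_is_host_down(ret)) {
 *              ...treat node 'to' as dead and let heartbeat/recovery
 *                 clean up its state...
 *      }
 *
 * -EINVAL is in the list only because the o2net tcp code returns it when
 * no socket exists for the target node, as the comment above notes.
 */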


/*
 * MASTER LIST FUNCTIONS
 */


/*
 * regarding master list entries and heartbeat callbacks:
 *
 * in order to avoid sleeping and allocation that occurs in
 * heartbeat, master list entries are simply attached to the
 * dlm's established heartbeat callbacks.  the mle is attached
 * when it is created, and since the dlm->spinlock is held at
 * that time, any heartbeat event will be properly discovered
 * by the mle.  the mle needs to be detached from the
 * dlm->mle_hb_events list as soon as heartbeat events are no
 * longer useful to the mle, and before the mle is freed.
 *
 * as a general rule, heartbeat events are no longer needed by
 * the mle once an "answer" regarding the lock master has been
 * received.
 */
static inline void __dlm_mle_attach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);

        list_add_tail(&mle->hb_events, &dlm->mle_hb_events);
}


static inline void __dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                              struct dlm_master_list_entry *mle)
{
        if (!list_empty(&mle->hb_events))
                list_del_init(&mle->hb_events);
}


static inline void dlm_mle_detach_hb_events(struct dlm_ctxt *dlm,
                                            struct dlm_master_list_entry *mle)
{
        spin_lock(&dlm->spinlock);
        __dlm_mle_detach_hb_events(dlm, mle);
        spin_unlock(&dlm->spinlock);
}

static void dlm_get_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        mle->inuse++;
        kref_get(&mle->mle_refs);
}

static void dlm_put_mle_inuse(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        mle->inuse--;
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

}
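
/*
 * Note on the two counters touched above: mle->mle_refs is the kref that
 * frees the mle via dlm_mle_release(), while mle->inuse marks an mle that
 * a longer-running operation (lock mastery, migration) is still working
 * with.  dlm_get_mle_inuse() bumps both, so a matching dlm_put_mle_inuse()
 * drops both again.
 */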

/* remove from list and free */
static void __dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);
        if (!atomic_read(&mle->mle_refs.refcount)) {
                /* this may or may not crash, but who cares.
                 * it's a BUG. */
                mlog(ML_ERROR, "bad mle: %p\n", mle);
                dlm_print_one_mle(mle);
                BUG();
        } else
                kref_put(&mle->mle_refs, dlm_mle_release);
}


/* must not have any spinlocks coming in */
static void dlm_put_mle(struct dlm_master_list_entry *mle)
{
        struct dlm_ctxt *dlm;
        dlm = mle->dlm;

        spin_lock(&dlm->spinlock);
        spin_lock(&dlm->master_lock);
        __dlm_put_mle(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);
}

static inline void dlm_get_mle(struct dlm_master_list_entry *mle)
{
        kref_get(&mle->mle_refs);
}

static void dlm_init_mle(struct dlm_master_list_entry *mle,
                        enum dlm_mle_type type,
                        struct dlm_ctxt *dlm,
                        struct dlm_lock_resource *res,
                        const char *name,
                        unsigned int namelen)
{
        assert_spin_locked(&dlm->spinlock);

        mle->dlm = dlm;
        mle->type = type;
        INIT_HLIST_NODE(&mle->master_hash_node);
        INIT_LIST_HEAD(&mle->hb_events);
        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
        spin_lock_init(&mle->spinlock);
        init_waitqueue_head(&mle->wq);
        atomic_set(&mle->woken, 0);
        kref_init(&mle->mle_refs);
        memset(mle->response_map, 0, sizeof(mle->response_map));
        mle->master = O2NM_MAX_NODES;
        mle->new_master = O2NM_MAX_NODES;
        mle->inuse = 0;

        BUG_ON(mle->type != DLM_MLE_BLOCK &&
               mle->type != DLM_MLE_MASTER &&
               mle->type != DLM_MLE_MIGRATION);

        if (mle->type == DLM_MLE_MASTER) {
                BUG_ON(!res);
                mle->mleres = res;
                memcpy(mle->mname, res->lockname.name, res->lockname.len);
                mle->mnamelen = res->lockname.len;
                mle->mnamehash = res->lockname.hash;
        } else {
                BUG_ON(!name);
                mle->mleres = NULL;
                memcpy(mle->mname, name, namelen);
                mle->mnamelen = namelen;
                mle->mnamehash = dlm_lockid_hash(name, namelen);
        }

        atomic_inc(&dlm->mle_tot_count[mle->type]);
        atomic_inc(&dlm->mle_cur_count[mle->type]);

        /* copy off the node_map and register hb callbacks on our copy */
        memcpy(mle->node_map, dlm->domain_map, sizeof(mle->node_map));
        memcpy(mle->vote_map, dlm->domain_map, sizeof(mle->vote_map));
        clear_bit(dlm->node_num, mle->vote_map);
        clear_bit(dlm->node_num, mle->node_map);

        /* attach the mle to the domain node up/down events */
        __dlm_mle_attach_hb_events(dlm, mle);
}
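
/*
 * Bitmap roles in the mle, as initialized above: node_map mirrors the
 * live domain (minus this node) and is kept current by the heartbeat
 * callbacks; vote_map is the set of nodes whose master-request responses
 * are still expected; response_map records which nodes have answered;
 * maybe_map marks nodes that might be, or might become, the master.
 * dlm_wait_for_lock_mastery() decides mastery by comparing these maps.
 */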

void __dlm_unlink_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        if (!hlist_unhashed(&mle->master_hash_node))
                hlist_del_init(&mle->master_hash_node);
}

void __dlm_insert_mle(struct dlm_ctxt *dlm, struct dlm_master_list_entry *mle)
{
        struct hlist_head *bucket;

        assert_spin_locked(&dlm->master_lock);

        bucket = dlm_master_hash(dlm, mle->mnamehash);
        hlist_add_head(&mle->master_hash_node, bucket);
}

/* returns 1 if found, 0 if not */
static int dlm_find_mle(struct dlm_ctxt *dlm,
                        struct dlm_master_list_entry **mle,
                        char *name, unsigned int namelen)
{
        struct dlm_master_list_entry *tmpmle;
        struct hlist_head *bucket;
        unsigned int hash;

        assert_spin_locked(&dlm->master_lock);

        hash = dlm_lockid_hash(name, namelen);
        bucket = dlm_master_hash(dlm, hash);
        hlist_for_each_entry(tmpmle, bucket, master_hash_node) {
                if (!dlm_mle_equal(dlm, tmpmle, name, namelen))
                        continue;
                dlm_get_mle(tmpmle);
                *mle = tmpmle;
                return 1;
        }
        return 0;
}

void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up)
{
        struct dlm_master_list_entry *mle;

        assert_spin_locked(&dlm->spinlock);

        list_for_each_entry(mle, &dlm->mle_hb_events, hb_events) {
                if (node_up)
                        dlm_mle_node_up(dlm, mle, NULL, idx);
                else
                        dlm_mle_node_down(dlm, mle, NULL, idx);
        }
}

static void dlm_mle_node_down(struct dlm_ctxt *dlm,
                              struct dlm_master_list_entry *mle,
                              struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (!test_bit(idx, mle->node_map))
                mlog(0, "node %u already removed from nodemap!\n", idx);
        else
                clear_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}

static void dlm_mle_node_up(struct dlm_ctxt *dlm,
                            struct dlm_master_list_entry *mle,
                            struct o2nm_node *node, int idx)
{
        spin_lock(&mle->spinlock);

        if (test_bit(idx, mle->node_map))
                mlog(0, "node %u already in node map!\n", idx);
        else
                set_bit(idx, mle->node_map);

        spin_unlock(&mle->spinlock);
}


int dlm_init_mle_cache(void)
{
        dlm_mle_cache = kmem_cache_create("o2dlm_mle",
                                          sizeof(struct dlm_master_list_entry),
                                          0, SLAB_HWCACHE_ALIGN,
                                          NULL);
        if (dlm_mle_cache == NULL)
                return -ENOMEM;
        return 0;
}

void dlm_destroy_mle_cache(void)
{
        if (dlm_mle_cache)
                kmem_cache_destroy(dlm_mle_cache);
}

static void dlm_mle_release(struct kref *kref)
{
        struct dlm_master_list_entry *mle;
        struct dlm_ctxt *dlm;

        mle = container_of(kref, struct dlm_master_list_entry, mle_refs);
        dlm = mle->dlm;

        assert_spin_locked(&dlm->spinlock);
        assert_spin_locked(&dlm->master_lock);

        mlog(0, "Releasing mle for %.*s, type %d\n", mle->mnamelen, mle->mname,
             mle->type);

        /* remove from list if not already */
        __dlm_unlink_mle(dlm, mle);

        /* detach the mle from the domain node up/down events */
        __dlm_mle_detach_hb_events(dlm, mle);

        atomic_dec(&dlm->mle_cur_count[mle->type]);

        /* NOTE: kfree under spinlock here.
         * if this is bad, we can move this to a freelist. */
        kmem_cache_free(dlm_mle_cache, mle);
}


/*
 * LOCK RESOURCE FUNCTIONS
 */

int dlm_init_master_caches(void)
{
        dlm_lockres_cache = kmem_cache_create("o2dlm_lockres",
                                              sizeof(struct dlm_lock_resource),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockres_cache)
                goto bail;

        dlm_lockname_cache = kmem_cache_create("o2dlm_lockname",
                                               DLM_LOCKID_NAME_MAX, 0,
                                               SLAB_HWCACHE_ALIGN, NULL);
        if (!dlm_lockname_cache)
                goto bail;

        return 0;
bail:
        dlm_destroy_master_caches();
        return -ENOMEM;
}

void dlm_destroy_master_caches(void)
{
        if (dlm_lockname_cache) {
                kmem_cache_destroy(dlm_lockname_cache);
                dlm_lockname_cache = NULL;
        }

        if (dlm_lockres_cache) {
                kmem_cache_destroy(dlm_lockres_cache);
                dlm_lockres_cache = NULL;
        }
}
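
/*
 * The two caches above feed dlm_new_lockres() below: one object for the
 * lock resource itself and one fixed-size DLM_LOCKID_NAME_MAX buffer for
 * its name.  Being separate kmem caches, they show up individually (as
 * "o2dlm_lockres" and "o2dlm_lockname") in slab statistics, which makes
 * lockres memory usage easy to inspect.
 */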

static void dlm_lockres_release(struct kref *kref)
{
        struct dlm_lock_resource *res;
        struct dlm_ctxt *dlm;

        res = container_of(kref, struct dlm_lock_resource, refs);
        dlm = res->dlm;

        /* This should not happen -- all lockres' have a name
         * associated with them at init time. */
        BUG_ON(!res->lockname.name);

        mlog(0, "destroying lockres %.*s\n", res->lockname.len,
             res->lockname.name);

        spin_lock(&dlm->track_lock);
        if (!list_empty(&res->tracking))
                list_del_init(&res->tracking);
        else {
                mlog(ML_ERROR, "Resource %.*s not on the Tracking list\n",
                     res->lockname.len, res->lockname.name);
                dlm_print_one_lock_resource(res);
        }
        spin_unlock(&dlm->track_lock);

        atomic_dec(&dlm->res_cur_count);

        if (!hlist_unhashed(&res->hash_node) ||
            !list_empty(&res->granted) ||
            !list_empty(&res->converting) ||
            !list_empty(&res->blocked) ||
            !list_empty(&res->dirty) ||
            !list_empty(&res->recovering) ||
            !list_empty(&res->purge)) {
                mlog(ML_ERROR,
                     "Going to BUG for resource %.*s."
                     " We're on a list! [%c%c%c%c%c%c%c]\n",
                     res->lockname.len, res->lockname.name,
                     !hlist_unhashed(&res->hash_node) ? 'H' : ' ',
                     !list_empty(&res->granted) ? 'G' : ' ',
                     !list_empty(&res->converting) ? 'C' : ' ',
                     !list_empty(&res->blocked) ? 'B' : ' ',
                     !list_empty(&res->dirty) ? 'D' : ' ',
                     !list_empty(&res->recovering) ? 'R' : ' ',
                     !list_empty(&res->purge) ? 'P' : ' ');

                dlm_print_one_lock_resource(res);
        }

        /* By the time we're ready to blow this guy away, we shouldn't
         * be on any lists. */
        BUG_ON(!hlist_unhashed(&res->hash_node));
        BUG_ON(!list_empty(&res->granted));
        BUG_ON(!list_empty(&res->converting));
        BUG_ON(!list_empty(&res->blocked));
        BUG_ON(!list_empty(&res->dirty));
        BUG_ON(!list_empty(&res->recovering));
        BUG_ON(!list_empty(&res->purge));

        kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        kmem_cache_free(dlm_lockres_cache, res);
}

void dlm_lockres_put(struct dlm_lock_resource *res)
{
        kref_put(&res->refs, dlm_lockres_release);
}

static void dlm_init_lockres(struct dlm_ctxt *dlm,
                             struct dlm_lock_resource *res,
                             const char *name, unsigned int namelen)
{
        char *qname;

        /* If we memset here, we lose our reference to the kmalloc'd
         * res->lockname.name, so be sure to init every field
         * correctly! */

        qname = (char *) res->lockname.name;
        memcpy(qname, name, namelen);

        res->lockname.len = namelen;
        res->lockname.hash = dlm_lockid_hash(name, namelen);

        init_waitqueue_head(&res->wq);
        spin_lock_init(&res->spinlock);
        INIT_HLIST_NODE(&res->hash_node);
        INIT_LIST_HEAD(&res->granted);
        INIT_LIST_HEAD(&res->converting);
        INIT_LIST_HEAD(&res->blocked);
        INIT_LIST_HEAD(&res->dirty);
        INIT_LIST_HEAD(&res->recovering);
        INIT_LIST_HEAD(&res->purge);
        INIT_LIST_HEAD(&res->tracking);
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;

        res->dlm = dlm;

        kref_init(&res->refs);

        atomic_inc(&dlm->res_tot_count);
        atomic_inc(&dlm->res_cur_count);

        /* just for consistency */
        spin_lock(&res->spinlock);
        dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN);
        spin_unlock(&res->spinlock);

        res->state = DLM_LOCK_RES_IN_PROGRESS;

        res->last_used = 0;

        spin_lock(&dlm->spinlock);
        list_add_tail(&res->tracking, &dlm->tracking_list);
        spin_unlock(&dlm->spinlock);

        memset(res->lvb, 0, DLM_LVB_LEN);
        memset(res->refmap, 0, sizeof(res->refmap));
}

struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
                                          const char *name,
                                          unsigned int namelen)
{
        struct dlm_lock_resource *res = NULL;

        res = kmem_cache_zalloc(dlm_lockres_cache, GFP_NOFS);
        if (!res)
                goto error;

        res->lockname.name = kmem_cache_zalloc(dlm_lockname_cache, GFP_NOFS);
        if (!res->lockname.name)
                goto error;

        dlm_init_lockres(dlm, res, name, namelen);
        return res;

error:
        if (res && res->lockname.name)
                kmem_cache_free(dlm_lockname_cache, (void *)res->lockname.name);

        if (res)
                kmem_cache_free(dlm_lockres_cache, res);
        return NULL;
}

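/*
 * Illustrative lifecycle sketch for the allocator above (error handling
 * elided; not a verbatim caller from this file):
 *
 *      struct dlm_lock_resource *res;
 *
 *      res = dlm_new_lockres(dlm, name, namelen);
 *      if (res) {
 *              ...
 *              dlm_lockres_put(res);   (drops the ref set up by kref_init)
 *      }
 */
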
void dlm_lockres_set_refmap_bit(struct dlm_ctxt *dlm,
                                struct dlm_lock_resource *res, int bit)
{
        assert_spin_locked(&res->spinlock);

        mlog(0, "res %.*s, set node %u, %ps()\n", res->lockname.len,
             res->lockname.name, bit, __builtin_return_address(0));

        set_bit(bit, res->refmap);
}

void dlm_lockres_clear_refmap_bit(struct dlm_ctxt *dlm,
                                  struct dlm_lock_resource *res, int bit)
{
        assert_spin_locked(&res->spinlock);

        mlog(0, "res %.*s, clr node %u, %ps()\n", res->lockname.len,
             res->lockname.name, bit, __builtin_return_address(0));

        clear_bit(bit, res->refmap);
}


void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
{
        assert_spin_locked(&res->spinlock);

        res->inflight_locks++;

        mlog(0, "%s: res %.*s, inflight++: now %u, %ps()\n", dlm->name,
             res->lockname.len, res->lockname.name, res->inflight_locks,
             __builtin_return_address(0));
}

void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res)
{
        assert_spin_locked(&res->spinlock);

        BUG_ON(res->inflight_locks == 0);

        res->inflight_locks--;

        mlog(0, "%s: res %.*s, inflight--: now %u, %ps()\n", dlm->name,
             res->lockname.len, res->lockname.name, res->inflight_locks,
             __builtin_return_address(0));

        wake_up(&res->wq);
}
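
/*
 * Two pinning mechanisms side by side: res->refmap (set/cleared further
 * up) lives on the master and holds one bit per node that still has a
 * reference, keeping the resource from being purged cluster-wide;
 * res->inflight_locks is a purely local count that keeps the lockres
 * pinned between lookup and completion of a lock request; hence the
 * wake_up() above when the count drops.
 */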

/*
 * lookup a lock resource by name.
 * may already exist in the hashtable.
 * lockid is null terminated
 *
 * if not, allocate enough for the lockres and for
 * the temporary structure used in doing the mastering.
 *
 * also, do a lookup in the dlm->master_list to see
 * if another node has begun mastering the same lock.
 * if so, there should be a block entry in there
 * for this name, and we should *not* attempt to master
 * the lock here.  need to wait around for that node
 * to assert_master (or die).
 *
 */
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
                                                 const char *lockid,
                                                 int namelen,
                                                 int flags)
{
        struct dlm_lock_resource *tmpres=NULL, *res=NULL;
        struct dlm_master_list_entry *mle = NULL;
        struct dlm_master_list_entry *alloc_mle = NULL;
        int blocked = 0;
        int ret, nodenum;
        struct dlm_node_iter iter;
        unsigned int hash;
        int tries = 0;
        int bit, wait_on_recovery = 0;

        BUG_ON(!lockid);

        hash = dlm_lockid_hash(lockid, namelen);

        mlog(0, "get lockres %s (len %d)\n", lockid, namelen);

lookup:
        spin_lock(&dlm->spinlock);
        tmpres = __dlm_lookup_lockres_full(dlm, lockid, namelen, hash);
        if (tmpres) {
                spin_unlock(&dlm->spinlock);
                spin_lock(&tmpres->spinlock);
                /* Wait on the thread that is mastering the resource */
                if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
                        __dlm_wait_on_lockres(tmpres);
                        BUG_ON(tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }

                /* Wait on the resource purge to complete before continuing */
                if (tmpres->state & DLM_LOCK_RES_DROPPING_REF) {
                        BUG_ON(tmpres->owner == dlm->node_num);
                        __dlm_wait_on_lockres_flags(tmpres,
                                                    DLM_LOCK_RES_DROPPING_REF);
                        spin_unlock(&tmpres->spinlock);
                        dlm_lockres_put(tmpres);
                        tmpres = NULL;
                        goto lookup;
                }

                /* Grab inflight ref to pin the resource */
                dlm_lockres_grab_inflight_ref(dlm, tmpres);

                spin_unlock(&tmpres->spinlock);
                if (res)
                        dlm_lockres_put(res);
                res = tmpres;
                goto leave;
        }

        if (!res) {
                spin_unlock(&dlm->spinlock);
                mlog(0, "allocating a new resource\n");
                /* nothing found and we need to allocate one. */
                alloc_mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS);
                if (!alloc_mle)
                        goto leave;
                res = dlm_new_lockres(dlm, lockid, namelen);
                if (!res)
                        goto leave;
                goto lookup;
        }

        mlog(0, "no lockres found, allocated our own: %p\n", res);

        if (flags & LKM_LOCAL) {
                /* caller knows it's safe to assume it's not mastered elsewhere
                 * DONE! return right away */
                spin_lock(&res->spinlock);
                dlm_change_lockres_owner(dlm, res, dlm->node_num);
                __dlm_insert_lockres(dlm, res);
                dlm_lockres_grab_inflight_ref(dlm, res);
                spin_unlock(&res->spinlock);
                spin_unlock(&dlm->spinlock);
                /* lockres still marked IN_PROGRESS */
                goto wake_waiters;
        }

        /* check master list to see if another node has started mastering it */
        spin_lock(&dlm->master_lock);

        /* if we found a block, wait for lock to be mastered by another node */
        blocked = dlm_find_mle(dlm, &mle, (char *)lockid, namelen);
        if (blocked) {
                int mig;
                if (mle->type == DLM_MLE_MASTER) {
                        mlog(ML_ERROR, "master entry for nonexistent lock!\n");
                        BUG();
                }
                mig = (mle->type == DLM_MLE_MIGRATION);
                /* if there is a migration in progress, let the migration
                 * finish before continuing.  we can wait for the absence
                 * of the MIGRATION mle: either the migrate finished or
                 * one of the nodes died and the mle was cleaned up.
                 * if there is a BLOCK here, but it already has a master
                 * set, we are too late.  the master does not have a ref
                 * for us in the refmap.  detach the mle and drop it.
                 * either way, go back to the top and start over. */
                if (mig || mle->master != O2NM_MAX_NODES) {
                        BUG_ON(mig && mle->master == dlm->node_num);
                        /* we arrived too late.  the master does not
                         * have a ref for us. retry. */
                        mlog(0, "%s:%.*s: late on %s\n",
                             dlm->name, namelen, lockid,
                             mig ?  "MIGRATION" : "BLOCK");
                        spin_unlock(&dlm->master_lock);
                        spin_unlock(&dlm->spinlock);

                        /* master is known, detach */
                        if (!mig)
                                dlm_mle_detach_hb_events(dlm, mle);
                        dlm_put_mle(mle);
                        mle = NULL;
                        /* this is lame, but we can't wait on either
                         * the mle or lockres waitqueue here */
                        if (mig)
                                msleep(100);
                        goto lookup;
                }
        } else {
                /* go ahead and try to master lock on this node */
                mle = alloc_mle;
                /* make sure this does not get freed below */
                alloc_mle = NULL;
                dlm_init_mle(mle, DLM_MLE_MASTER, dlm, res, NULL, 0);
                set_bit(dlm->node_num, mle->maybe_map);
                __dlm_insert_mle(dlm, mle);

                /* still holding the dlm spinlock, check the recovery map
                 * to see if there are any nodes that still need to be
                 * considered.  these will not appear in the mle nodemap
                 * but they might own this lockres.  wait on them. */
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(0, "%s: res %.*s, At least one node (%d) "
                             "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                }
        }

        /* at this point there is either a DLM_MLE_BLOCK or a
         * DLM_MLE_MASTER on the master list, so it's safe to add the
         * lockres to the hashtable.  anyone who finds the lock will
         * still have to wait on the IN_PROGRESS. */

        /* finally add the lockres to its hash bucket */
        __dlm_insert_lockres(dlm, res);

        /* Grab inflight ref to pin the resource */
        spin_lock(&res->spinlock);
        dlm_lockres_grab_inflight_ref(dlm, res);
        spin_unlock(&res->spinlock);

        /* get an extra ref on the mle in case this is a BLOCK
         * if so, the creator of the BLOCK may try to put the last
         * ref at this time in the assert master handler, so we
         * need an extra one to keep from a bad ptr deref. */
        dlm_get_mle_inuse(mle);
        spin_unlock(&dlm->master_lock);
        spin_unlock(&dlm->spinlock);

redo_request:
        while (wait_on_recovery) {
                /* any cluster changes that occurred after dropping the
                 * dlm spinlock would be detectable by a change on the mle,
                 * so we only need to clear out the recovery map once. */
                if (dlm_is_recovery_lock(lockid, namelen)) {
                        mlog(0, "%s: Recovery map is not empty, but must "
                             "master $RECOVERY lock now\n", dlm->name);
                        if (!dlm_pre_master_reco_lockres(dlm, res))
                                wait_on_recovery = 0;
                        else {
                                mlog(0, "%s: waiting 500ms for heartbeat state "
                                     "change\n", dlm->name);
                                msleep(500);
                        }
                        continue;
                }

                dlm_kick_recovery_thread(dlm);
                msleep(1000);
                dlm_wait_for_recovery(dlm);

                spin_lock(&dlm->spinlock);
                bit = find_next_bit(dlm->recovery_map, O2NM_MAX_NODES, 0);
                if (bit < O2NM_MAX_NODES) {
                        mlog(0, "%s: res %.*s, At least one node (%d) "
                             "to recover before lock mastery can begin\n",
                             dlm->name, namelen, (char *)lockid, bit);
                        wait_on_recovery = 1;
                } else
                        wait_on_recovery = 0;
                spin_unlock(&dlm->spinlock);

                if (wait_on_recovery)
                        dlm_wait_for_node_recovery(dlm, bit, 10000);
        }

        /* must wait for lock to be mastered elsewhere */
        if (blocked)
                goto wait;

        ret = -EINVAL;
        dlm_node_iter_init(mle->vote_map, &iter);
        while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
                ret = dlm_do_master_request(res, mle, nodenum);
                if (ret < 0)
                        mlog_errno(ret);
                if (mle->master != O2NM_MAX_NODES) {
                        /* found a master ! */
                        if (mle->master <= nodenum)
                                break;
                        /* if our master request has not reached the master
                         * yet, keep going until it does.  this is how the
                         * master will know that asserts are needed back to
                         * the lower nodes. */
                        mlog(0, "%s: res %.*s, Requests only up to %u but "
                             "master is %u, keep going\n", dlm->name, namelen,
                             lockid, nodenum, mle->master);
                }
        }

wait:
        /* keep going until the response map includes all nodes */
        ret = dlm_wait_for_lock_mastery(dlm, res, mle, &blocked);
        if (ret < 0) {
                wait_on_recovery = 1;
                mlog(0, "%s: res %.*s, Node map changed, redo the master "
                     "request now, blocked=%d\n", dlm->name, res->lockname.len,
                     res->lockname.name, blocked);
                if (++tries > 20) {
                        mlog(ML_ERROR, "%s: res %.*s, Spinning on "
                             "dlm_wait_for_lock_mastery, blocked = %d\n",
                             dlm->name, res->lockname.len,
                             res->lockname.name, blocked);
                        dlm_print_one_lock_resource(res);
                        dlm_print_one_mle(mle);
                        tries = 0;
                }
                goto redo_request;
        }

        mlog(0, "%s: res %.*s, Mastered by %u\n", dlm->name, res->lockname.len,
             res->lockname.name, res->owner);
        /* make sure we never continue without this */
        BUG_ON(res->owner == O2NM_MAX_NODES);

        /* master is known, detach if not already detached */
        dlm_mle_detach_hb_events(dlm, mle);
        dlm_put_mle(mle);
        /* put the extra ref */
        dlm_put_mle_inuse(mle);

wake_waiters:
        spin_lock(&res->spinlock);
        res->state &= ~DLM_LOCK_RES_IN_PROGRESS;
        spin_unlock(&res->spinlock);
        wake_up(&res->wq);

leave:
        /* need to free the unused mle */
        if (alloc_mle)
                kmem_cache_free(dlm_mle_cache, alloc_mle);

        return res;
}
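
/*
 * Flow summary for dlm_get_lock_resource() above: (1) find or allocate
 * the lockres; (2) if another node is already mastering or migrating the
 * same name, back off and retry the lookup; (3) otherwise publish a
 * DLM_MLE_MASTER entry and send a master request to every node in the
 * vote map; (4) dlm_wait_for_lock_mastery() below then decides, from the
 * collected responses, whether this node may assert mastery or must wait
 * for an assert from the eventual owner.
 */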


#define DLM_MASTERY_TIMEOUT_MS   5000

static int dlm_wait_for_lock_mastery(struct dlm_ctxt *dlm,
                                     struct dlm_lock_resource *res,
                                     struct dlm_master_list_entry *mle,
                                     int *blocked)
{
        u8 m;
        int ret, bit;
        int map_changed, voting_done;
        int assert, sleep;

recheck:
        ret = 0;
        assert = 0;

        /* check if another node has already become the owner */
        spin_lock(&res->spinlock);
        if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                mlog(0, "%s:%.*s: owner is suddenly %u\n", dlm->name,
                     res->lockname.len, res->lockname.name, res->owner);
                spin_unlock(&res->spinlock);
                /* this will cause the master to re-assert across
                 * the whole cluster, freeing up mles */
                if (res->owner != dlm->node_num) {
                        ret = dlm_do_master_request(res, mle, res->owner);
                        if (ret < 0) {
                                /* give recovery a chance to run */
                                mlog(ML_ERROR, "link to %u went down?: %d\n", res->owner, ret);
                                msleep(500);
                                goto recheck;
                        }
                }
                ret = 0;
                goto leave;
        }
        spin_unlock(&res->spinlock);

        spin_lock(&mle->spinlock);
        m = mle->master;
        map_changed = (memcmp(mle->vote_map, mle->node_map,
                              sizeof(mle->vote_map)) != 0);
        voting_done = (memcmp(mle->vote_map, mle->response_map,
                              sizeof(mle->vote_map)) == 0);

        /* restart if we hit any errors */
        if (map_changed) {
                int b;
                mlog(0, "%s: %.*s: node map changed, restarting\n",
                     dlm->name, res->lockname.len, res->lockname.name);
                ret = dlm_restart_lock_mastery(dlm, res, mle, *blocked);
                b = (mle->type == DLM_MLE_BLOCK);
                if ((*blocked && !b) || (!*blocked && b)) {
                        mlog(0, "%s:%.*s: status change: old=%d new=%d\n",
                             dlm->name, res->lockname.len, res->lockname.name,
                             *blocked, b);
                        *blocked = b;
                }
                spin_unlock(&mle->spinlock);
                if (ret < 0) {
                        mlog_errno(ret);
                        goto leave;
                }
                mlog(0, "%s:%.*s: restart lock mastery succeeded, "
                     "rechecking now\n", dlm->name, res->lockname.len,
                     res->lockname.name);
                goto recheck;
        } else {
                if (!voting_done) {
                        mlog(0, "map not changed and voting not done "
                             "for %s:%.*s\n", dlm->name, res->lockname.len,
                             res->lockname.name);
                }
        }

        if (m != O2NM_MAX_NODES) {
                /* another node has done an assert!
                 * all done! */
                sleep = 0;
        } else {
                sleep = 1;
                /* have all nodes responded? */
                if (voting_done && !*blocked) {
                        bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0);
                        if (dlm->node_num <= bit) {
                                /* my node number is lowest.
                                 * now tell other nodes that I am
                                 * mastering this. */
                                mle->master = dlm->node_num;
                                /* ref was grabbed in get_lock_resource
                                 * will be dropped in dlmlock_master */
                                assert = 1;
                                sleep = 0;
                        }
                        /* if voting is done, but we have not received
                         * an assert master yet, we must sleep */
                }
        }

        spin_unlock(&mle->spinlock);

        /* sleep if we haven't finished voting yet */
        if (sleep) {
                unsigned long timeo = msecs_to_jiffies(DLM_MASTERY_TIMEOUT_MS);

                /*
                if (atomic_read(&mle->mle_refs.refcount) < 2)
                        mlog(ML_ERROR, "mle (%p) refs=%d, name=%.*s\n", mle,
                        atomic_read(&mle->mle_refs.refcount),
                        res->lockname.len, res->lockname.name);
                */
                atomic_set(&mle->woken, 0);
                (void)wait_event_timeout(mle->wq,
                                         (atomic_read(&mle->woken) == 1),
                                         timeo);
                if (res->owner == O2NM_MAX_NODES) {
                        mlog(0, "%s:%.*s: waiting again\n", dlm->name,
                             res->lockname.len, res->lockname.name);
                        goto recheck;
                }
                mlog(0, "done waiting, master is %u\n", res->owner);
                ret = 0;
                goto leave;
        }

        ret = 0;   /* done */
        if (assert) {
                m = dlm->node_num;
                mlog(0, "about to master %.*s here, this=%u\n",
                     res->lockname.len, res->lockname.name, m);
                ret = dlm_do_assert_master(dlm, res, mle->vote_map, 0);
                if (ret) {
                        /* This is a failure in the network path,
                         * not in the response to the assert_master
                         * (any nonzero response is a BUG on this node).
                         * Most likely a socket just got disconnected
                         * due to node death. */
                        mlog_errno(ret);
                }
                /* no longer need to restart lock mastery.
                 * all living nodes have been contacted. */
                ret = 0;
        }

        /* set the lockres owner */
        spin_lock(&res->spinlock);
        /* mastery reference obtained either during
         * assert_master_handler or in get_lock_resource */
        dlm_change_lockres_owner(dlm, res, m);
        spin_unlock(&res->spinlock);

leave:
        return ret;
}
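
/*
 * Tie-break used above: once every node in the vote map has responded
 * and no assert has arrived, the contender with the lowest node number
 * in maybe_map elects itself master and sends the asserts; every other
 * node keeps sleeping in DLM_MASTERY_TIMEOUT_MS slices until the
 * winner's assert sets res->owner.
 */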

struct dlm_bitmap_diff_iter
{
        int curnode;
        unsigned long *orig_bm;
        unsigned long *cur_bm;
        unsigned long diff_bm[BITS_TO_LONGS(O2NM_MAX_NODES)];
};

enum dlm_node_state_change
{
        NODE_DOWN = -1,
        NODE_NO_CHANGE = 0,
        NODE_UP
};

static void dlm_bitmap_diff_iter_init(struct dlm_bitmap_diff_iter *iter,
                                      unsigned long *orig_bm,
                                      unsigned long *cur_bm)
{
        unsigned long p1, p2;
        int i;

        iter->curnode = -1;
        iter->orig_bm = orig_bm;
        iter->cur_bm = cur_bm;

        for (i = 0; i < BITS_TO_LONGS(O2NM_MAX_NODES); i++) {
                p1 = *(iter->orig_bm + i);
                p2 = *(iter->cur_bm + i);
                iter->diff_bm[i] = (p1 & ~p2) | (p2 & ~p1);
        }
}

static int dlm_bitmap_diff_iter_next(struct dlm_bitmap_diff_iter *iter,
                                     enum dlm_node_state_change *state)
{
        int bit;

        if (iter->curnode >= O2NM_MAX_NODES)
                return -ENOENT;

        bit = find_next_bit(iter->diff_bm, O2NM_MAX_NODES,
                            iter->curnode+1);
        if (bit >= O2NM_MAX_NODES) {
                iter->curnode = O2NM_MAX_NODES;
                return -ENOENT;
        }

        /* if it was there in the original then this node died */
        if (test_bit(bit, iter->orig_bm))
                *state = NODE_DOWN;
        else
                *state = NODE_UP;

        iter->curnode = bit;
        return bit;
}
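
/*
 * Worked example for the iterator above: orig_bm = 0b0110 and
 * cur_bm = 0b0011 give diff_bm = 0b0101, i.e. nodes 0 and 2 changed
 * state.  test_bit() against orig_bm then classifies node 2 as NODE_DOWN
 * (it was set before) and node 0 as NODE_UP (it was not).
 */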


static int dlm_restart_lock_mastery(struct dlm_ctxt *dlm,
                                    struct dlm_lock_resource *res,
                                    struct dlm_master_list_entry *mle,
                                    int blocked)
{
        struct dlm_bitmap_diff_iter bdi;
        enum dlm_node_state_change sc;
        int node;
        int ret = 0;

        mlog(0, "something happened such that the "
             "master process may need to be restarted!\n");

        assert_spin_locked(&mle->spinlock);

        dlm_bitmap_diff_iter_init(&bdi, mle->vote_map, mle->node_map);
        node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        while (node >= 0) {
                if (sc == NODE_UP) {
                        /* a node came up.  clear any old vote from
                         * the response map and set it in the vote map
                         * then restart the mastery. */
                        mlog(ML_NOTICE, "node %d up while restarting\n", node);

                        /* redo the master request, but only for the new node */
                        mlog(0, "sending request to new node\n");
                        clear_bit(node, mle->response_map);
                        set_bit(node, mle->vote_map);
                } else {
                        mlog(ML_ERROR, "node down! %d\n", node);
                        if (blocked) {
                                int lowest = find_next_bit(mle->maybe_map,
                                                       O2NM_MAX_NODES, 0);

                                /* act like it was never there */
                                clear_bit(node, mle->maybe_map);

                                if (node == lowest) {
                                        mlog(0, "expected master %u died"
                                            " while this node was blocked "
                                            "waiting on it!\n", node);
                                        lowest = find_next_bit(mle->maybe_map,
                                                        O2NM_MAX_NODES,
                                                        lowest+1);
                                        if (lowest < O2NM_MAX_NODES) {
                                                mlog(0, "%s:%.*s:still "
                                                     "blocked. waiting on %u "
                                                     "now\n", dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name,
                                                     lowest);
                                        } else {
                                                /* mle is an MLE_BLOCK, but
                                                 * there is now nothing left to
                                                 * block on.  we need to return
                                                 * all the way back out and try
                                                 * again with an MLE_MASTER.
                                                 * dlm_do_local_recovery_cleanup
                                                 * has already run, so the mle
                                                 * refcount is ok */
                                                mlog(0, "%s:%.*s: no "
                                                     "longer blocking. try to "
                                                     "master this here\n",
                                                     dlm->name,
                                                     res->lockname.len,
                                                     res->lockname.name);
                                                mle->type = DLM_MLE_MASTER;
                                                mle->mleres = res;
                                        }
                                }
                        }

                        /* now blank out everything, as if we had never
                         * contacted anyone */
                        memset(mle->maybe_map, 0, sizeof(mle->maybe_map));
                        memset(mle->response_map, 0, sizeof(mle->response_map));
                        /* reset the vote_map to the current node_map */
                        memcpy(mle->vote_map, mle->node_map,
                               sizeof(mle->node_map));
                        /* put myself into the maybe map */
                        if (mle->type != DLM_MLE_BLOCK)
                                set_bit(dlm->node_num, mle->maybe_map);
                }
                ret = -EAGAIN;
                node = dlm_bitmap_diff_iter_next(&bdi, &sc);
        }
        return ret;
}


/*
 * DLM_MASTER_REQUEST_MSG
 *
 * returns: 0 on success,
 *          -errno on a network error
 *
 * on error, the caller should assume the target node is "dead"
 *
 */

static int dlm_do_master_request(struct dlm_lock_resource *res,
                                 struct dlm_master_list_entry *mle, int to)
{
        struct dlm_ctxt *dlm = mle->dlm;
        struct dlm_master_request request;
        int ret, response=0, resend;

        memset(&request, 0, sizeof(request));
        request.node_idx = dlm->node_num;

        BUG_ON(mle->type == DLM_MLE_MIGRATION);

        request.namelen = (u8)mle->mnamelen;
        memcpy(request.name, mle->mname, request.namelen);

again:
        ret = o2net_send_message(DLM_MASTER_REQUEST_MSG, dlm->key, &request,
                                 sizeof(request), to, &response);
        if (ret < 0)  {
                if (ret == -ESRCH) {
                        /* should never happen */
                        mlog(ML_ERROR, "TCP stack not ready!\n");
                        BUG();
                } else if (ret == -EINVAL) {
                        mlog(ML_ERROR, "bad args passed to o2net!\n");
                        BUG();
                } else if (ret == -ENOMEM) {
                        mlog(ML_ERROR, "out of memory while trying to send "
                             "network message!  retrying\n");
                        /* this is totally crude */
                        msleep(50);
                        goto again;
                } else if (!dlm_is_host_down(ret)) {
                        /* not a network error. bad. */
                        mlog_errno(ret);
                        mlog(ML_ERROR, "unhandled error!");
                        BUG();
                }
                /* all other errors should be network errors,
                 * and likely indicate node death */
                mlog(ML_ERROR, "link to %d went down!\n", to);
                goto out;
        }

        ret = 0;
        resend = 0;
        spin_lock(&mle->spinlock);
        switch (response) {
        case DLM_MASTER_RESP_YES:
                set_bit(to, mle->response_map);
                mlog(0, "node %u is the master, response=YES\n", to);
                mlog(0, "%s:%.*s: master node %u now knows I have a "
                     "reference\n", dlm->name, res->lockname.len,
                     res->lockname.name, to);
                mle->master = to;
                break;
        case DLM_MASTER_RESP_NO:
                mlog(0, "node %u not master, response=NO\n", to);
                set_bit(to, mle->response_map);
                break;
        case DLM_MASTER_RESP_MAYBE:
                mlog(0, "node %u not master, response=MAYBE\n", to);
                set_bit(to, mle->response_map);
                set_bit(to, mle->maybe_map);
                break;
        case DLM_MASTER_RESP_ERROR:
                mlog(0, "node %u hit an error, resending\n", to);
                resend = 1;
                response = 0;
                break;
        default:
                mlog(ML_ERROR, "bad response! %u\n", response);
                BUG();
        }
        spin_unlock(&mle->spinlock);
        if (resend) {
                /* this is also totally crude */
                msleep(50);
                goto again;
        }

out:
        return ret;
}
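
/*
 * Response semantics, matching the switch above and the handler below:
 * YES means the target is the master and has recorded this node in its
 * refmap; NO means the target knows it is not the master; MAYBE means
 * the target has no master yet and has noted us in its maybe_map; ERROR
 * is transient (the resource is being recovered or migrated), so the
 * request is simply resent after a short sleep.
 */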

/*
 * locks that can be taken here:
 * dlm->spinlock
 * res->spinlock
 * mle->spinlock
 * dlm->master_list
 *
 * if possible, TRIM THIS DOWN!!!
 */
int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data,
                               void **ret_data)
{
        u8 response = DLM_MASTER_RESP_MAYBE;
        struct dlm_ctxt *dlm = data;
        struct dlm_lock_resource *res = NULL;
        struct dlm_master_request *request = (struct dlm_master_request *) msg->buf;
        struct dlm_master_list_entry *mle = NULL, *tmpmle = NULL;
        char *name;
        unsigned int namelen, hash;
        int found, ret;
        int set_maybe;
        int dispatch_assert = 0;

        if (!dlm_grab(dlm))
                return DLM_MASTER_RESP_NO;

        if (!dlm_domain_fully_joined(dlm)) {
                response = DLM_MASTER_RESP_NO;
                goto send_response;
        }

        name = request->name;
        namelen = request->namelen;
        hash = dlm_lockid_hash(name, namelen);

        if (namelen > DLM_LOCKID_NAME_MAX) {
                response = DLM_IVBUFLEN;
                goto send_response;
        }

way_up_top:
        spin_lock(&dlm->spinlock);
        res = __dlm_lookup_lockres(dlm, name, namelen, hash);
        if (res) {
                spin_unlock(&dlm->spinlock);

                /* take care of the easy cases up front */
                spin_lock(&res->spinlock);
                if (res->state & (DLM_LOCK_RES_RECOVERING|
                                  DLM_LOCK_RES_MIGRATING)) {
                        spin_unlock(&res->spinlock);
                        mlog(0, "returning DLM_MASTER_RESP_ERROR since res is "
                             "being recovered/migrated\n");
                        response = DLM_MASTER_RESP_ERROR;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }

                if (res->owner == dlm->node_num) {
                        dlm_lockres_set_refmap_bit(dlm, res, request->node_idx);
                        spin_unlock(&res->spinlock);
                        response = DLM_MASTER_RESP_YES;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);

                        /* this node is the owner.
                         * there is some extra work that needs to
                         * happen now.  the requesting node has
                         * caused all nodes up to this one to
                         * create mles.  this node now needs to
                         * go back and clean those up. */
                        dispatch_assert = 1;
                        goto send_response;
                } else if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        spin_unlock(&res->spinlock);
                        // mlog(0, "node %u is the master\n", res->owner);
                        response = DLM_MASTER_RESP_NO;
                        if (mle)
                                kmem_cache_free(dlm_mle_cache, mle);
                        goto send_response;
                }

                /* ok, there is no owner.  either this node is
                 * being blocked, or it is actively trying to
                 * master this lock. */
                if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) {
                        mlog(ML_ERROR, "lock with no owner should be "
                             "in-progress!\n");
                        BUG();
                }

                // mlog(0, "lockres is in progress...\n");
                spin_lock(&dlm->master_lock);
                found = dlm_find_mle(dlm, &tmpmle, name, namelen);
                if (!found) {
                        mlog(ML_ERROR, "no mle found for this lock!\n");
                        BUG();
                }
                set_maybe = 1;
                spin_lock(&tmpmle->spinlock);
                if (tmpmle->type == DLM_MLE_BLOCK) {
                        // mlog(0, "this node is waiting for "
                        // "lockres to be mastered\n");
                        response = DLM_MASTER_RESP_NO;
                } else if (tmpmle->type == DLM_MLE_MIGRATION) {
                        mlog(0, "node %u is master, but trying to migrate to "
                             "node %u.\n", tmpmle->master, tmpmle->new_master);
                        if (tmpmle->master == dlm->node_num) {
                                mlog(ML_ERROR, "no owner on lockres, but this "
                                     "node is trying to migrate it to %u?!\n",
                                     tmpmle->new_master);
                                BUG();
                        } else {
                                /* the real master can respond on its own */
                                response = DLM_MASTER_RESP_NO;
                        }
                } else if (tmpmle->master != DLM_LOCK_RES_OWNER_UNKNOWN) {
                        set_maybe = 0;
                        if (tmpmle->master == dlm->node_num) {
                                response = DLM_MASTER_RESP_YES;
                                /* this node will be the owner.
                                 * go back and clean the mles on any
                                 * other nodes */
                                dispatch_assert = 1;
                                dlm_lockres_set_refmap_bit(dlm, res,
                                                           request->node_idx);
                        } else
                                response = DLM_MASTER_RESP_NO;
                } else {
                        // mlog(0, "this node is attempting to "
                        // "master lockres\n");
                        response = DLM_MASTER_RESP_MAYBE;
                }
                if (set_maybe)
                        set_bit(request->node_idx, tmpmle->maybe_map);
                spin_unlock(&tmpmle->spinlock);

                spin_unlock(&dlm->master_lock);
                spin_unlock(&res->spinlock);

                /* keep the mle attached to heartbeat events */
                dlm_put_mle(tmpmle);
                if (mle)
                        kmem_cache_free(dlm_mle_cache, mle);
                goto send_response;
        }
1525 | ||
1526 | /* | |
1527 | * lockres doesn't exist on this node | |
1528 | * if there is an MLE_BLOCK, return NO | |
1529 | * if there is an MLE_MASTER, return MAYBE | |
1530 | * otherwise, add an MLE_BLOCK, return NO | |
1531 | */ | |
1532 | spin_lock(&dlm->master_lock); | |
1533 | found = dlm_find_mle(dlm, &tmpmle, name, namelen); | |
1534 | if (!found) { | |
1535 | /* this lockid has never been seen on this node yet */ | |
1536 | // mlog(0, "no mle found\n"); | |
1537 | if (!mle) { | |
1538 | spin_unlock(&dlm->master_lock); | |
1539 | spin_unlock(&dlm->spinlock); | |
1540 | ||
3914ed0c | 1541 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 | 1542 | if (!mle) { |
6714d8e8 | 1543 | response = DLM_MASTER_RESP_ERROR; |
9c6510a5 | 1544 | mlog_errno(-ENOMEM); |
6714d8e8 KH |
1545 | goto send_response; |
1546 | } | |
6714d8e8 KH |
1547 | goto way_up_top; |
1548 | } | |
1549 | ||
1550 | // mlog(0, "this is second time thru, already allocated, " | |
1551 | // "add the block.\n"); | |
41b8c8a1 | 1552 | dlm_init_mle(mle, DLM_MLE_BLOCK, dlm, NULL, name, namelen); |
6714d8e8 | 1553 | set_bit(request->node_idx, mle->maybe_map); |
1c084577 | 1554 | __dlm_insert_mle(dlm, mle); |
6714d8e8 KH |
1555 | response = DLM_MASTER_RESP_NO; |
1556 | } else { | |
1557 | // mlog(0, "mle was found\n"); | |
1558 | set_maybe = 1; | |
1559 | spin_lock(&tmpmle->spinlock); | |
9c6510a5 KH |
1560 | if (tmpmle->master == dlm->node_num) { |
1561 | mlog(ML_ERROR, "no lockres, but an mle with this node as master!\n"); | |
1562 | BUG(); | |
1563 | } | |
6714d8e8 KH |
1564 | if (tmpmle->type == DLM_MLE_BLOCK) |
1565 | response = DLM_MASTER_RESP_NO; | |
1566 | else if (tmpmle->type == DLM_MLE_MIGRATION) { | |
1567 | mlog(0, "migration mle was found (%u->%u)\n", | |
1568 | tmpmle->master, tmpmle->new_master); | |
6714d8e8 KH |
1569 | /* real master can respond on its own */ |
1570 | response = DLM_MASTER_RESP_NO; | |
9c6510a5 KH |
1571 | } else |
1572 | response = DLM_MASTER_RESP_MAYBE; | |
6714d8e8 KH |
1573 | if (set_maybe) |
1574 | set_bit(request->node_idx, tmpmle->maybe_map); | |
1575 | spin_unlock(&tmpmle->spinlock); | |
1576 | } | |
1577 | spin_unlock(&dlm->master_lock); | |
1578 | spin_unlock(&dlm->spinlock); | |
1579 | ||
1580 | if (found) { | |
1581 | /* keep the mle attached to heartbeat events */ | |
1582 | dlm_put_mle(tmpmle); | |
1583 | } | |
1584 | send_response: | |
b31cfc02 SM |
1585 | /* |
1586 | * __dlm_lookup_lockres() grabbed a reference to this lockres. | |
1587 | * The reference is released by dlm_assert_master_worker() under | |
1588 | * the call to dlm_dispatch_assert_master(). If | |
1589 | * dlm_assert_master_worker() isn't called, we drop it here. | |
1590 | */ | |
9c6510a5 KH |
1591 | if (dispatch_assert) { |
1592 | if (response != DLM_MASTER_RESP_YES) | |
1593 | mlog(ML_ERROR, "invalid response %d\n", response); | |
1594 | if (!res) { | |
1595 | mlog(ML_ERROR, "bad lockres while trying to assert!\n"); | |
1596 | BUG(); | |
1597 | } | |
1598 | mlog(0, "%u is the owner of %.*s, cleaning everyone else\n", | |
1599 | dlm->node_num, res->lockname.len, res->lockname.name); | |
2bd63216 | 1600 | ret = dlm_dispatch_assert_master(dlm, res, 0, request->node_idx, |
9c6510a5 KH |
1601 | DLM_ASSERT_MASTER_MLE_CLEANUP); |
1602 | if (ret < 0) { | |
1603 | mlog(ML_ERROR, "failed to dispatch assert master work\n"); | |
1604 | response = DLM_MASTER_RESP_ERROR; | |
b31cfc02 | 1605 | dlm_lockres_put(res); |
9c6510a5 | 1606 | } |
b31cfc02 SM |
1607 | } else { |
1608 | if (res) | |
1609 | dlm_lockres_put(res); | |
9c6510a5 KH |
1610 | } |
1611 | ||
6714d8e8 KH |
1612 | dlm_put(dlm); |
1613 | return response; | |
1614 | } | |
1615 | ||
1616 | /* | |
1617 | * DLM_ASSERT_MASTER_MSG | |
1618 | */ | |
1619 | ||
1620 | ||
1621 | /* | |
1622 | * NOTE: this can be used for debugging | |
1623 | * can periodically run all locks owned by this node | |
1624 | * and re-assert across the cluster... | |
1625 | */ | |
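/*
 * A minimal sketch of such a debug pass, assuming the DLM_HASH_BUCKETS
 * constant and the dlm_lockres_hash() helper from dlmcommon.h /
 * dlmdomain.h; illustration only, deliberately kept out of the build:
 */
#if 0
static void dlm_debug_reassert_all(struct dlm_ctxt *dlm)
{
	unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_lock_resource *res;
	struct hlist_node *iter;
	unsigned int i;

	/* snapshot the domain map and never assert to ourselves */
	spin_lock(&dlm->spinlock);
	memcpy(nodemap, dlm->domain_map, sizeof(nodemap));
	spin_unlock(&dlm->spinlock);
	clear_bit(dlm->node_num, nodemap);

	/* walk every hash bucket and re-assert locally owned lockres;
	 * a real version would also pin each res against purging */
	for (i = 0; i < DLM_HASH_BUCKETS; i++)
		hlist_for_each_entry(res, iter, dlm_lockres_hash(dlm, i),
				     hash_node)
			if (res->owner == dlm->node_num)
				dlm_do_assert_master(dlm, res, nodemap, 0);
}
#endif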
05488bbe AB |
1626 | static int dlm_do_assert_master(struct dlm_ctxt *dlm, |
1627 | struct dlm_lock_resource *res, | |
1628 | void *nodemap, u32 flags) | |
6714d8e8 KH |
1629 | { |
1630 | struct dlm_assert_master assert; | |
1631 | int to, tmpret; | |
1632 | struct dlm_node_iter iter; | |
1633 | int ret = 0; | |
9c6510a5 | 1634 | int reassert; |
ba2bf218 KH |
1635 | const char *lockname = res->lockname.name; |
1636 | unsigned int namelen = res->lockname.len; | |
6714d8e8 KH |
1637 | |
1638 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | |
f3f85464 SM |
1639 | |
1640 | spin_lock(&res->spinlock); | |
1641 | res->state |= DLM_LOCK_RES_SETREF_INPROG; | |
1642 | spin_unlock(&res->spinlock); | |
1643 | ||
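	/* Each status r collected below is a bitmask:
	 * DLM_ASSERT_RESPONSE_REASSERT means the target created mles on
	 * other nodes and we must make another pass once this one ends;
	 * DLM_ASSERT_RESPONSE_MASTERY_REF means the target holds a
	 * reference and its bit must be set in the refmap.  A negative
	 * status is a fatal rejection of our assert. */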
9c6510a5 KH |
1644 | again: |
1645 | reassert = 0; | |
6714d8e8 KH |
1646 | |
1647 | /* note that if this nodemap is empty, it returns 0 */ | |
1648 | dlm_node_iter_init(nodemap, &iter); | |
1649 | while ((to = dlm_node_iter_next(&iter)) >= 0) { | |
1650 | int r = 0; | |
a9ee4c8a KH |
1651 | struct dlm_master_list_entry *mle = NULL; |
1652 | ||
6714d8e8 KH |
1653 | mlog(0, "sending assert master to %d (%.*s)\n", to, |
1654 | namelen, lockname); | |
1655 | memset(&assert, 0, sizeof(assert)); | |
1656 | assert.node_idx = dlm->node_num; | |
1657 | assert.namelen = namelen; | |
1658 | memcpy(assert.name, lockname, namelen); | |
1659 | assert.flags = cpu_to_be32(flags); | |
1660 | ||
1661 | tmpret = o2net_send_message(DLM_ASSERT_MASTER_MSG, dlm->key, | |
1662 | &assert, sizeof(assert), to, &r); | |
1663 | if (tmpret < 0) { | |
a5196ec5 WW |
1664 | mlog(ML_ERROR, "Error %d when sending message %u (key " |
1665 | "0x%x) to node %u\n", tmpret, | |
1666 | DLM_ASSERT_MASTER_MSG, dlm->key, to); | |
6714d8e8 | 1667 | if (!dlm_is_host_down(tmpret)) { |
3b3b84a8 | 1668 | mlog(ML_ERROR, "unhandled error=%d!\n", tmpret); |
6714d8e8 KH |
1669 | BUG(); |
1670 | } | |
1671 | /* a node died. finish out the rest of the nodes. */ | |
3b3b84a8 | 1672 | mlog(0, "link to %d went down!\n", to); |
6714d8e8 KH |
1673 | /* any nonzero status return will do */ |
1674 | ret = tmpret; | |
ba2bf218 | 1675 | r = 0; |
6714d8e8 KH |
1676 | } else if (r < 0) { |
1677 | /* ok, something is horribly messed up. kill thyself. */ | |
1678 | mlog(ML_ERROR, "during assert master of %.*s to %u, " | |
1679 | "got %d.\n", namelen, lockname, to, r); | |
a9ee4c8a KH |
1680 | spin_lock(&dlm->spinlock); |
1681 | spin_lock(&dlm->master_lock); | |
1682 | if (dlm_find_mle(dlm, &mle, (char *)lockname, | |
1683 | namelen)) { | |
1684 | dlm_print_one_mle(mle); | |
1685 | __dlm_put_mle(mle); | |
1686 | } | |
1687 | spin_unlock(&dlm->master_lock); | |
1688 | spin_unlock(&dlm->spinlock); | |
6714d8e8 | 1689 | BUG(); |
ba2bf218 KH |
1690 | } |
1691 | ||
1692 | if (r & DLM_ASSERT_RESPONSE_REASSERT && | |
1693 | !(r & DLM_ASSERT_RESPONSE_MASTERY_REF)) { | |
1694 | mlog(ML_ERROR, "%.*s: very strange, " | |
1695 | "master MLE but no lockres on %u\n", | |
1696 | namelen, lockname, to); | |
1697 | } | |
1698 | ||
1699 | if (r & DLM_ASSERT_RESPONSE_REASSERT) { | |
9c6510a5 | 1700 | mlog(0, "%.*s: node %u created mles on other " |
2bd63216 | 1701 | "nodes and requests a re-assert\n", |
9c6510a5 KH |
1702 | namelen, lockname, to); |
1703 | reassert = 1; | |
6714d8e8 | 1704 | } |
ba2bf218 KH |
1705 | if (r & DLM_ASSERT_RESPONSE_MASTERY_REF) { |
1706 | mlog(0, "%.*s: node %u has a reference to this " | |
1707 | "lockres, set the bit in the refmap\n", | |
1708 | namelen, lockname, to); | |
1709 | spin_lock(&res->spinlock); | |
8d400b81 | 1710 | dlm_lockres_set_refmap_bit(dlm, res, to); |
ba2bf218 KH |
1711 | spin_unlock(&res->spinlock); |
1712 | } | |
6714d8e8 KH |
1713 | } |
1714 | ||
9c6510a5 KH |
1715 | if (reassert) |
1716 | goto again; | |
1717 | ||
f3f85464 SM |
1718 | spin_lock(&res->spinlock); |
1719 | res->state &= ~DLM_LOCK_RES_SETREF_INPROG; | |
1720 | spin_unlock(&res->spinlock); | |
1721 | wake_up(&res->wq); | |
1722 | ||
6714d8e8 KH |
1723 | return ret; |
1724 | } | |
1725 | ||
1726 | /* | |
1727 | * locks that can be taken here: | |
1728 | * dlm->spinlock | |
1729 | * res->spinlock | |
1730 | * mle->spinlock | |
1731 | * dlm->master_list | |
1732 | * | |
1733 | * if possible, TRIM THIS DOWN!!! | |
1734 | */ | |
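/*
 * (Nesting order, as used in this file: dlm->spinlock outermost, then
 * res->spinlock, then dlm->master_lock, with mle->spinlock innermost.)
 */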
d74c9803 KH |
1735 | int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data, |
1736 | void **ret_data) | |
6714d8e8 KH |
1737 | { |
1738 | struct dlm_ctxt *dlm = data; | |
1739 | struct dlm_master_list_entry *mle = NULL; | |
1740 | struct dlm_assert_master *assert = (struct dlm_assert_master *)msg->buf; | |
1741 | struct dlm_lock_resource *res = NULL; | |
1742 | char *name; | |
a3d33291 | 1743 | unsigned int namelen, hash; |
6714d8e8 | 1744 | u32 flags; |
ba2bf218 | 1745 | int master_request = 0, have_lockres_ref = 0; |
9c6510a5 | 1746 | int ret = 0; |
6714d8e8 KH |
1747 | |
1748 | if (!dlm_grab(dlm)) | |
1749 | return 0; | |
1750 | ||
1751 | name = assert->name; | |
1752 | namelen = assert->namelen; | |
a3d33291 | 1753 | hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
1754 | flags = be32_to_cpu(assert->flags); |
1755 | ||
1756 | if (namelen > DLM_LOCKID_NAME_MAX) { | |
1757 | mlog(ML_ERROR, "Invalid name length!"); | |
1758 | goto done; | |
1759 | } | |
1760 | ||
1761 | spin_lock(&dlm->spinlock); | |
1762 | ||
1763 | if (flags) | |
1764 | mlog(0, "assert_master with flags: %u\n", flags); | |
1765 | ||
1766 | /* find the MLE */ | |
1767 | spin_lock(&dlm->master_lock); | |
1768 | if (!dlm_find_mle(dlm, &mle, name, namelen)) { | |
1769 | /* not an error, could be master just re-asserting */ | |
1770 | mlog(0, "just got an assert_master from %u, but no " | |
1771 | "MLE for it! (%.*s)\n", assert->node_idx, | |
1772 | namelen, name); | |
1773 | } else { | |
1774 | int bit = find_next_bit (mle->maybe_map, O2NM_MAX_NODES, 0); | |
1775 | if (bit >= O2NM_MAX_NODES) { | |
1776 | /* not necessarily an error, though less likely. | |
1777 | * could be master just re-asserting. */ | |
aa852354 | 1778 | mlog(0, "no bits set in the maybe_map, but %u " |
6714d8e8 KH |
1779 | "is asserting! (%.*s)\n", assert->node_idx, |
1780 | namelen, name); | |
1781 | } else if (bit != assert->node_idx) { | |
1782 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | |
1783 | mlog(0, "master %u was found, %u should " | |
1784 | "back off\n", assert->node_idx, bit); | |
1785 | } else { | |
1786 | /* with the fix for bug 569, a higher node | |
1787 | * number winning the mastery will respond | |
1788 | * YES to mastery requests, but this node | |
1789 | * had no way of knowing. let it pass. */ | |
aa852354 | 1790 | mlog(0, "%u is the lowest node, " |
6714d8e8 KH |
1791 | "%u is asserting. (%.*s) %u must " |
1792 | "have begun after %u won.\n", bit, | |
1793 | assert->node_idx, namelen, name, bit, | |
1794 | assert->node_idx); | |
1795 | } | |
1796 | } | |
2d1a868c KH |
1797 | if (mle->type == DLM_MLE_MIGRATION) { |
1798 | if (flags & DLM_ASSERT_MASTER_MLE_CLEANUP) { | |
1799 | mlog(0, "%s:%.*s: got cleanup assert" | |
1800 | " from %u for migration\n", | |
1801 | dlm->name, namelen, name, | |
1802 | assert->node_idx); | |
1803 | } else if (!(flags & DLM_ASSERT_MASTER_FINISH_MIGRATION)) { | |
1804 | mlog(0, "%s:%.*s: got unrelated assert" | |
1805 | " from %u for migration, ignoring\n", | |
1806 | dlm->name, namelen, name, | |
1807 | assert->node_idx); | |
1808 | __dlm_put_mle(mle); | |
1809 | spin_unlock(&dlm->master_lock); | |
1810 | spin_unlock(&dlm->spinlock); | |
1811 | goto done; | |
2bd63216 | 1812 | } |
2d1a868c | 1813 | } |
6714d8e8 KH |
1814 | } |
1815 | spin_unlock(&dlm->master_lock); | |
1816 | ||
1817 | /* ok everything checks out with the MLE | |
1818 | * now check to see if there is a lockres */ | |
a3d33291 | 1819 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
6714d8e8 KH |
1820 | if (res) { |
1821 | spin_lock(&res->spinlock); | |
1822 | if (res->state & DLM_LOCK_RES_RECOVERING) { | |
1823 | mlog(ML_ERROR, "%u asserting but %.*s is " | |
1824 | "RECOVERING!\n", assert->node_idx, namelen, name); | |
1825 | goto kill; | |
1826 | } | |
1827 | if (!mle) { | |
dc2ed195 KH |
1828 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN && |
1829 | res->owner != assert->node_idx) { | |
53ecd25e SM |
1830 | mlog(ML_ERROR, "DIE! Mastery assert from %u, " |
1831 | "but current owner is %u! (%.*s)\n", | |
1832 | assert->node_idx, res->owner, namelen, | |
1833 | name); | |
1834 | __dlm_print_one_lock_resource(res); | |
1835 | BUG(); | |
6714d8e8 KH |
1836 | } |
1837 | } else if (mle->type != DLM_MLE_MIGRATION) { | |
1838 | if (res->owner != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
1839 | /* owner is just re-asserting */ | |
1840 | if (res->owner == assert->node_idx) { | |
1841 | mlog(0, "owner %u re-asserting on " | |
1842 | "lock %.*s\n", assert->node_idx, | |
1843 | namelen, name); | |
1844 | goto ok; | |
1845 | } | |
1846 | mlog(ML_ERROR, "got assert_master from " | |
1847 | "node %u, but %u is the owner! " | |
1848 | "(%.*s)\n", assert->node_idx, | |
1849 | res->owner, namelen, name); | |
1850 | goto kill; | |
1851 | } | |
1852 | if (!(res->state & DLM_LOCK_RES_IN_PROGRESS)) { | |
1853 | mlog(ML_ERROR, "got assert from %u, but lock " | |
1854 | "with no owner should be " | |
1855 | "in-progress! (%.*s)\n", | |
1856 | assert->node_idx, | |
1857 | namelen, name); | |
1858 | goto kill; | |
1859 | } | |
1860 | } else /* mle->type == DLM_MLE_MIGRATION */ { | |
1861 | /* should only be getting an assert from new master */ | |
1862 | if (assert->node_idx != mle->new_master) { | |
1863 | mlog(ML_ERROR, "got assert from %u, but " | |
1864 | "new master is %u, and old master " | |
1865 | "was %u (%.*s)\n", | |
1866 | assert->node_idx, mle->new_master, | |
1867 | mle->master, namelen, name); | |
1868 | goto kill; | |
1869 | } | |
1870 | ||
1871 | } | |
1872 | ok: | |
1873 | spin_unlock(&res->spinlock); | |
1874 | } | |
6714d8e8 KH |
1875 | |
1876 | // mlog(0, "woo! got an assert_master from node %u!\n", | |
1877 | // assert->node_idx); | |
1878 | if (mle) { | |
9c6510a5 KH |
1879 | int extra_ref = 0; |
1880 | int nn = -1; | |
a2bf0477 | 1881 | int rr, err = 0; |
2bd63216 | 1882 | |
6714d8e8 | 1883 | spin_lock(&mle->spinlock); |
9c6510a5 KH |
1884 | if (mle->type == DLM_MLE_BLOCK || mle->type == DLM_MLE_MIGRATION) |
1885 | extra_ref = 1; | |
1886 | else { | |
1887 | /* MASTER mle: if any bits set in the response map | |
1888 | * then the calling node needs to re-assert to clear | |
1889 | * up nodes that this node contacted */ | |
2bd63216 | 1890 | while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES, |
9c6510a5 | 1891 | nn+1)) < O2NM_MAX_NODES) { |
728b9805 | 1892 | if (nn != dlm->node_num && nn != assert->node_idx) { |
9c6510a5 | 1893 | master_request = 1; |
728b9805 JB |
1894 | break; |
1895 | } | |
9c6510a5 KH |
1896 | } |
1897 | } | |
6714d8e8 KH |
1898 | mle->master = assert->node_idx; |
1899 | atomic_set(&mle->woken, 1); | |
1900 | wake_up(&mle->wq); | |
1901 | spin_unlock(&mle->spinlock); | |
1902 | ||
a2bf0477 | 1903 | if (res) { |
a6fa3640 | 1904 | int wake = 0; |
6714d8e8 | 1905 | spin_lock(&res->spinlock); |
a2bf0477 KH |
1906 | if (mle->type == DLM_MLE_MIGRATION) { |
1907 | mlog(0, "finishing off migration of lockres %.*s, " | |
1908 | "from %u to %u\n", | |
1909 | res->lockname.len, res->lockname.name, | |
1910 | dlm->node_num, mle->new_master); | |
1911 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 1912 | wake = 1; |
a2bf0477 KH |
1913 | dlm_change_lockres_owner(dlm, res, mle->new_master); |
1914 | BUG_ON(res->state & DLM_LOCK_RES_DIRTY); | |
1915 | } else { | |
1916 | dlm_change_lockres_owner(dlm, res, mle->master); | |
1917 | } | |
6714d8e8 | 1918 | spin_unlock(&res->spinlock); |
ba2bf218 | 1919 | have_lockres_ref = 1; |
a6fa3640 KH |
1920 | if (wake) |
1921 | wake_up(&res->wq); | |
6714d8e8 | 1922 | } |
a2bf0477 KH |
1923 | |
1924 | /* master is known, detach if not already detached. | |
1925 | * ensures that only one assert_master call will happen | |
1926 | * on this mle. */ | |
a2bf0477 KH |
1927 | spin_lock(&dlm->master_lock); |
1928 | ||
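		/* Sanity-check the ref count.  Roughly: one ref is held
		 * by our dlm_find_mle() lookup above, one more while the
		 * mle is marked inuse, and one more for the extra ref
		 * that master/migration request messages take on BLOCK
		 * and MIGRATION mles.  Anything lower means a ref we
		 * still depend on has already been dropped. */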
1929 | rr = atomic_read(&mle->mle_refs.refcount); | |
1930 | if (mle->inuse > 0) { | |
1931 | if (extra_ref && rr < 3) | |
1932 | err = 1; | |
1933 | else if (!extra_ref && rr < 2) | |
1934 | err = 1; | |
1935 | } else { | |
1936 | if (extra_ref && rr < 2) | |
1937 | err = 1; | |
1938 | else if (!extra_ref && rr < 1) | |
1939 | err = 1; | |
1940 | } | |
1941 | if (err) { | |
1942 | mlog(ML_ERROR, "%s:%.*s: got assert master from %u " | |
1943 | "that will mess up this node, refs=%d, extra=%d, " | |
1944 | "inuse=%d\n", dlm->name, namelen, name, | |
1945 | assert->node_idx, rr, extra_ref, mle->inuse); | |
1946 | dlm_print_one_mle(mle); | |
1947 | } | |
1c084577 | 1948 | __dlm_unlink_mle(dlm, mle); |
a2bf0477 KH |
1949 | __dlm_mle_detach_hb_events(dlm, mle); |
1950 | __dlm_put_mle(mle); | |
6714d8e8 KH |
1951 | if (extra_ref) { |
1952 | /* the assert master message now balances the extra | |
1953 | * ref given by the master / migration request message. | |
1954 | * if this is the last put, it will be removed | |
1955 | * from the list. */ | |
a2bf0477 KH |
1956 | __dlm_put_mle(mle); |
1957 | } | |
1958 | spin_unlock(&dlm->master_lock); | |
a2bf0477 KH |
1959 | } else if (res) { |
1960 | if (res->owner != assert->node_idx) { | |
1961 | mlog(0, "assert_master from %u, but current " | |
1962 | "owner is %u (%.*s), no mle\n", assert->node_idx, | |
1963 | res->owner, namelen, name); | |
6714d8e8 KH |
1964 | } |
1965 | } | |
14741472 | 1966 | spin_unlock(&dlm->spinlock); |
6714d8e8 KH |
1967 | |
1968 | done: | |
9c6510a5 | 1969 | ret = 0; |
3b8118cf KH |
1970 | if (res) { |
1971 | spin_lock(&res->spinlock); | |
1972 | res->state |= DLM_LOCK_RES_SETREF_INPROG; | |
1973 | spin_unlock(&res->spinlock); | |
1974 | *ret_data = (void *)res; | |
1975 | } | |
6714d8e8 | 1976 | dlm_put(dlm); |
9c6510a5 KH |
1977 | if (master_request) { |
1978 | mlog(0, "need to tell master to reassert\n"); | |
ba2bf218 KH |
1979 | /* positive. negative would shoot down the node. */ |
1980 | ret |= DLM_ASSERT_RESPONSE_REASSERT; | |
1981 | if (!have_lockres_ref) { | |
1982 | mlog(ML_ERROR, "strange, got assert from %u, MASTER " | |
1983 | "mle present here for %s:%.*s, but no lockres!\n", | |
1984 | assert->node_idx, dlm->name, namelen, name); | |
1985 | } | |
1986 | } | |
1987 | if (have_lockres_ref) { | |
1988 | /* let the master know we have a reference to the lockres */ | |
1989 | ret |= DLM_ASSERT_RESPONSE_MASTERY_REF; | |
1990 | mlog(0, "%s:%.*s: got assert from %u, need a ref\n", | |
1991 | dlm->name, namelen, name, assert->node_idx); | |
9c6510a5 KH |
1992 | } |
1993 | return ret; | |
6714d8e8 KH |
1994 | |
1995 | kill: | |
1996 | /* kill the caller! */ | |
a9ee4c8a KH |
1997 | mlog(ML_ERROR, "Bad message received from another node. Dumping state " |
1998 | "and killing the other node now! This node is OK and can continue.\n"); | |
1999 | __dlm_print_one_lock_resource(res); | |
6714d8e8 KH |
2000 | spin_unlock(&res->spinlock); |
2001 | spin_unlock(&dlm->spinlock); | |
2bd63216 | 2002 | *ret_data = (void *)res; |
6714d8e8 KH |
2003 | dlm_put(dlm); |
2004 | return -EINVAL; | |
2005 | } | |
2006 | ||
3b8118cf KH |
2007 | void dlm_assert_master_post_handler(int status, void *data, void *ret_data) |
2008 | { | |
2009 | struct dlm_lock_resource *res = (struct dlm_lock_resource *)ret_data; | |
2010 | ||
2011 | if (ret_data) { | |
2012 | spin_lock(&res->spinlock); | |
2013 | res->state &= ~DLM_LOCK_RES_SETREF_INPROG; | |
2014 | spin_unlock(&res->spinlock); | |
2015 | wake_up(&res->wq); | |
2016 | dlm_lockres_put(res); | |
2017 | } | |
2018 | return; | |
2019 | } | |
2020 | ||
6714d8e8 KH |
2021 | int dlm_dispatch_assert_master(struct dlm_ctxt *dlm, |
2022 | struct dlm_lock_resource *res, | |
2023 | int ignore_higher, u8 request_from, u32 flags) | |
2024 | { | |
2025 | struct dlm_work_item *item; | |
b24ae0b5 | 2026 | item = kzalloc(sizeof(*item), GFP_ATOMIC); |
6714d8e8 KH |
2027 | if (!item) |
2028 | return -ENOMEM; | |
2029 | ||
2030 | ||
2031 | /* queue up work for dlm_assert_master_worker */ | |
2032 | dlm_grab(dlm); /* get an extra ref for the work item */ | |
2033 | dlm_init_work_item(dlm, item, dlm_assert_master_worker, NULL); | |
2034 | item->u.am.lockres = res; /* already have a ref */ | |
2035 | /* can optionally ignore node numbers higher than this node */ | |
2036 | item->u.am.ignore_higher = ignore_higher; | |
2037 | item->u.am.request_from = request_from; | |
2038 | item->u.am.flags = flags; | |
2039 | ||
2bd63216 SM |
2040 | if (ignore_higher) |
2041 | mlog(0, "IGNORE HIGHER: %.*s\n", res->lockname.len, | |
9c6510a5 | 2042 | res->lockname.name); |
2bd63216 | 2043 | |
6714d8e8 KH |
2044 | spin_lock(&dlm->work_lock); |
2045 | list_add_tail(&item->list, &dlm->work_list); | |
2046 | spin_unlock(&dlm->work_lock); | |
2047 | ||
3156d267 | 2048 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); |
6714d8e8 KH |
2049 | return 0; |
2050 | } | |
2051 | ||
2052 | static void dlm_assert_master_worker(struct dlm_work_item *item, void *data) | |
2053 | { | |
2054 | struct dlm_ctxt *dlm = data; | |
2055 | int ret = 0; | |
2056 | struct dlm_lock_resource *res; | |
2057 | unsigned long nodemap[BITS_TO_LONGS(O2NM_MAX_NODES)]; | |
2058 | int ignore_higher; | |
2059 | int bit; | |
2060 | u8 request_from; | |
2061 | u32 flags; | |
2062 | ||
2063 | dlm = item->dlm; | |
2064 | res = item->u.am.lockres; | |
2065 | ignore_higher = item->u.am.ignore_higher; | |
2066 | request_from = item->u.am.request_from; | |
2067 | flags = item->u.am.flags; | |
2068 | ||
2069 | spin_lock(&dlm->spinlock); | |
2070 | memcpy(nodemap, dlm->domain_map, sizeof(nodemap)); | |
2071 | spin_unlock(&dlm->spinlock); | |
2072 | ||
2073 | clear_bit(dlm->node_num, nodemap); | |
2074 | if (ignore_higher) { | |
2075 | /* if this is just to clear up mles for nodes below | |
2076 | * this node, do not send the message to the original | |
2077 | * caller or any node number higher than this */ | |
2078 | clear_bit(request_from, nodemap); | |
2079 | bit = dlm->node_num; | |
2080 | while (1) { | |
2081 | bit = find_next_bit(nodemap, O2NM_MAX_NODES, | |
2082 | bit+1); | |
2083 | if (bit >= O2NM_MAX_NODES) | |
2084 | break; | |
2085 | clear_bit(bit, nodemap); | |
2086 | } | |
2087 | } | |
2088 | ||
36407488 KH |
2089 | /* |
2090 | * If we're migrating this lock to someone else, we are no | |
2091 | * longer allowed to assert our own mastery. OTOH, we need to | |
2092 | * prevent migration from starting while we're still asserting | |
2093 | * our dominance. The reserved ast delays migration. | |
2094 | */ | |
2095 | spin_lock(&res->spinlock); | |
2096 | if (res->state & DLM_LOCK_RES_MIGRATING) { | |
2097 | mlog(0, "Someone asked us to assert mastery, but we're " | |
2098 | "in the middle of migration. Skipping assert, " | |
2099 | "the new master will handle that.\n"); | |
2100 | spin_unlock(&res->spinlock); | |
2101 | goto put; | |
2102 | } else | |
2103 | __dlm_lockres_reserve_ast(res); | |
2104 | spin_unlock(&res->spinlock); | |
2105 | ||
6714d8e8 KH |
2106 | /* this call now finishes out the nodemap |
2107 | * even if one or more nodes die */ | |
2108 | mlog(0, "worker about to master %.*s here, this=%u\n", | |
2109 | res->lockname.len, res->lockname.name, dlm->node_num); | |
ba2bf218 | 2110 | ret = dlm_do_assert_master(dlm, res, nodemap, flags); |
6714d8e8 KH |
2111 | if (ret < 0) { |
2112 | /* no need to restart, we are done */ | |
3b3b84a8 KH |
2113 | if (!dlm_is_host_down(ret)) |
2114 | mlog_errno(ret); | |
6714d8e8 KH |
2115 | } |
2116 | ||
36407488 KH |
2117 | /* Ok, we've asserted ourselves. Let's let migration start. */ |
2118 | dlm_lockres_release_ast(dlm, res); | |
2119 | ||
2120 | put: | |
6714d8e8 KH |
2121 | dlm_lockres_put(res); |
2122 | ||
2123 | mlog(0, "finished with dlm_assert_master_worker\n"); | |
2124 | } | |
2125 | ||
c03872f5 KH |
2126 | /* SPECIAL CASE for the $RECOVERY lock used by the recovery thread. |
2127 | * We cannot wait for node recovery to complete to begin mastering this | |
2128 | * lockres because this lockres is used to kick off recovery! ;-) | |
2129 | * So, do a pre-check on all living nodes to see if any of those nodes | |
2130 | * think that $RECOVERY is currently mastered by a dead node. If so, | |
2131 | * we wait a short time to allow that node to get notified by its own | |
2132 | * heartbeat stack, then check again. All $RECOVERY lock resources | |
2bd63216 | 2133 | * mastered by dead nodes are purged when the heartbeat callback is |
c03872f5 KH |
2134 | * fired, so we can know for sure that it is safe to continue once |
2135 | * the node returns a live node or no node. */ | |
2136 | static int dlm_pre_master_reco_lockres(struct dlm_ctxt *dlm, | |
2137 | struct dlm_lock_resource *res) | |
2138 | { | |
2139 | struct dlm_node_iter iter; | |
2140 | int nodenum; | |
2141 | int ret = 0; | |
2142 | u8 master = DLM_LOCK_RES_OWNER_UNKNOWN; | |
2143 | ||
2144 | spin_lock(&dlm->spinlock); | |
2145 | dlm_node_iter_init(dlm->domain_map, &iter); | |
2146 | spin_unlock(&dlm->spinlock); | |
2147 | ||
2148 | while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { | |
2149 | /* do not send to self */ | |
2150 | if (nodenum == dlm->node_num) | |
2151 | continue; | |
2152 | ret = dlm_do_master_requery(dlm, res, nodenum, &master); | |
2153 | if (ret < 0) { | |
2154 | mlog_errno(ret); | |
2155 | if (!dlm_is_host_down(ret)) | |
2156 | BUG(); | |
2157 | /* host is down, so answer for that node would be | |
2158 | * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */ | |
f42a100b | 2159 | ret = 0; |
c03872f5 KH |
2160 | } |
2161 | ||
2162 | if (master != DLM_LOCK_RES_OWNER_UNKNOWN) { | |
2163 | /* check to see if this master is in the recovery map */ | |
2164 | spin_lock(&dlm->spinlock); | |
2165 | if (test_bit(master, dlm->recovery_map)) { | |
2166 | mlog(ML_NOTICE, "%s: node %u has not seen " | |
2167 | "node %u go down yet, and thinks the " | |
2168 | "dead node is mastering the recovery " | |
2169 | "lock. must wait.\n", dlm->name, | |
2170 | nodenum, master); | |
2171 | ret = -EAGAIN; | |
2172 | } | |
2173 | spin_unlock(&dlm->spinlock); | |
2bd63216 | 2174 | mlog(0, "%s: reco lock master is %u\n", dlm->name, |
c03872f5 KH |
2175 | master); |
2176 | break; | |
2177 | } | |
2178 | } | |
2179 | return ret; | |
2180 | } | |
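/*
 * A hypothetical caller of the pre-check above would simply poll until
 * every live node agrees, e.g. (sketch only):
 *
 *	while (dlm_pre_master_reco_lockres(dlm, res) == -EAGAIN)
 *		msleep(100);
 *
 * at which point no live node still believes a dead node masters the
 * $RECOVERY lock.
 */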
2181 | ||
ba2bf218 KH |
2182 | /* |
2183 | * DLM_DEREF_LOCKRES_MSG | |
2184 | */ | |
2185 | ||
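/*
 * The owner of a lockres tracks, in res->refmap, every node that may
 * still hold a cached copy.  When a non-owner purges its copy, it
 * sends DLM_DEREF_LOCKRES_MSG to the owner so that the owner can
 * clear the sender's refmap bit and eventually purge the lockres
 * itself once no references remain.
 */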
2186 | int dlm_drop_lockres_ref(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | |
2187 | { | |
2188 | struct dlm_deref_lockres deref; | |
2189 | int ret = 0, r; | |
2190 | const char *lockname; | |
2191 | unsigned int namelen; | |
2192 | ||
2193 | lockname = res->lockname.name; | |
2194 | namelen = res->lockname.len; | |
2195 | BUG_ON(namelen > O2NM_MAX_NAME_LEN); | |
2196 | ||
ba2bf218 KH |
2197 | memset(&deref, 0, sizeof(deref)); |
2198 | deref.node_idx = dlm->node_num; | |
2199 | deref.namelen = namelen; | |
2200 | memcpy(deref.name, lockname, namelen); | |
2201 | ||
2202 | ret = o2net_send_message(DLM_DEREF_LOCKRES_MSG, dlm->key, | |
2203 | &deref, sizeof(deref), res->owner, &r); | |
2204 | if (ret < 0) | |
8decab3c SM |
2205 | mlog(ML_ERROR, "%s: res %.*s, error %d send DEREF to node %u\n", |
2206 | dlm->name, namelen, lockname, ret, res->owner); | |
ba2bf218 KH |
2207 | else if (r < 0) { |
2208 | /* BAD. other node says I did not have a ref. */ | |
8decab3c SM |
2209 | mlog(ML_ERROR, "%s: res %.*s, DEREF to node %u got %d\n", |
2210 | dlm->name, namelen, lockname, res->owner, r); | |
ba2bf218 KH |
2211 | dlm_print_one_lock_resource(res); |
2212 | BUG(); | |
2213 | } | |
2214 | return ret; | |
2215 | } | |
2216 | ||
d74c9803 KH |
2217 | int dlm_deref_lockres_handler(struct o2net_msg *msg, u32 len, void *data, |
2218 | void **ret_data) | |
ba2bf218 KH |
2219 | { |
2220 | struct dlm_ctxt *dlm = data; | |
2221 | struct dlm_deref_lockres *deref = (struct dlm_deref_lockres *)msg->buf; | |
2222 | struct dlm_lock_resource *res = NULL; | |
2223 | char *name; | |
2224 | unsigned int namelen; | |
2225 | int ret = -EINVAL; | |
2226 | u8 node; | |
2227 | unsigned int hash; | |
f3f85464 SM |
2228 | struct dlm_work_item *item; |
2229 | int cleared = 0; | |
2230 | int dispatch = 0; | |
ba2bf218 KH |
2231 | |
2232 | if (!dlm_grab(dlm)) | |
2233 | return 0; | |
2234 | ||
2235 | name = deref->name; | |
2236 | namelen = deref->namelen; | |
2237 | node = deref->node_idx; | |
2238 | ||
2239 | if (namelen > DLM_LOCKID_NAME_MAX) { | |
2240 | mlog(ML_ERROR, "Invalid name length!"); | |
2241 | goto done; | |
2242 | } | |
2243 | if (deref->node_idx >= O2NM_MAX_NODES) { | |
2244 | mlog(ML_ERROR, "Invalid node number: %u\n", node); | |
2245 | goto done; | |
2246 | } | |
2247 | ||
2248 | hash = dlm_lockid_hash(name, namelen); | |
2249 | ||
2250 | spin_lock(&dlm->spinlock); | |
2251 | res = __dlm_lookup_lockres_full(dlm, name, namelen, hash); | |
2252 | if (!res) { | |
2253 | spin_unlock(&dlm->spinlock); | |
2254 | mlog(ML_ERROR, "%s:%.*s: bad lockres name\n", | |
2255 | dlm->name, namelen, name); | |
2256 | goto done; | |
2257 | } | |
2258 | spin_unlock(&dlm->spinlock); | |
2259 | ||
2260 | spin_lock(&res->spinlock); | |
f3f85464 SM |
2261 | if (res->state & DLM_LOCK_RES_SETREF_INPROG) |
2262 | dispatch = 1; | |
2263 | else { | |
2264 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | |
2265 | if (test_bit(node, res->refmap)) { | |
8d400b81 | 2266 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
f3f85464 SM |
2267 | cleared = 1; |
2268 | } | |
ba2bf218 KH |
2269 | } |
2270 | spin_unlock(&res->spinlock); | |
2271 | ||
f3f85464 SM |
2272 | if (!dispatch) { |
2273 | if (cleared) | |
2274 | dlm_lockres_calc_usage(dlm, res); | |
2275 | else { | |
2276 | mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " | |
2277 | "but it is already dropped!\n", dlm->name, | |
2278 | res->lockname.len, res->lockname.name, node); | |
2af37ce8 | 2279 | dlm_print_one_lock_resource(res); |
f3f85464 SM |
2280 | } |
2281 | ret = 0; | |
2282 | goto done; | |
2283 | } | |
2284 | ||
2285 | item = kzalloc(sizeof(*item), GFP_NOFS); | |
2286 | if (!item) { | |
2287 | ret = -ENOMEM; | |
2288 | mlog_errno(ret); | |
2289 | goto done; | |
2290 | } | |
2291 | ||
2292 | dlm_init_work_item(dlm, item, dlm_deref_lockres_worker, NULL); | |
2293 | item->u.dl.deref_res = res; | |
2294 | item->u.dl.deref_node = node; | |
2295 | ||
2296 | spin_lock(&dlm->work_lock); | |
2297 | list_add_tail(&item->list, &dlm->work_list); | |
2298 | spin_unlock(&dlm->work_lock); | |
2299 | ||
2300 | queue_work(dlm->dlm_worker, &dlm->dispatched_work); | |
2301 | return 0; | |
2302 | ||
ba2bf218 KH |
2303 | done: |
2304 | if (res) | |
2305 | dlm_lockres_put(res); | |
2306 | dlm_put(dlm); | |
f3f85464 | 2307 | |
ba2bf218 KH |
2308 | return ret; |
2309 | } | |
2310 | ||
f3f85464 SM |
2311 | static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) |
2312 | { | |
2313 | struct dlm_ctxt *dlm; | |
2314 | struct dlm_lock_resource *res; | |
2315 | u8 node; | |
2316 | u8 cleared = 0; | |
2317 | ||
2318 | dlm = item->dlm; | |
2319 | res = item->u.dl.deref_res; | |
2320 | node = item->u.dl.deref_node; | |
2321 | ||
2322 | spin_lock(&res->spinlock); | |
2323 | BUG_ON(res->state & DLM_LOCK_RES_DROPPING_REF); | |
2324 | if (test_bit(node, res->refmap)) { | |
2325 | __dlm_wait_on_lockres_flags(res, DLM_LOCK_RES_SETREF_INPROG); | |
8d400b81 | 2326 | dlm_lockres_clear_refmap_bit(dlm, res, node); |
f3f85464 SM |
2327 | cleared = 1; |
2328 | } | |
2329 | spin_unlock(&res->spinlock); | |
2330 | ||
2331 | if (cleared) { | |
2332 | mlog(0, "%s:%.*s node %u ref dropped in dispatch\n", | |
2333 | dlm->name, res->lockname.len, res->lockname.name, node); | |
2334 | dlm_lockres_calc_usage(dlm, res); | |
2335 | } else { | |
2336 | mlog(ML_ERROR, "%s:%.*s: node %u trying to drop ref " | |
2337 | "but it is already dropped!\n", dlm->name, | |
2338 | res->lockname.len, res->lockname.name, node); | |
2af37ce8 | 2339 | dlm_print_one_lock_resource(res); |
f3f85464 SM |
2340 | } |
2341 | ||
2342 | dlm_lockres_put(res); | |
2343 | } | |
2344 | ||
9f62e960 SM |
2345 | /* |
2346 | * A migrateable resource is one that is: | |
2347 | * 1. locally mastered, and | |
2348 | * 2. has zero local locks, and | |
2349 | * 3. has one or more non-local locks, or one or more references | |
2350 | * Returns 1 if yes, 0 if not. | |
2f5bf1f2 SM |
2351 | */ |
2352 | static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, | |
9f62e960 | 2353 | struct dlm_lock_resource *res) |
2f5bf1f2 | 2354 | { |
9f62e960 SM |
2355 | enum dlm_lockres_list idx; |
2356 | int nonlocal = 0, node_ref; | |
800deef3 | 2357 | struct list_head *queue; |
2f5bf1f2 | 2358 | struct dlm_lock *lock; |
9f62e960 | 2359 | u64 cookie; |
2f5bf1f2 SM |
2360 | |
2361 | assert_spin_locked(&res->spinlock); | |
2362 | ||
fae477b6 X |
2363 | /* delay migration when the lockres is in MIGRATING state */ |
2364 | if (res->state & DLM_LOCK_RES_MIGRATING) | |
2365 | return 0; | |
2366 | ||
9f62e960 SM |
2367 | if (res->owner != dlm->node_num) |
2368 | return 0; | |
2f5bf1f2 | 2369 | |
9f62e960 SM |
2370 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { |
2371 | queue = dlm_list_idx_to_ptr(res, idx); | |
800deef3 | 2372 | list_for_each_entry(lock, queue, list) { |
9f62e960 SM |
2373 | if (lock->ml.node != dlm->node_num) { |
2374 | nonlocal++; | |
2375 | continue; | |
2f5bf1f2 | 2376 | } |
9f62e960 SM |
2377 | cookie = be64_to_cpu(lock->ml.cookie); |
2378 | mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on " | |
2379 | "%s list\n", dlm->name, res->lockname.len, | |
2380 | res->lockname.name, | |
2381 | dlm_get_lock_cookie_node(cookie), | |
2382 | dlm_get_lock_cookie_seq(cookie), | |
2383 | dlm_list_in_text(idx)); | |
2384 | return 0; | |
2f5bf1f2 | 2385 | } |
2f5bf1f2 SM |
2386 | } |
2387 | ||
9f62e960 SM |
2388 | if (!nonlocal) { |
2389 | node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | |
2390 | if (node_ref >= O2NM_MAX_NODES) | |
2391 | return 0; | |
2392 | } | |
388c4bcb | 2393 | |
9f62e960 SM |
2394 | mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len, |
2395 | res->lockname.name); | |
2f5bf1f2 | 2396 | |
9f62e960 | 2397 | return 1; |
2f5bf1f2 | 2398 | } |
6714d8e8 KH |
2399 | |
2400 | /* | |
2401 | * DLM_MIGRATE_LOCKRES | |
2402 | */ | |
2403 | ||
2404 | ||
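/*
 * Outline of dlm_migrate_lockres() below: preallocate the migratable
 * lockres page and a migration mle; register the mle (clearing any
 * stale master mles); set DLM_LOCK_RES_MIGRATING once all asts have
 * been flushed; push the lock state to the target with
 * dlm_send_one_lockres(); then wait for the target's assert_master,
 * after which ownership flips and the nonlocal locks are freed.
 */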
faf0ec9f | 2405 | static int dlm_migrate_lockres(struct dlm_ctxt *dlm, |
66effd3c | 2406 | struct dlm_lock_resource *res, u8 target) |
6714d8e8 KH |
2407 | { |
2408 | struct dlm_master_list_entry *mle = NULL; | |
2409 | struct dlm_master_list_entry *oldmle = NULL; | |
2410 | struct dlm_migratable_lockres *mres = NULL; | |
2f5bf1f2 | 2411 | int ret = 0; |
6714d8e8 KH |
2412 | const char *name; |
2413 | unsigned int namelen; | |
2414 | int mle_added = 0; | |
2f5bf1f2 | 2415 | int wake = 0; |
6714d8e8 KH |
2416 | |
2417 | if (!dlm_grab(dlm)) | |
2418 | return -EINVAL; | |
2419 | ||
66effd3c SM |
2420 | BUG_ON(target == O2NM_MAX_NODES); |
2421 | ||
6714d8e8 KH |
2422 | name = res->lockname.name; |
2423 | namelen = res->lockname.len; | |
2424 | ||
66effd3c SM |
2425 | mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, |
2426 | target); | |
6714d8e8 | 2427 | |
66effd3c | 2428 | /* preallocate up front. if this fails, abort */ |
6714d8e8 | 2429 | ret = -ENOMEM; |
ad8100e0 | 2430 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); |
6714d8e8 KH |
2431 | if (!mres) { |
2432 | mlog_errno(ret); | |
2433 | goto leave; | |
2434 | } | |
2435 | ||
3914ed0c | 2436 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 KH |
2437 | if (!mle) { |
2438 | mlog_errno(ret); | |
2439 | goto leave; | |
2440 | } | |
2441 | ret = 0; | |
2442 | ||
6714d8e8 KH |
2443 | /* |
2444 | * clear any existing master requests and | |
2445 | * add the migration mle to the list | |
2446 | */ | |
66effd3c | 2447 | spin_lock(&dlm->spinlock); |
6714d8e8 KH |
2448 | spin_lock(&dlm->master_lock); |
2449 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, | |
2450 | namelen, target, dlm->node_num); | |
2451 | spin_unlock(&dlm->master_lock); | |
2452 | spin_unlock(&dlm->spinlock); | |
2453 | ||
2454 | if (ret == -EEXIST) { | |
2455 | mlog(0, "another process is already migrating it\n"); | |
2456 | goto fail; | |
2457 | } | |
2458 | mle_added = 1; | |
2459 | ||
2460 | /* | |
2461 | * set the MIGRATING flag and flush asts | |
2462 | * if we fail after this we need to re-dirty the lockres | |
2463 | */ | |
2464 | if (dlm_mark_lockres_migrating(dlm, res, target) < 0) { | |
2465 | mlog(ML_ERROR, "tried to migrate %.*s to %u, but " | |
2466 | "the target went down.\n", res->lockname.len, | |
2467 | res->lockname.name, target); | |
2468 | spin_lock(&res->spinlock); | |
2469 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2470 | wake = 1; |
6714d8e8 KH |
2471 | spin_unlock(&res->spinlock); |
2472 | ret = -EINVAL; | |
2473 | } | |
2474 | ||
2475 | fail: | |
2476 | if (oldmle) { | |
2477 | /* master is known, detach if not already detached */ | |
2478 | dlm_mle_detach_hb_events(dlm, oldmle); | |
2479 | dlm_put_mle(oldmle); | |
2480 | } | |
2481 | ||
2482 | if (ret < 0) { | |
2483 | if (mle_added) { | |
2484 | dlm_mle_detach_hb_events(dlm, mle); | |
2485 | dlm_put_mle(mle); | |
2486 | } else if (mle) { | |
2487 | kmem_cache_free(dlm_mle_cache, mle); | |
66effd3c | 2488 | mle = NULL; |
6714d8e8 KH |
2489 | } |
2490 | goto leave; | |
2491 | } | |
2492 | ||
2493 | /* | |
2494 | * at this point, we have a migration target, an mle | |
2495 | * in the master list, and the MIGRATING flag set on | |
2496 | * the lockres | |
2497 | */ | |
2498 | ||
1cd04dbe KH |
2499 | /* now that remote nodes are spinning on the MIGRATING flag, |
2500 | * ensure that all assert_master work is flushed. */ | |
2501 | flush_workqueue(dlm->dlm_worker); | |
6714d8e8 KH |
2502 | |
2503 | /* get an extra reference on the mle. | |
2504 | * otherwise the assert_master from the new | |
2505 | * master will destroy this. | |
2506 | * also, make sure that all callers of dlm_get_mle | |
2507 | * take both dlm->spinlock and dlm->master_lock */ | |
2508 | spin_lock(&dlm->spinlock); | |
2509 | spin_lock(&dlm->master_lock); | |
a2bf0477 | 2510 | dlm_get_mle_inuse(mle); |
6714d8e8 KH |
2511 | spin_unlock(&dlm->master_lock); |
2512 | spin_unlock(&dlm->spinlock); | |
2513 | ||
2514 | /* notify new node and send all lock state */ | |
2515 | /* call send_one_lockres with migration flag. | |
2516 | * this serves as notice to the target node that a | |
2517 | * migration is starting. */ | |
2518 | ret = dlm_send_one_lockres(dlm, res, mres, target, | |
2519 | DLM_MRES_MIGRATION); | |
2520 | ||
2521 | if (ret < 0) { | |
2522 | mlog(0, "migration to node %u failed with %d\n", | |
2523 | target, ret); | |
2524 | /* migration failed, detach and clean up mle */ | |
2525 | dlm_mle_detach_hb_events(dlm, mle); | |
2526 | dlm_put_mle(mle); | |
a2bf0477 KH |
2527 | dlm_put_mle_inuse(mle); |
2528 | spin_lock(&res->spinlock); | |
2529 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2530 | wake = 1; |
a2bf0477 | 2531 | spin_unlock(&res->spinlock); |
df016c66 SM |
2532 | if (dlm_is_host_down(ret)) |
2533 | dlm_wait_for_node_death(dlm, target, | |
2534 | DLM_NODE_DEATH_WAIT_MAX); | |
6714d8e8 KH |
2535 | goto leave; |
2536 | } | |
2537 | ||
2538 | /* at this point, the target sends a message to all nodes, | |
2539 | * (using dlm_do_migrate_request). this node is skipped since | |
2540 | * we had to put an mle in the list to begin the process. this | |
2541 | * node now waits for target to do an assert master. this node | |
2542 | * will be the last one notified, ensuring that the migration | |
2543 | * is complete everywhere. if the target dies while this is | |
2544 | * going on, some nodes could potentially see the target as the | |
2545 | * master, so it is important that my recovery finds the migration | |
af901ca1 | 2546 | * mle and sets the master to UNKNOWN. */ |
6714d8e8 KH |
2547 | |
2548 | ||
2549 | /* wait for new node to assert master */ | |
2550 | while (1) { | |
2551 | ret = wait_event_interruptible_timeout(mle->wq, | |
2552 | (atomic_read(&mle->woken) == 1), | |
2553 | msecs_to_jiffies(5000)); | |
2554 | ||
2555 | if (ret >= 0) { | |
2556 | if (atomic_read(&mle->woken) == 1 || | |
2557 | res->owner == target) | |
2558 | break; | |
2559 | ||
1cd04dbe KH |
2560 | mlog(0, "%s:%.*s: timed out during migration\n", |
2561 | dlm->name, res->lockname.len, res->lockname.name); | |
2bd63216 | 2562 | /* avoid hang during shutdown when migrating lockres |
e2faea4c KH |
2563 | * to a node which also goes down */ |
2564 | if (dlm_is_node_dead(dlm, target)) { | |
aa852354 KH |
2565 | mlog(0, "%s:%.*s: expected migration " |
2566 | "target %u is no longer up, restarting\n", | |
e2faea4c KH |
2567 | dlm->name, res->lockname.len, |
2568 | res->lockname.name, target); | |
1cd04dbe KH |
2569 | ret = -EINVAL; |
2570 | /* migration failed, detach and clean up mle */ | |
2571 | dlm_mle_detach_hb_events(dlm, mle); | |
2572 | dlm_put_mle(mle); | |
2573 | dlm_put_mle_inuse(mle); | |
2574 | spin_lock(&res->spinlock); | |
2575 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
a6fa3640 | 2576 | wake = 1; |
1cd04dbe KH |
2577 | spin_unlock(&res->spinlock); |
2578 | goto leave; | |
e2faea4c | 2579 | } |
1cd04dbe KH |
2580 | } else |
2581 | mlog(0, "%s:%.*s: caught signal during migration\n", | |
2582 | dlm->name, res->lockname.len, res->lockname.name); | |
6714d8e8 KH |
2583 | } |
2584 | ||
2585 | /* all done, set the owner, clear the flag */ | |
2586 | spin_lock(&res->spinlock); | |
2587 | dlm_set_lockres_owner(dlm, res, target); | |
2588 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
2589 | dlm_remove_nonlocal_locks(dlm, res); | |
2590 | spin_unlock(&res->spinlock); | |
2591 | wake_up(&res->wq); | |
2592 | ||
2593 | /* master is known, detach if not already detached */ | |
2594 | dlm_mle_detach_hb_events(dlm, mle); | |
a2bf0477 | 2595 | dlm_put_mle_inuse(mle); |
6714d8e8 KH |
2596 | ret = 0; |
2597 | ||
2598 | dlm_lockres_calc_usage(dlm, res); | |
2599 | ||
2600 | leave: | |
2601 | /* re-dirty the lockres if we failed */ | |
2602 | if (ret < 0) | |
2603 | dlm_kick_thread(dlm, res); | |
2604 | ||
a6fa3640 KH |
2605 | /* wake up waiters if the MIGRATING flag got set |
2606 | * but migration failed */ | |
2607 | if (wake) | |
2608 | wake_up(&res->wq); | |
2609 | ||
6714d8e8 KH |
2610 | if (mres) |
2611 | free_page((unsigned long)mres); | |
2612 | ||
2613 | dlm_put(dlm); | |
2614 | ||
9f62e960 SM |
2615 | mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, |
2616 | name, target, ret); | |
6714d8e8 KH |
2617 | return ret; |
2618 | } | |
6714d8e8 | 2619 | |
ba2bf218 KH |
2620 | #define DLM_MIGRATION_RETRY_MS 100 |
2621 | ||
9f62e960 SM |
2622 | /* |
2623 | * Should be called only after beginning the domain leave process. | |
ba2bf218 KH |
2624 | * There should not be any remaining locks on nonlocal lock resources, |
2625 | * and there should be no local locks left on locally mastered resources. | |
2626 | * | |
2627 | * Called with the dlm spinlock held, may drop it to do migration, but | |
2628 | * will re-acquire before exit. | |
2629 | * | |
9f62e960 SM |
2630 | * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped |
2631 | */ | |
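/* (Hash-walking callers are expected to restart their bucket scan
 * whenever this returns 1, since dropping dlm->spinlock invalidates
 * the walk.) */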
ba2bf218 KH |
2632 | int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
2633 | { | |
66effd3c | 2634 | int ret; |
ba2bf218 | 2635 | int lock_dropped = 0; |
66effd3c | 2636 | u8 target = O2NM_MAX_NODES; |
ba2bf218 | 2637 | |
9f62e960 | 2638 | assert_spin_locked(&dlm->spinlock); |
2f5bf1f2 | 2639 | |
9f62e960 | 2640 | spin_lock(&res->spinlock); |
66effd3c SM |
2641 | if (dlm_is_lockres_migrateable(dlm, res)) |
2642 | target = dlm_pick_migration_target(dlm, res); | |
b36c3f84 | 2643 | spin_unlock(&res->spinlock); |
66effd3c SM |
2644 | |
2645 | if (target == O2NM_MAX_NODES) | |
9f62e960 | 2646 | goto leave; |
ba2bf218 KH |
2647 | |
2648 | /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ | |
2649 | spin_unlock(&dlm->spinlock); | |
2650 | lock_dropped = 1; | |
66effd3c SM |
2651 | ret = dlm_migrate_lockres(dlm, res, target); |
2652 | if (ret) | |
2653 | mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", | |
2654 | dlm->name, res->lockname.len, res->lockname.name, | |
2655 | target, ret); | |
ba2bf218 KH |
2656 | spin_lock(&dlm->spinlock); |
2657 | leave: | |
2658 | return lock_dropped; | |
2659 | } | |
2660 | ||
6714d8e8 KH |
2661 | int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock) |
2662 | { | |
2663 | int ret; | |
2664 | spin_lock(&dlm->ast_lock); | |
2665 | spin_lock(&lock->spinlock); | |
2666 | ret = (list_empty(&lock->bast_list) && !lock->bast_pending); | |
2667 | spin_unlock(&lock->spinlock); | |
2668 | spin_unlock(&dlm->ast_lock); | |
2669 | return ret; | |
2670 | } | |
2671 | ||
2672 | static int dlm_migration_can_proceed(struct dlm_ctxt *dlm, | |
2673 | struct dlm_lock_resource *res, | |
2674 | u8 mig_target) | |
2675 | { | |
2676 | int can_proceed; | |
2677 | spin_lock(&res->spinlock); | |
2678 | can_proceed = !!(res->state & DLM_LOCK_RES_MIGRATING); | |
2679 | spin_unlock(&res->spinlock); | |
2680 | ||
2bd63216 | 2681 | /* target has died, so make the caller break out of the |
6714d8e8 KH |
2682 | * wait_event, but caller must recheck the domain_map */ |
2683 | spin_lock(&dlm->spinlock); | |
2684 | if (!test_bit(mig_target, dlm->domain_map)) | |
2685 | can_proceed = 1; | |
2686 | spin_unlock(&dlm->spinlock); | |
2687 | return can_proceed; | |
2688 | } | |
2689 | ||
faf0ec9f AB |
2690 | static int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, |
2691 | struct dlm_lock_resource *res) | |
6714d8e8 KH |
2692 | { |
2693 | int ret; | |
2694 | spin_lock(&res->spinlock); | |
2695 | ret = !!(res->state & DLM_LOCK_RES_DIRTY); | |
2696 | spin_unlock(&res->spinlock); | |
2697 | return ret; | |
2698 | } | |
2699 | ||
2700 | ||
2701 | static int dlm_mark_lockres_migrating(struct dlm_ctxt *dlm, | |
2702 | struct dlm_lock_resource *res, | |
2703 | u8 target) | |
2704 | { | |
2705 | int ret = 0; | |
2706 | ||
2707 | mlog(0, "dlm_mark_lockres_migrating: %.*s, from %u to %u\n", | |
2708 | res->lockname.len, res->lockname.name, dlm->node_num, | |
2709 | target); | |
2710 | /* need to set MIGRATING flag on lockres. this is done by | |
2711 | * ensuring that all asts have been flushed for this lockres. */ | |
2712 | spin_lock(&res->spinlock); | |
2713 | BUG_ON(res->migration_pending); | |
2714 | res->migration_pending = 1; | |
2715 | /* strategy is to reserve an extra ast then release | |
2716 | * it below, letting the release do all of the work */ | |
2717 | __dlm_lockres_reserve_ast(res); | |
2718 | spin_unlock(&res->spinlock); | |
2719 | ||
ddc09c8d | 2720 | /* now flush all the pending asts */ |
6714d8e8 | 2721 | dlm_kick_thread(dlm, res); |
ddc09c8d KH |
2722 | /* before waiting on DIRTY, block processes which may |
2723 | * try to dirty the lockres before MIGRATING is set */ | |
2724 | spin_lock(&res->spinlock); | |
2725 | BUG_ON(res->state & DLM_LOCK_RES_BLOCK_DIRTY); | |
2726 | res->state |= DLM_LOCK_RES_BLOCK_DIRTY; | |
2727 | spin_unlock(&res->spinlock); | |
2728 | /* now wait on any pending asts and the DIRTY state */ | |
6714d8e8 KH |
2729 | wait_event(dlm->ast_wq, !dlm_lockres_is_dirty(dlm, res)); |
2730 | dlm_lockres_release_ast(dlm, res); | |
2731 | ||
2732 | mlog(0, "about to wait on migration_wq, dirty=%s\n", | |
2733 | res->state & DLM_LOCK_RES_DIRTY ? "yes" : "no"); | |
2734 | /* if the extra ref we just put was the final one, this | |
2735 | * will pass thru immediately. otherwise, we need to wait | |
2736 | * for the last ast to finish. */ | |
2737 | again: | |
2738 | ret = wait_event_interruptible_timeout(dlm->migration_wq, | |
2739 | dlm_migration_can_proceed(dlm, res, target), | |
2740 | msecs_to_jiffies(1000)); | |
2741 | if (ret < 0) { | |
2742 | mlog(0, "woken again: migrating? %s, dead? %s\n", | |
2743 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | |
2744 | test_bit(target, dlm->domain_map) ? "no":"yes"); | |
2745 | } else { | |
2746 | mlog(0, "all is well: migrating? %s, dead? %s\n", | |
2747 | res->state & DLM_LOCK_RES_MIGRATING ? "yes":"no", | |
2748 | test_bit(target, dlm->domain_map) ? "no":"yes"); | |
2749 | } | |
2750 | if (!dlm_migration_can_proceed(dlm, res, target)) { | |
2751 | mlog(0, "trying again...\n"); | |
2752 | goto again; | |
2753 | } | |
2754 | ||
a39953dd | 2755 | ret = 0; |
6714d8e8 KH |
2756 | /* did the target go down or die? */ |
2757 | spin_lock(&dlm->spinlock); | |
2758 | if (!test_bit(target, dlm->domain_map)) { | |
2759 | mlog(ML_ERROR, "aha. migration target %u just went down\n", | |
2760 | target); | |
2761 | ret = -EHOSTDOWN; | |
2762 | } | |
2763 | spin_unlock(&dlm->spinlock); | |
2764 | ||
a39953dd WW |
2765 | /* |
2766 | * if target is down, we need to clear DLM_LOCK_RES_BLOCK_DIRTY for | |
2767 | * another try; otherwise, we are sure the MIGRATING state is set, so | |
2768 | * drop the unneeded state which blocked threads trying to DIRTY | |
2769 | */ | |
2770 | spin_lock(&res->spinlock); | |
2771 | BUG_ON(!(res->state & DLM_LOCK_RES_BLOCK_DIRTY)); | |
2772 | res->state &= ~DLM_LOCK_RES_BLOCK_DIRTY; | |
2773 | if (!ret) | |
2774 | BUG_ON(!(res->state & DLM_LOCK_RES_MIGRATING)); | |
2775 | spin_unlock(&res->spinlock); | |
2776 | ||
6714d8e8 KH |
2777 | /* |
2778 | * at this point: | |
2779 | * | |
a39953dd | 2780 | * o the DLM_LOCK_RES_MIGRATING flag is set if target not down |
6714d8e8 KH |
2781 | * o there are no pending asts on this lockres |
2782 | * o all processes trying to reserve an ast on this | |
2783 | * lockres must wait for the MIGRATING flag to clear | |
2784 | */ | |
2785 | return ret; | |
2786 | } | |
2787 | ||
2788 | /* last step in the migration process. | |
2789 | * original master calls this to free all of the dlm_lock | |
2790 | * structures that used to be for other nodes. */ | |
2791 | static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |
2792 | struct dlm_lock_resource *res) | |
2793 | { | |
6714d8e8 | 2794 | struct list_head *queue = &res->granted; |
ba2bf218 | 2795 | int i, bit; |
800deef3 | 2796 | struct dlm_lock *lock, *next; |
6714d8e8 KH |
2797 | |
2798 | assert_spin_locked(&res->spinlock); | |
2799 | ||
2800 | BUG_ON(res->owner == dlm->node_num); | |
2801 | ||
2802 | for (i=0; i<3; i++) { | |
800deef3 | 2803 | list_for_each_entry_safe(lock, next, queue, list) { |
6714d8e8 KH |
2804 | if (lock->ml.node != dlm->node_num) { |
2805 | mlog(0, "putting lock for node %u\n", | |
2806 | lock->ml.node); | |
2807 | /* be extra careful */ | |
2808 | BUG_ON(!list_empty(&lock->ast_list)); | |
2809 | BUG_ON(!list_empty(&lock->bast_list)); | |
2810 | BUG_ON(lock->ast_pending); | |
2811 | BUG_ON(lock->bast_pending); | |
8d400b81 SM |
2812 | dlm_lockres_clear_refmap_bit(dlm, res, |
2813 | lock->ml.node); | |
6714d8e8 KH |
2814 | list_del_init(&lock->list); |
2815 | dlm_lock_put(lock); | |
2c5c54ac SM |
2816 | /* In a normal unlock, we would have added a |
2817 | * DLM_UNLOCK_FREE_LOCK action. Force it. */ | |
2818 | dlm_lock_put(lock); | |
6714d8e8 KH |
2819 | } |
2820 | } | |
2821 | queue++; | |
2822 | } | |
ba2bf218 KH |
2823 | bit = 0; |
2824 | while (1) { | |
2825 | bit = find_next_bit(res->refmap, O2NM_MAX_NODES, bit); | |
2826 | if (bit >= O2NM_MAX_NODES) | |
2827 | break; | |
2828 | /* do not clear the local node reference, if there is a | |
2829 | * process holding this, let it drop the ref itself */ | |
2830 | if (bit != dlm->node_num) { | |
2831 | mlog(0, "%s:%.*s: node %u had a ref to this " | |
2832 | "migrating lockres, clearing\n", dlm->name, | |
2833 | res->lockname.len, res->lockname.name, bit); | |
8d400b81 | 2834 | dlm_lockres_clear_refmap_bit(dlm, res, bit); |
ba2bf218 KH |
2835 | } |
2836 | bit++; | |
2837 | } | |
6714d8e8 KH |
2838 | } |
2839 | ||
66effd3c SM |
2840 | /* |
2841 | * Pick a node to migrate the lock resource to. This function selects a | |
2842 | * potential target based first on the locks and then on refmap. It skips | |
2843 | * nodes that are in the process of exiting the domain. | |
2844 | */ | |
6714d8e8 KH |
2845 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, |
2846 | struct dlm_lock_resource *res) | |
2847 | { | |
66effd3c | 2848 | enum dlm_lockres_list idx; |
6714d8e8 | 2849 | struct list_head *queue = &res->granted; |
6714d8e8 | 2850 | struct dlm_lock *lock; |
66effd3c SM |
2851 | int noderef; |
2852 | u8 nodenum = O2NM_MAX_NODES; | |
6714d8e8 KH |
2853 | |
2854 | assert_spin_locked(&dlm->spinlock); | |
66effd3c | 2855 | assert_spin_locked(&res->spinlock); |
6714d8e8 | 2856 | |
66effd3c SM |
2857 | /* Go through all the locks */ |
2858 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { | |
2859 | queue = dlm_list_idx_to_ptr(res, idx); | |
800deef3 | 2860 | list_for_each_entry(lock, queue, list) { |
66effd3c SM |
2861 | if (lock->ml.node == dlm->node_num) |
2862 | continue; | |
2863 | if (test_bit(lock->ml.node, dlm->exit_domain_map)) | |
2864 | continue; | |
2865 | nodenum = lock->ml.node; | |
2866 | goto bail; | |
6714d8e8 | 2867 | } |
6714d8e8 | 2868 | } |
388c4bcb | 2869 | |
66effd3c SM |
2870 | /* Go through the refmap */ | |
2871 | noderef = -1; | |
6714d8e8 | 2872 | while (1) { |
66effd3c SM |
2873 | noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, |
2874 | noderef + 1); | |
2875 | if (noderef >= O2NM_MAX_NODES) | |
6714d8e8 | 2876 | break; |
66effd3c SM |
2877 | if (noderef == dlm->node_num) |
2878 | continue; | |
2879 | if (test_bit(noderef, dlm->exit_domain_map)) | |
2880 | continue; | |
2881 | nodenum = noderef; | |
2882 | goto bail; | |
6714d8e8 KH |
2883 | } |
2884 | ||
66effd3c SM |
2885 | bail: |
2886 | return nodenum; | |
6714d8e8 KH |
2887 | } |
2888 | ||
6714d8e8 KH |
2889 | /* this is called by the new master once all lockres |
2890 | * data has been received */ | |
2891 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | |
2892 | struct dlm_lock_resource *res, | |
2893 | u8 master, u8 new_master, | |
2894 | struct dlm_node_iter *iter) | |
2895 | { | |
2896 | struct dlm_migrate_request migrate; | |
2b832564 | 2897 | int ret, skip, status = 0; |
6714d8e8 KH |
2898 | int nodenum; |
2899 | ||
2900 | memset(&migrate, 0, sizeof(migrate)); | |
2901 | migrate.namelen = res->lockname.len; | |
2902 | memcpy(migrate.name, res->lockname.name, migrate.namelen); | |
2903 | migrate.new_master = new_master; | |
2904 | migrate.master = master; | |
2905 | ||
2906 | ret = 0; | |
2907 | ||
2908 | /* send message to all nodes, except the master and myself */ | |
2909 | while ((nodenum = dlm_node_iter_next(iter)) >= 0) { | |
2910 | if (nodenum == master || | |
2911 | nodenum == new_master) | |
2912 | continue; | |
2913 | ||
2b832564 SM |
2914 | /* We could race exit domain. If exited, skip. */ |
2915 | spin_lock(&dlm->spinlock); | |
2916 | skip = (!test_bit(nodenum, dlm->domain_map)); | |
2917 | spin_unlock(&dlm->spinlock); | |
2918 | if (skip) { | |
2919 | clear_bit(nodenum, iter->node_map); | |
2920 | continue; | |
2921 | } | |
2922 | ||
6714d8e8 KH |
2923 | ret = o2net_send_message(DLM_MIGRATE_REQUEST_MSG, dlm->key, |
2924 | &migrate, sizeof(migrate), nodenum, | |
2925 | &status); | |
2b832564 | 2926 | if (ret < 0) { |
8decab3c SM |
2927 | mlog(ML_ERROR, "%s: res %.*s, Error %d send " |
2928 | "MIGRATE_REQUEST to node %u\n", dlm->name, | |
2929 | migrate.namelen, migrate.name, ret, nodenum); | |
2b832564 SM |
2930 | if (!dlm_is_host_down(ret)) { |
2931 | mlog(ML_ERROR, "unhandled error=%d!\n", ret); | |
2932 | BUG(); | |
2933 | } | |
2934 | clear_bit(nodenum, iter->node_map); | |
2935 | ret = 0; | |
2936 | } else if (status < 0) { | |
6714d8e8 KH |
2937 | mlog(0, "migrate request (node %u) returned %d!\n", |
2938 | nodenum, status); | |
2939 | ret = status; | |
ba2bf218 KH |
2940 | } else if (status == DLM_MIGRATE_RESPONSE_MASTERY_REF) { |
2941 | /* during the migration request we short-circuited | |
2942 | * the mastery of the lockres. make sure we have | |
2943 | * a mastery ref for nodenum */ | |
2944 | mlog(0, "%s:%.*s: need ref for node %u\n", | |
2945 | dlm->name, res->lockname.len, res->lockname.name, | |
2946 | nodenum); | |
2947 | spin_lock(&res->spinlock); | |
8d400b81 | 2948 | dlm_lockres_set_refmap_bit(dlm, res, nodenum); |
ba2bf218 | 2949 | spin_unlock(&res->spinlock); |
6714d8e8 KH |
2950 | } |
2951 | } | |
2952 | ||
2953 | if (ret < 0) | |
2954 | mlog_errno(ret); | |
2955 | ||
2956 | mlog(0, "returning ret=%d\n", ret); | |
2957 | return ret; | |
2958 | } | |
2959 | ||
2960 | ||
2961 | /* If there is an existing mle for this lockres, we now know who the master |
2962 | * is (the node that sent us *this* message), so we can clear it up right |
2963 | * away. Since the process that put the mle on the list still holds a |
2964 | * reference to it, we can unhash it now, set the master and wake the |
2965 | * process. As a result, we will have no mle in the list to start with, and |
2966 | * the migration mle added below should be the only one found by anyone |
2967 | * scanning the list. */ |
d74c9803 KH |
2968 | int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data, |
2969 | void **ret_data) | |
6714d8e8 KH |
2970 | { |
2971 | struct dlm_ctxt *dlm = data; | |
2972 | struct dlm_lock_resource *res = NULL; | |
2973 | struct dlm_migrate_request *migrate = (struct dlm_migrate_request *) msg->buf; | |
2974 | struct dlm_master_list_entry *mle = NULL, *oldmle = NULL; | |
2975 | const char *name; | |
a3d33291 | 2976 | unsigned int namelen, hash; |
6714d8e8 KH |
2977 | int ret = 0; |
2978 | ||
2979 | if (!dlm_grab(dlm)) | |
2980 | return -EINVAL; | |
2981 | ||
2982 | name = migrate->name; | |
2983 | namelen = migrate->namelen; | |
a3d33291 | 2984 | hash = dlm_lockid_hash(name, namelen); |
6714d8e8 KH |
2985 | |
2986 | /* preallocate.. if this fails, abort */ | |
3914ed0c | 2987 | mle = kmem_cache_alloc(dlm_mle_cache, GFP_NOFS); |
6714d8e8 KH |
2988 | |
2989 | if (!mle) { | |
2990 | ret = -ENOMEM; | |
2991 | goto leave; | |
2992 | } | |
2993 | ||
2994 | /* check for pre-existing lock */ | |
2995 | spin_lock(&dlm->spinlock); | |
a3d33291 | 2996 | res = __dlm_lookup_lockres(dlm, name, namelen, hash); |
6714d8e8 KH |
2997 | if (res) { |
2998 | spin_lock(&res->spinlock); | |
2999 | if (res->state & DLM_LOCK_RES_RECOVERING) { | |
3000 | /* if all is working ok, this can only mean that we got | |
3001 | * a migrate request from a node that we now see as | |
3002 | * dead. what can we do here? drop it to the floor? */ | |
3003 | spin_unlock(&res->spinlock); | |
3004 | mlog(ML_ERROR, "Got a migrate request, but the " | |
3005 | "lockres is marked as recovering!"); | |
3006 | kmem_cache_free(dlm_mle_cache, mle); | |
3007 | ret = -EINVAL; /* need a better solution */ | |
3008 | goto unlock; | |
3009 | } | |
3010 | res->state |= DLM_LOCK_RES_MIGRATING; | |
3011 | spin_unlock(&res->spinlock); | |
3012 | } | |
3013 | ||
6d98c3cc | 3014 | spin_lock(&dlm->master_lock); |
6714d8e8 KH |
3015 | /* ignore status. only nonzero status would BUG. */ |
3016 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, | |
3017 | name, namelen, | |
3018 | migrate->new_master, | |
3019 | migrate->master); | |
3020 | ||
6714d8e8 | 3021 | spin_unlock(&dlm->master_lock); |
6d98c3cc | 3022 | unlock: |
6714d8e8 KH |
3023 | spin_unlock(&dlm->spinlock); |
3024 | ||
3025 | if (oldmle) { | |
3026 | /* master is known, detach if not already detached */ | |
3027 | dlm_mle_detach_hb_events(dlm, oldmle); | |
3028 | dlm_put_mle(oldmle); | |
3029 | } | |
3030 | ||
3031 | if (res) | |
3032 | dlm_lockres_put(res); | |
3033 | leave: | |
3034 | dlm_put(dlm); | |
3035 | return ret; | |
3036 | } | |
3037 | ||
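One detail worth copying from the handler above: the mle is allocated with GFP_NOFS *before* dlm->spinlock is taken, since no allocation may happen under a spinlock, and it is freed again on the error path. A compact userspace model of that preallocate-then-lock pattern, with pthread mutexes and the helper names as illustrative stand-ins:

#include <stdlib.h>
#include <pthread.h>

struct entry {
        int data;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Allocate outside the lock (allocation may block); only the cheap
 * link step runs locked; the error path discards the preallocation. */
static int add_entry(int (*link_locked)(struct entry *e))
{
        struct entry *e = malloc(sizeof(*e));
        int ret;

        if (!e)
                return -1;      /* kernel: -ENOMEM */

        pthread_mutex_lock(&table_lock);
        ret = link_locked(e);
        pthread_mutex_unlock(&table_lock);

        if (ret)
                free(e);        /* mirrors kmem_cache_free() on failure */
        return ret;
}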
3038 | /* Must be holding dlm->spinlock and dlm->master_lock. |
3039 | * When adding a migration mle, we can clear any other mles |
3040 | * in the master list because we know with certainty that |
3041 | * the master is "master". So we remove any old mle from |
3042 | * the list after setting its master field, and then add |
3043 | * the new migration mle. This way we hold to the rule |
3044 | * of having only one mle for a given lock name at all times. */ |
3045 | static int dlm_add_migration_mle(struct dlm_ctxt *dlm, | |
3046 | struct dlm_lock_resource *res, | |
3047 | struct dlm_master_list_entry *mle, | |
3048 | struct dlm_master_list_entry **oldmle, | |
3049 | const char *name, unsigned int namelen, | |
3050 | u8 new_master, u8 master) | |
3051 | { | |
3052 | int found; | |
3053 | int ret = 0; | |
3054 | ||
3055 | *oldmle = NULL; | |
3056 | ||
6714d8e8 KH |
3057 | assert_spin_locked(&dlm->spinlock); |
3058 | assert_spin_locked(&dlm->master_lock); | |
3059 | ||
3060 | /* caller is responsible for any ref taken here on oldmle */ | |
3061 | found = dlm_find_mle(dlm, oldmle, (char *)name, namelen); | |
3062 | if (found) { | |
3063 | struct dlm_master_list_entry *tmp = *oldmle; | |
3064 | spin_lock(&tmp->spinlock); | |
3065 | if (tmp->type == DLM_MLE_MIGRATION) { | |
3066 | if (master == dlm->node_num) { | |
3067 | /* ah another process raced me to it */ | |
3068 | mlog(0, "tried to migrate %.*s, but some " | |
3069 | "process beat me to it\n", | |
3070 | namelen, name); | |
3071 | ret = -EEXIST; | |
3072 | } else { | |
3073 | /* bad. 2 NODES are trying to migrate! */ | |
3074 | mlog(ML_ERROR, "migration error mle: " | |
3075 | "master=%u new_master=%u // request: " | |
3076 | "master=%u new_master=%u // " | |
3077 | "lockres=%.*s\n", | |
3078 | tmp->master, tmp->new_master, | |
3079 | master, new_master, | |
3080 | namelen, name); | |
3081 | BUG(); | |
3082 | } | |
3083 | } else { | |
3084 | /* this is essentially what assert_master does */ | |
3085 | tmp->master = master; | |
3086 | atomic_set(&tmp->woken, 1); | |
3087 | wake_up(&tmp->wq); | |
1c084577 SM |
3088 | /* remove it so that only one mle will be found */ |
3089 | __dlm_unlink_mle(dlm, tmp); | |
ba2bf218 KH |
3090 | __dlm_mle_detach_hb_events(dlm, tmp); |
3091 | ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; | |
3092 | mlog(0, "%s:%.*s: master=%u, newmaster=%u, " | |
3093 | "telling master to get ref for cleared out mle " | |
3094 | "during migration\n", dlm->name, namelen, name, | |
3095 | master, new_master); | |
6714d8e8 KH |
3096 | } |
3097 | spin_unlock(&tmp->spinlock); | |
3098 | } | |
3099 | ||
3100 | /* now add a migration mle to the tail of the list */ | |
3101 | dlm_init_mle(mle, DLM_MLE_MIGRATION, dlm, res, name, namelen); | |
3102 | mle->new_master = new_master; | |
ba2bf218 KH |
3103 | /* the new master will be sending an assert master for this. |
3104 | * at that point we will get the refmap reference */ | |
6714d8e8 KH |
3105 | mle->master = master; |
3106 | /* do this for consistency with other mle types */ | |
3107 | set_bit(new_master, mle->maybe_map); | |
1c084577 | 3108 | __dlm_insert_mle(dlm, mle); |
6714d8e8 KH |
3109 | |
3110 | return ret; | |
3111 | } | |
3112 | ||
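The invariant the function maintains: under dlm->master_lock there is never more than one mle per lock name, so any pre-existing entry is resolved (master set, waiters woken) and unlinked before the migration mle is inserted. A toy singly-linked-list model of that replace-under-lock step, assuming the caller holds the list lock throughout; struct toy_mle and replace_mle are illustrative names:

struct toy_mle {
        struct toy_mle *next;
        int master;
        int woken;
};

/* caller holds the list lock for the whole operation */
static void replace_mle(struct toy_mle **head, struct toy_mle *old,
                        struct toy_mle *new_mle)
{
        if (old) {
                struct toy_mle **p;

                old->master = new_mle->master;  /* resolve waiters first... */
                old->woken = 1;                 /* ...then mark them woken */
                for (p = head; *p; p = &(*p)->next) {
                        if (*p == old) {
                                *p = old->next; /* unlink the stale entry */
                                break;
                        }
                }
        }
        new_mle->next = *head;                  /* insert the migration mle */
        *head = new_mle;
}

A concurrent lookup under the same lock therefore sees exactly one entry for the name, never both.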
c2cd4a44 SM |
3113 | /* |
3114 | * Sets the owner of the lockres associated with the mle to UNKNOWN. |
3115 | */ | |
3116 | static struct dlm_lock_resource *dlm_reset_mleres_owner(struct dlm_ctxt *dlm, | |
3117 | struct dlm_master_list_entry *mle) | |
3118 | { | |
3119 | struct dlm_lock_resource *res; | |
c2cd4a44 SM |
3120 | |
3121 | /* Find the lockres associated with the mle and set its owner to UNKNOWN */ |
7141514b SM |
3122 | res = __dlm_lookup_lockres(dlm, mle->mname, mle->mnamelen, |
3123 | mle->mnamehash); | |
c2cd4a44 SM |
3124 | if (res) { |
3125 | spin_unlock(&dlm->master_lock); | |
3126 | ||
3127 | /* move lockres onto recovery list */ | |
3128 | spin_lock(&res->spinlock); | |
3129 | dlm_set_lockres_owner(dlm, res, DLM_LOCK_RES_OWNER_UNKNOWN); | |
3130 | dlm_move_lockres_to_recovery_list(dlm, res); | |
3131 | spin_unlock(&res->spinlock); | |
3132 | dlm_lockres_put(res); | |
3133 | ||
3134 | /* about to get rid of mle, detach from heartbeat */ | |
3135 | __dlm_mle_detach_hb_events(dlm, mle); | |
3136 | ||
3137 | /* dump the mle */ | |
3138 | spin_lock(&dlm->master_lock); | |
3139 | __dlm_put_mle(mle); | |
3140 | spin_unlock(&dlm->master_lock); | |
3141 | } | |
3142 | ||
3143 | return res; | |
3144 | } | |
3145 | ||
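Note the lock dance above: dlm->master_lock is dropped before res->spinlock is taken, and reacquired only long enough to drop the mle reference, so the caller must treat a non-NULL return as "your list walk is stale, restart". A sketch of that drop-and-reacquire shape, with pthread mutexes standing in for the two spinlocks and the function names chosen for illustration:

#include <pthread.h>

static pthread_mutex_t master_lock = PTHREAD_MUTEX_INITIALIZER; /* held on entry */
static pthread_mutex_t res_lock = PTHREAD_MUTEX_INITIALIZER;    /* ranks above it */

/* Called with master_lock held; returns nonzero if it was dropped,
 * in which case the caller's list walk is stale and must restart. */
static int reset_owner(void *res, void (*update_res_locked)(void *))
{
        if (!res)
                return 0;               /* nothing found: walk stays valid */

        pthread_mutex_unlock(&master_lock);

        pthread_mutex_lock(&res_lock);
        update_res_locked(res);         /* owner = UNKNOWN, move to recovery */
        pthread_mutex_unlock(&res_lock);

        pthread_mutex_lock(&master_lock);
        /* drop the mle reference here, still under master_lock */
        pthread_mutex_unlock(&master_lock);
        return 1;                       /* caller: goto top */
}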
3146 | static void dlm_clean_migration_mle(struct dlm_ctxt *dlm, | |
3147 | struct dlm_master_list_entry *mle) | |
3148 | { | |
3149 | __dlm_mle_detach_hb_events(dlm, mle); | |
3150 | ||
3151 | spin_lock(&mle->spinlock); | |
3152 | __dlm_unlink_mle(dlm, mle); | |
3153 | atomic_set(&mle->woken, 1); | |
3154 | spin_unlock(&mle->spinlock); | |
3155 | ||
3156 | wake_up(&mle->wq); | |
3157 | } | |
3158 | ||
3159 | static void dlm_clean_block_mle(struct dlm_ctxt *dlm, | |
3160 | struct dlm_master_list_entry *mle, u8 dead_node) | |
3161 | { | |
3162 | int bit; | |
3163 | ||
3164 | BUG_ON(mle->type != DLM_MLE_BLOCK); | |
3165 | ||
3166 | spin_lock(&mle->spinlock); | |
3167 | bit = find_next_bit(mle->maybe_map, O2NM_MAX_NODES, 0); | |
3168 | if (bit != dead_node) { | |
3169 | mlog(0, "mle found, but dead node %u would not have been " | |
3170 | "master\n", dead_node); | |
3171 | spin_unlock(&mle->spinlock); | |
3172 | } else { | |
3173 | /* Must drop the refcount by one since the assert_master will | |
3174 | * never arrive. This may result in the mle being unlinked and | |
3175 | * freed, but there may still be a process waiting in the | |
3176 | * dlmlock path which is fine. */ | |
3177 | mlog(0, "node %u was expected master\n", dead_node); | |
3178 | atomic_set(&mle->woken, 1); | |
3179 | spin_unlock(&mle->spinlock); | |
3180 | wake_up(&mle->wq); | |
3181 | ||
3182 | /* Do not need events any longer, so detach from heartbeat */ | |
3183 | __dlm_mle_detach_hb_events(dlm, mle); | |
3184 | __dlm_put_mle(mle); | |
3185 | } | |
3186 | } | |
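The expected master of a BLOCK mle is simply the lowest set bit of maybe_map, which is why a single find_next_bit(..., 0) suffices above. The same check, modeled with a plain flag array instead of a kernel bitmap:

#include <stdbool.h>

#define MAX_NODES 64

/* A BLOCK mle needs cleanup only when the dead node is the one that
 * would have become master, i.e. the lowest set bit of maybe_map. */
static bool block_mle_needs_cleanup(const bool maybe_map[MAX_NODES],
                                    unsigned int dead_node)
{
        unsigned int n;

        for (n = 0; n < MAX_NODES; n++)
                if (maybe_map[n])
                        return n == dead_node;
        return false;   /* empty map: nobody was expected to master it */
}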
6714d8e8 KH |
3187 | |
3188 | void dlm_clean_master_list(struct dlm_ctxt *dlm, u8 dead_node) | |
3189 | { | |
2ed6c750 | 3190 | struct dlm_master_list_entry *mle; |
6714d8e8 | 3191 | struct dlm_lock_resource *res; |
2ed6c750 | 3192 | struct hlist_head *bucket; |
df53cd3b | 3193 | struct hlist_node *tmp; |
2ed6c750 | 3194 | unsigned int i; |
6714d8e8 | 3195 | |
ef6b689b | 3196 | mlog(0, "dlm=%s, dead node=%u\n", dlm->name, dead_node); |
6714d8e8 KH |
3197 | top: |
3198 | assert_spin_locked(&dlm->spinlock); | |
3199 | ||
3200 | /* clean the master list */ | |
3201 | spin_lock(&dlm->master_lock); | |
2ed6c750 SM |
3202 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { |
3203 | bucket = dlm_master_hash(dlm, i); | |
df53cd3b | 3204 | hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { |
67ae1f06 SM |
3205 | BUG_ON(mle->type != DLM_MLE_BLOCK && |
3206 | mle->type != DLM_MLE_MASTER && | |
3207 | mle->type != DLM_MLE_MIGRATION); | |
3208 | ||
3209 | /* MASTER mles are initiated locally. The waiting | |
3210 | * process will notice the node map change shortly. | |
3211 | * Let that happen as normal. */ | |
3212 | if (mle->type == DLM_MLE_MASTER) | |
3213 | continue; | |
3214 | ||
3215 | /* BLOCK mles are initiated by other nodes. Need to | |
3216 | * clean up if the dead node would have been the | |
3217 | * master. */ | |
3218 | if (mle->type == DLM_MLE_BLOCK) { | |
3219 | dlm_clean_block_mle(dlm, mle, dead_node); | |
3220 | continue; | |
3221 | } | |
6714d8e8 | 3222 | |
67ae1f06 SM |
3223 | /* Everything else is a MIGRATION mle */ |
3224 | ||
3225 | /* The rule for MIGRATION mles is that the master | |
3226 | * becomes UNKNOWN if *either* the original or the new | |
3227 | * master dies. All UNKNOWN lockres' are sent to | |
3228 | * whichever node becomes the recovery master. The new | |
3229 | * master is responsible for determining if there is | |
3230 | * still a master for this lockres, or if it needs to |
3231 | * take over mastery. Either way, this node should | |
3232 | * expect another message to resolve this. */ | |
3233 | ||
3234 | if (mle->master != dead_node && | |
3235 | mle->new_master != dead_node) | |
3236 | continue; | |
3237 | ||
3238 | /* If we have reached this point, this mle needs to be | |
3239 | * removed from the list and freed. */ | |
3240 | dlm_clean_migration_mle(dlm, mle); | |
3241 | ||
3242 | mlog(0, "%s: node %u died during migration from " | |
3243 | "%u to %u!\n", dlm->name, dead_node, mle->master, | |
3244 | mle->new_master); | |
3245 | ||
3246 | /* If we find a lockres associated with the mle, we've | |
3247 | * hit this rare case that messes up our lock ordering. | |
3248 | * If so, we need to drop the master lock so that we can | |
3249 | * take the lockres lock, meaning that we will have to | |
3250 | * restart from the head of list. */ | |
3251 | res = dlm_reset_mleres_owner(dlm, mle); | |
3252 | if (res) | |
3253 | /* restart */ | |
3254 | goto top; | |
3255 | ||
3256 | /* This may be the last reference */ | |
3257 | __dlm_put_mle(mle); | |
6714d8e8 | 3258 | } |
2ed6c750 | 3259 | } |
6714d8e8 KH |
3260 | spin_unlock(&dlm->master_lock); |
3261 | } | |
3262 | ||
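Two iteration rules are at work in dlm_clean_master_list(): the _safe hash walk tolerates unlinking the *current* entry, but it does not tolerate the master lock being dropped mid-scan, which is why a non-NULL result from dlm_reset_mleres_owner() restarts the walk from the top. A small userspace model of that scan-with-restart shape over an array of singly linked buckets; clean_buckets and the handle() return codes are illustrative:

#define NBUCKETS 16

struct item {
        struct item *next;
};

/* handle() returns: 0 = keep entry, 1 = unlink it, 2 = lock was dropped */
static void clean_buckets(struct item *buckets[NBUCKETS],
                          int (*handle)(struct item *))
{
        int i;

restart:
        for (i = 0; i < NBUCKETS; i++) {
                struct item **p = &buckets[i];

                while (*p) {
                        struct item *cur = *p;

                        switch (handle(cur)) {
                        case 2:
                                goto restart;   /* walk is stale: start over */
                        case 1:
                                *p = cur->next; /* unlink current, keep going */
                                break;
                        default:
                                p = &cur->next; /* keep entry, advance */
                        }
                }
        }
}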
6714d8e8 KH |
3263 | int dlm_finish_migration(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, |
3264 | u8 old_master) | |
3265 | { | |
3266 | struct dlm_node_iter iter; | |
3267 | int ret = 0; | |
3268 | ||
3269 | spin_lock(&dlm->spinlock); | |
3270 | dlm_node_iter_init(dlm->domain_map, &iter); | |
3271 | clear_bit(old_master, iter.node_map); | |
3272 | clear_bit(dlm->node_num, iter.node_map); | |
3273 | spin_unlock(&dlm->spinlock); | |
3274 | ||
ba2bf218 KH |
3275 | /* ownership of the lockres is changing. account for the |
3276 | * mastery reference here since old_master will briefly have | |
3277 | * a reference after the migration completes */ | |
3278 | spin_lock(&res->spinlock); | |
8d400b81 | 3279 | dlm_lockres_set_refmap_bit(dlm, res, old_master); |
ba2bf218 KH |
3280 | spin_unlock(&res->spinlock); |
3281 | ||
6714d8e8 KH |
3282 | mlog(0, "now time to do a migrate request to other nodes\n"); |
3283 | ret = dlm_do_migrate_request(dlm, res, old_master, | |
3284 | dlm->node_num, &iter); | |
3285 | if (ret < 0) { | |
3286 | mlog_errno(ret); | |
3287 | goto leave; | |
3288 | } | |
3289 | ||
3290 | mlog(0, "doing assert master of %.*s to all except the original node\n", | |
3291 | res->lockname.len, res->lockname.name); | |
3292 | /* this call now finishes out the nodemap | |
3293 | * even if one or more nodes die */ | |
ba2bf218 | 3294 | ret = dlm_do_assert_master(dlm, res, iter.node_map, |
6714d8e8 KH |
3295 | DLM_ASSERT_MASTER_FINISH_MIGRATION); |
3296 | if (ret < 0) { | |
3297 | /* no longer need to retry. all living nodes contacted. */ | |
3298 | mlog_errno(ret); | |
3299 | ret = 0; | |
3300 | } | |
3301 | ||
3302 | memset(iter.node_map, 0, sizeof(iter.node_map)); | |
3303 | set_bit(old_master, iter.node_map); | |
3304 | mlog(0, "doing assert master of %.*s back to %u\n", | |
3305 | res->lockname.len, res->lockname.name, old_master); | |
ba2bf218 | 3306 | ret = dlm_do_assert_master(dlm, res, iter.node_map, |
6714d8e8 KH |
3307 | DLM_ASSERT_MASTER_FINISH_MIGRATION); |
3308 | if (ret < 0) { | |
3309 | mlog(0, "assert master to original master failed " | |
3310 | "with %d.\n", ret); | |
3311 | /* the only nonzero status here would be because of | |
3312 | * a dead original node. we're done. */ | |
3313 | ret = 0; | |
3314 | } | |
3315 | ||
3316 | /* all done, set the owner, clear the flag */ | |
3317 | spin_lock(&res->spinlock); | |
3318 | dlm_set_lockres_owner(dlm, res, dlm->node_num); | |
3319 | res->state &= ~DLM_LOCK_RES_MIGRATING; | |
3320 | spin_unlock(&res->spinlock); | |
3321 | /* re-dirty it on the new master */ | |
3322 | dlm_kick_thread(dlm, res); | |
3323 | wake_up(&res->wq); | |
3324 | leave: | |
3325 | return ret; | |
3326 | } | |
3327 | ||
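The order of operations in dlm_finish_migration() matters: the refmap bit for the old master is pinned *before* any message goes out, so the brief window in which old_master still references the lockres after ownership flips is always covered. A minimal model of that handoff, with plain fields standing in for the refmap bitmap and owner:

#include <stdbool.h>

#define MAX_NODES 8

struct res_model {
        int owner;
        bool refmap[MAX_NODES]; /* nodes known to hold a reference */
};

/* Pin the old master's reference first; only after the migrate and
 * assert-master rounds (elided here) does ownership actually flip. */
static void finish_migration(struct res_model *res, int self, int old_master)
{
        res->refmap[old_master] = true; /* covers old_master's lingering use */

        /* ... dlm_do_migrate_request() + two assert-master rounds ... */

        res->owner = self;              /* now safe to take ownership */
}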
3328 | /* | |
3329 | * LOCKRES AST REFCOUNT | |
3330 | * this is integral to migration | |
3331 | */ | |
3332 | ||
3333 | /* To signal future intent to call an ast, reserve one ahead of time. |
3334 | * This should be called only after waiting on the lockres |
3335 | * with dlm_wait_on_lockres, and while still holding the |
3336 | * spinlock after the call. */ |
3337 | void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res) | |
3338 | { | |
3339 | assert_spin_locked(&res->spinlock); | |
3340 | if (res->state & DLM_LOCK_RES_MIGRATING) { | |
3341 | __dlm_print_one_lock_resource(res); | |
3342 | } | |
3343 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | |
3344 | ||
3345 | atomic_inc(&res->asts_reserved); | |
3346 | } | |
3347 | ||
3348 | /* | |
3349 | * used to drop the reserved ast, either because it went unused, | |
3350 | * or because the ast/bast was actually called. | |
3351 | * | |
3352 | * also, if there is a pending migration on this lockres, | |
3353 | * and this was the last pending ast on the lockres, | |
3354 | * atomically set the MIGRATING flag before we drop the lock. | |
3355 | * this is how we ensure that migration can proceed with no | |
3356 | * asts in progress. note that it is ok if the state of the | |
3357 | * queues is such that a lock should be granted in the future | |
3358 | * or that a bast should be fired, because the new master will | |
3359 | * shuffle the lists on this lockres as soon as it is migrated. | |
3360 | */ | |
3361 | void dlm_lockres_release_ast(struct dlm_ctxt *dlm, | |
3362 | struct dlm_lock_resource *res) | |
3363 | { | |
3364 | if (!atomic_dec_and_lock(&res->asts_reserved, &res->spinlock)) | |
3365 | return; | |
3366 | ||
3367 | if (!res->migration_pending) { | |
3368 | spin_unlock(&res->spinlock); | |
3369 | return; | |
3370 | } | |
3371 | ||
3372 | BUG_ON(res->state & DLM_LOCK_RES_MIGRATING); | |
3373 | res->migration_pending = 0; | |
3374 | res->state |= DLM_LOCK_RES_MIGRATING; | |
3375 | spin_unlock(&res->spinlock); | |
3376 | wake_up(&res->wq); | |
3377 | wake_up(&dlm->migration_wq); | |
3378 | } | |
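Together, __dlm_lockres_reserve_ast() and dlm_lockres_release_ast() form a refcount whose *final* drop atomically flips the lockres into MIGRATING, which is what guarantees migration starts with no asts in flight. A userspace model with a mutex standing in for the spinlock; atomic_dec_and_lock() means "decrement, and take the lock only if the count hit zero", which this sketch simplifies by holding the lock across the decrement:

#include <pthread.h>
#include <stdbool.h>

struct ast_gate {
        pthread_mutex_t lock;
        int asts_reserved;
        bool migration_pending;
        bool migrating;         /* models DLM_LOCK_RES_MIGRATING */
};

static void reserve_ast(struct ast_gate *g)
{
        pthread_mutex_lock(&g->lock);
        g->asts_reserved++;     /* must never happen once migrating is set */
        pthread_mutex_unlock(&g->lock);
}

/* Only the final release may flip the gate. */
static void release_ast(struct ast_gate *g)
{
        pthread_mutex_lock(&g->lock);
        if (--g->asts_reserved != 0 || !g->migration_pending) {
                pthread_mutex_unlock(&g->lock);
                return;
        }
        g->migration_pending = false;
        g->migrating = true;    /* no new ast can sneak in between */
        pthread_mutex_unlock(&g->lock);
        /* kernel: wake_up(&res->wq); wake_up(&dlm->migration_wq); */
}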
5dad6c39 SE |
3379 | |
3380 | void dlm_force_free_mles(struct dlm_ctxt *dlm) | |
3381 | { | |
3382 | int i; | |
3383 | struct hlist_head *bucket; | |
3384 | struct dlm_master_list_entry *mle; | |
df53cd3b | 3385 | struct hlist_node *tmp; |
5dad6c39 SE |
3386 | |
3387 | /* | |
3388 | * We notified all other nodes that we are exiting the domain and | |
3389 | * set the dlm state to DLM_CTXT_LEAVING. If any mles are still |
3390 | * around, we force-free them and wake any processes that are |
3391 | * waiting on the mles. |
3392 | */ | |
3393 | spin_lock(&dlm->spinlock); | |
3394 | spin_lock(&dlm->master_lock); | |
3395 | ||
3396 | BUG_ON(dlm->dlm_state != DLM_CTXT_LEAVING); | |
3397 | BUG_ON((find_next_bit(dlm->domain_map, O2NM_MAX_NODES, 0) < O2NM_MAX_NODES)); | |
3398 | ||
3399 | for (i = 0; i < DLM_HASH_BUCKETS; i++) { | |
3400 | bucket = dlm_master_hash(dlm, i); | |
df53cd3b | 3401 | hlist_for_each_entry_safe(mle, tmp, bucket, master_hash_node) { |
5dad6c39 SE |
3402 | if (mle->type != DLM_MLE_BLOCK) { |
3403 | mlog(ML_ERROR, "bad mle: %p\n", mle); | |
3404 | dlm_print_one_mle(mle); | |
3405 | } | |
3406 | atomic_set(&mle->woken, 1); | |
3407 | wake_up(&mle->wq); | |
3408 | ||
3409 | __dlm_unlink_mle(dlm, mle); | |
3410 | __dlm_mle_detach_hb_events(dlm, mle); | |
3411 | __dlm_put_mle(mle); | |
3412 | } | |
3413 | } | |
3414 | spin_unlock(&dlm->master_lock); | |
3415 | spin_unlock(&dlm->spinlock); | |
3416 | } |
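The two BUG_ON()s above pin down the only state in which force-freeing is legal: the dlm is in DLM_CTXT_LEAVING and the domain has been fully left, with no peer bits remaining in domain_map. The same preconditions expressed as runnable assertions, with illustrative names:

#include <assert.h>
#include <stdbool.h>

enum ctxt_state { CTXT_JOINED, CTXT_LEAVING };

/* Mirrors the BUG_ON() checks: a surviving peer or a wrong state
 * means mles may still be legitimately in use. */
static void check_force_free_ok(enum ctxt_state state,
                                const bool domain_map[], int nnodes)
{
        int i;

        assert(state == CTXT_LEAVING);
        for (i = 0; i < nnodes; i++)
                assert(!domain_map[i]);
}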