/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * userdlm.c
 *
 * Code which implements the kernel side of a minimal userspace
 * interface to our DLM.
 *
 * Many of the functions here are pared down versions of dlmglue.c
 * functions.
 *
 * Copyright (C) 2003, 2004 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/signal.h>

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include "cluster/nodemanager.h"
#include "cluster/heartbeat.h"
#include "cluster/tcp.h"

#include "dlmapi.h"

#include "userdlm.h"

#define MLOG_MASK_PREFIX ML_DLMFS
#include "cluster/masklog.h"
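
/*
 * As used throughout this file, the USER_LOCK_* bits in l_flags
 * (always manipulated under l_lock) track lockres state:
 *
 *	USER_LOCK_BUSY		a dlmlock/dlmunlock call is in flight;
 *				cleared by the matching ast.
 *	USER_LOCK_BLOCKED	another node wants an incompatible
 *				level; a downconvert is pending.
 *	USER_LOCK_QUEUED	the lockres is queued to the unblock
 *				worker.
 *	USER_LOCK_ATTACHED	a lock exists on the DLM side.
 *	USER_LOCK_IN_TEARDOWN	the lock is being destroyed.
 *	USER_LOCK_IN_CANCEL	a cancel of an in-flight convert has
 *				been issued.
 */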
static inline int user_check_wait_flag(struct user_lock_res *lockres,
				       int flag)
{
	int ret;

	spin_lock(&lockres->l_lock);
	ret = lockres->l_flags & flag;
	spin_unlock(&lockres->l_lock);

	return ret;
}
static inline void user_wait_on_busy_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BUSY));
}
static inline void user_wait_on_blocked_lock(struct user_lock_res *lockres)
{
	wait_event(lockres->l_event,
		   !user_check_wait_flag(lockres, USER_LOCK_BLOCKED));
}
/* I heart container_of... */
static inline struct dlm_ctxt *
dlm_ctxt_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return ip->ip_dlm;
}

static struct inode *
user_dlm_inode_from_user_lockres(struct user_lock_res *lockres)
{
	struct dlmfs_inode_private *ip;

	ip = container_of(lockres,
			  struct dlmfs_inode_private,
			  ip_lockres);
	return &ip->ip_vfs_inode;
}
static inline void user_recover_from_dlm_error(struct user_lock_res *lockres)
{
	spin_lock(&lockres->l_lock);
	lockres->l_flags &= ~USER_LOCK_BUSY;
	spin_unlock(&lockres->l_lock);
}
#define user_log_dlm_error(_func, _stat, _lockres) do {			\
	mlog(ML_ERROR, "Dlm error \"%s\" while calling %s on "		\
		"resource %s: %s\n", dlm_errname(_stat), _func,		\
		_lockres->l_name, dlm_errmsg(_stat));			\
} while (0)
/* WARNING: This function lives in a world where the only three lock
 * levels are EX, PR, and NL. It *will* have to be adjusted when more
 * lock types are added. */
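/* The mapping follows from standard DLM compatibility: an EX holder
 * forces everyone else down to NL, a PR holder allows others up to
 * PR, and NL blocks nothing:
 *
 *	blocking level	highest compatible level
 *	LKM_EXMODE	LKM_NLMODE
 *	LKM_PRMODE	LKM_PRMODE
 *	LKM_NLMODE	LKM_EXMODE
 */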
static inline int user_highest_compat_lock_level(int level)
{
	int new_level = LKM_EXMODE;

	if (level == LKM_EXMODE)
		new_level = LKM_NLMODE;
	else if (level == LKM_PRMODE)
		new_level = LKM_PRMODE;

	return new_level;
}
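
/* Fired by the DLM when a lock or convert request has been granted.
 * Records the new level, clears USER_LOCK_BUSY, and wakes anyone
 * sleeping in user_wait_on_busy_lock(). If this ast completes a
 * downconvert which satisfies the blocking request, it also clears
 * USER_LOCK_BLOCKED. */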
static void user_ast(void *opaque)
{
	struct user_lock_res *lockres = opaque;
	struct dlm_lockstatus *lksb;

	mlog(0, "AST fired for lockres %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);

	lksb = &(lockres->l_lksb);
	if (lksb->status != DLM_NORMAL) {
		mlog(ML_ERROR, "lksb status value of %u on lockres %s\n",
		     lksb->status, lockres->l_name);
		spin_unlock(&lockres->l_lock);
		return;
	}

	mlog_bug_on_msg(lockres->l_requested == LKM_IVMODE,
			"Lockres %s, requested ivmode. flags 0x%x\n",
			lockres->l_name, lockres->l_flags);

	/* we're downconverting. */
	if (lockres->l_requested < lockres->l_level) {
		if (lockres->l_requested <=
		    user_highest_compat_lock_level(lockres->l_blocking)) {
			lockres->l_blocking = LKM_NLMODE;
			lockres->l_flags &= ~USER_LOCK_BLOCKED;
		}
	}

	lockres->l_level = lockres->l_requested;
	lockres->l_requested = LKM_IVMODE;
	lockres->l_flags |= USER_LOCK_ATTACHED;
	lockres->l_flags &= ~USER_LOCK_BUSY;

	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
static inline void user_dlm_grab_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;

	inode = user_dlm_inode_from_user_lockres(lockres);
	if (!igrab(inode))
		BUG();
}

static void user_dlm_unblock_lock(void *opaque);
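
/* Queue the lockres to the unblock worker exactly once. A reference
 * on the owning inode is taken first so the lockres cannot go away
 * while the work item is pending; user_dlm_unblock_lock() drops the
 * reference when it is done. */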
static void __user_dlm_queue_lockres(struct user_lock_res *lockres)
{
	if (!(lockres->l_flags & USER_LOCK_QUEUED)) {
		user_dlm_grab_inode_ref(lockres);

		INIT_WORK(&lockres->l_work, user_dlm_unblock_lock,
			  lockres);

		queue_work(user_dlm_worker, &lockres->l_work);
		lockres->l_flags |= USER_LOCK_QUEUED;
	}
}
static void __user_dlm_cond_queue_lockres(struct user_lock_res *lockres)
{
	int queue = 0;

	if (!(lockres->l_flags & USER_LOCK_BLOCKED))
		return;

	switch (lockres->l_blocking) {
	case LKM_EXMODE:
		if (!lockres->l_ex_holders && !lockres->l_ro_holders)
			queue = 1;
		break;
	case LKM_PRMODE:
		if (!lockres->l_ex_holders)
			queue = 1;
		break;
	default:
		BUG();
	}

	if (queue)
		__user_dlm_queue_lockres(lockres);
}
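
/* Fired by the DLM when another node requests a level which conflicts
 * with ours. Record the highest blocking level seen and kick the
 * unblock worker so it can attempt a downconvert. */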
static void user_bast(void *opaque, int level)
{
	struct user_lock_res *lockres = opaque;

	mlog(0, "Blocking AST fired for lockres %s. Blocking level %d\n",
	     lockres->l_name, level);

	spin_lock(&lockres->l_lock);
	lockres->l_flags |= USER_LOCK_BLOCKED;
	if (level > lockres->l_blocking)
		lockres->l_blocking = level;

	__user_dlm_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
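
/* Fired by the DLM when a dlmunlock request completes. This runs both
 * for real unlocks (teardown) and for cancels of an in-flight
 * convert; DLM_CANCELGRANT means the convert was granted before the
 * cancel could take effect. */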
static void user_unlock_ast(void *opaque, enum dlm_status status)
{
	struct user_lock_res *lockres = opaque;

	mlog(0, "UNLOCK AST called on lock %s\n", lockres->l_name);

	if (status != DLM_NORMAL && status != DLM_CANCELGRANT)
		mlog(ML_ERROR, "Dlm returns status %d\n", status);

	spin_lock(&lockres->l_lock);
	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN)
		lockres->l_level = LKM_IVMODE;
	else if (status == DLM_CANCELGRANT) {
		mlog(0, "Lock %s, cancel fails, flags 0x%x\n",
		     lockres->l_name, lockres->l_flags);
		/* We tried to cancel a convert request, but it was
		 * already granted. Don't clear the busy flag - the
		 * ast should've done this already. */
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		goto out_noclear;
	} else {
		BUG_ON(!(lockres->l_flags & USER_LOCK_IN_CANCEL));
		/* Cancel succeeded, we want to re-queue */
		mlog(0, "Lock %s, cancel succeeds, flags 0x%x\n",
		     lockres->l_name, lockres->l_flags);
		lockres->l_requested = LKM_IVMODE; /* cancel an
						    * upconvert
						    * request. */
		lockres->l_flags &= ~USER_LOCK_IN_CANCEL;
		/* we want the unblock thread to look at it again
		 * now. */
		if (lockres->l_flags & USER_LOCK_BLOCKED)
			__user_dlm_queue_lockres(lockres);
	}

	lockres->l_flags &= ~USER_LOCK_BUSY;
out_noclear:
	spin_unlock(&lockres->l_lock);

	wake_up(&lockres->l_event);
}
static inline void user_dlm_drop_inode_ref(struct user_lock_res *lockres)
{
	struct inode *inode;

	inode = user_dlm_inode_from_user_lockres(lockres);
	iput(inode);
}
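
/* Worker function for downconverts, run from user_dlm_worker. The
 * decision tree: bail if we are no longer blocked or are in teardown;
 * if a lock call is still busy, try to cancel it instead; if local
 * holders are still incompatible with the blocking level, exit and
 * let the final user_dlm_cluster_unlock() re-queue us; otherwise
 * issue the downconvert. */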
static void user_dlm_unblock_lock(void *opaque)
{
	int new_level, status;
	struct user_lock_res *lockres = (struct user_lock_res *) opaque;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	mlog(0, "processing lockres %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);

	mlog_bug_on_msg(!(lockres->l_flags & USER_LOCK_QUEUED),
			"Lockres %s, flags 0x%x\n",
			lockres->l_name, lockres->l_flags);

	/* notice that we don't clear USER_LOCK_BLOCKED here. If it's
	 * set, we want user_ast to clear it. */
	lockres->l_flags &= ~USER_LOCK_QUEUED;

	/* It's valid to get here and no longer be blocked - if we get
	 * several basts in a row, we might be queued by the first
	 * one, the unblock thread might run and clear the queued
	 * flag, and finally we might get another bast which re-queues
	 * us before our ast for the downconvert is called. */
	if (!(lockres->l_flags & USER_LOCK_BLOCKED)) {
		mlog(0, "Lockres %s, flags 0x%x: queued but not blocking\n",
		     lockres->l_name, lockres->l_flags);
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_IN_TEARDOWN) {
		mlog(0, "lock is in teardown so we do nothing\n");
		spin_unlock(&lockres->l_lock);
		goto drop_ref;
	}

	if (lockres->l_flags & USER_LOCK_BUSY) {
		mlog(0, "Cancel lock %s, flags 0x%x\n",
		     lockres->l_name, lockres->l_flags);

		if (lockres->l_flags & USER_LOCK_IN_CANCEL) {
			spin_unlock(&lockres->l_lock);
			goto drop_ref;
		}

		lockres->l_flags |= USER_LOCK_IN_CANCEL;
		spin_unlock(&lockres->l_lock);

		status = dlmunlock(dlm,
				   &lockres->l_lksb,
				   LKM_CANCEL,
				   user_unlock_ast,
				   lockres);
		if (status != DLM_NORMAL)
			user_log_dlm_error("dlmunlock", status, lockres);
		goto drop_ref;
	}

	/* If there are still incompat holders, we can exit safely
	 * without worrying about re-queueing this lock as that will
	 * happen on the last call to user_cluster_unlock. */
	if ((lockres->l_blocking == LKM_EXMODE)
	    && (lockres->l_ex_holders || lockres->l_ro_holders)) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "can't downconvert for ex: ro = %u, ex = %u\n",
		     lockres->l_ro_holders, lockres->l_ex_holders);
		goto drop_ref;
	}

	if ((lockres->l_blocking == LKM_PRMODE)
	    && lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "can't downconvert for pr: ex = %u\n",
		     lockres->l_ex_holders);
		goto drop_ref;
	}

	/* yay, we can downconvert now. */
	new_level = user_highest_compat_lock_level(lockres->l_blocking);
	lockres->l_requested = new_level;
	lockres->l_flags |= USER_LOCK_BUSY;
	mlog(0, "Downconvert lock from %d to %d\n",
	     lockres->l_level, new_level);
	spin_unlock(&lockres->l_lock);

	/* need lock downconvert request now... */
	status = dlmlock(dlm,
			 new_level,
			 &lockres->l_lksb,
			 LKM_CONVERT|LKM_VALBLK,
			 lockres->l_name,
			 user_ast,
			 lockres,
			 user_bast);
	if (status != DLM_NORMAL) {
		user_log_dlm_error("dlmlock", status, lockres);
		user_recover_from_dlm_error(lockres);
	}

drop_ref:
	user_dlm_drop_inode_ref(lockres);
}
static inline void user_dlm_inc_holders(struct user_lock_res *lockres,
					int level)
{
	switch (level) {
	case LKM_EXMODE:
		lockres->l_ex_holders++;
		break;
	case LKM_PRMODE:
		lockres->l_ro_holders++;
		break;
	default:
		BUG();
	}
}
/* predict what lock level we'll be dropping down to on behalf
 * of another node, and return true if the currently wanted
 * level will be compatible with it. */
static inline int
user_may_continue_on_blocked_lock(struct user_lock_res *lockres,
				  int wanted)
{
	BUG_ON(!(lockres->l_flags & USER_LOCK_BLOCKED));

	return wanted <= user_highest_compat_lock_level(lockres->l_blocking);
}
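
/* Take a cluster lock at the given level (LKM_EXMODE or LKM_PRMODE).
 * Sleeps until the lock can be granted, retrying the whole sequence
 * after any wait. Returns 0 on success, -ERESTARTSYS if interrupted
 * by a signal, -EINVAL on a bad level or DLM error, and -EAGAIN if
 * LKM_NOQUEUE was passed and the lock could not be granted at once.
 *
 * A typical caller pairs it with user_dlm_cluster_unlock(), e.g.:
 *
 *	status = user_dlm_cluster_lock(lockres, LKM_PRMODE, 0);
 *	if (!status) {
 *		... read the protected resource ...
 *		user_dlm_cluster_unlock(lockres, LKM_PRMODE);
 *	}
 */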
int user_dlm_cluster_lock(struct user_lock_res *lockres,
			  int level,
			  int lkm_flags)
{
	int status, local_flags;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	if (level != LKM_EXMODE &&
	    level != LKM_PRMODE) {
		mlog(ML_ERROR, "lockres %s: invalid request!\n",
		     lockres->l_name);
		status = -EINVAL;
		goto bail;
	}

	mlog(0, "lockres %s: asking for %s lock, passed flags = 0x%x\n",
	     lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE",
	     lkm_flags);

again:
	if (signal_pending(current)) {
		status = -ERESTARTSYS;
		goto bail;
	}

	spin_lock(&lockres->l_lock);

	/* We only compare against the currently granted level
	 * here. If the lock is blocked waiting on a downconvert,
	 * we'll get caught below. */
	if ((lockres->l_flags & USER_LOCK_BUSY) &&
	    (level > lockres->l_level)) {
		/* is someone sitting in dlm_lock? If so, wait on
		 * them. */
		spin_unlock(&lockres->l_lock);

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	if ((lockres->l_flags & USER_LOCK_BLOCKED) &&
	    (!user_may_continue_on_blocked_lock(lockres, level))) {
		/* is the lock currently blocked on behalf of
		 * another node? */
		spin_unlock(&lockres->l_lock);

		user_wait_on_blocked_lock(lockres);
		goto again;
	}

	if (level > lockres->l_level) {
		local_flags = lkm_flags | LKM_VALBLK;
		if (lockres->l_level != LKM_IVMODE)
			local_flags |= LKM_CONVERT;

		lockres->l_requested = level;
		lockres->l_flags |= USER_LOCK_BUSY;
		spin_unlock(&lockres->l_lock);

		BUG_ON(level == LKM_IVMODE);
		BUG_ON(level == LKM_NLMODE);

		mlog(0, "lock %s, get lock from %d to level = %d\n",
		     lockres->l_name, lockres->l_level, level);

		/* call dlm_lock to upgrade lock now */
		status = dlmlock(dlm,
				 level,
				 &lockres->l_lksb,
				 local_flags,
				 lockres->l_name,
				 user_ast,
				 lockres,
				 user_bast);
		if (status != DLM_NORMAL) {
			if ((lkm_flags & LKM_NOQUEUE) &&
			    (status == DLM_NOTQUEUED))
				status = -EAGAIN;
			else {
				user_log_dlm_error("dlmlock", status, lockres);
				status = -EINVAL;
			}
			user_recover_from_dlm_error(lockres);
			goto bail;
		}

		mlog(0, "lock %s, successful return from dlmlock\n",
		     lockres->l_name);

		user_wait_on_busy_lock(lockres);
		goto again;
	}

	user_dlm_inc_holders(lockres, level);
	spin_unlock(&lockres->l_lock);

	mlog(0, "lockres %s: Got %s lock!\n", lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");

	status = 0;
bail:
	return status;
}
static inline void user_dlm_dec_holders(struct user_lock_res *lockres,
					int level)
{
	switch (level) {
	case LKM_EXMODE:
		BUG_ON(!lockres->l_ex_holders);
		lockres->l_ex_holders--;
		break;
	case LKM_PRMODE:
		BUG_ON(!lockres->l_ro_holders);
		lockres->l_ro_holders--;
		break;
	default:
		BUG();
	}
}
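
/* Drop one holder reference at the given level. If that was the last
 * incompatible holder and another node is blocked on us, this is the
 * point where the lockres gets re-queued to the unblock worker for
 * the deferred downconvert. */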
void user_dlm_cluster_unlock(struct user_lock_res *lockres,
			     int level)
{
	if (level != LKM_EXMODE &&
	    level != LKM_PRMODE) {
		mlog(ML_ERROR, "lockres %s: invalid request!\n", lockres->l_name);
		return;
	}

	mlog(0, "lockres %s: dropping %s lock\n", lockres->l_name,
	     (level == LKM_EXMODE) ? "LKM_EXMODE" : "LKM_PRMODE");

	spin_lock(&lockres->l_lock);
	user_dlm_dec_holders(lockres, level);
	__user_dlm_cond_queue_lockres(lockres);
	spin_unlock(&lockres->l_lock);
}
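
/* The lock value block (LVB) is a small buffer which travels with the
 * lock and lets userspace attach data to it. Writing requires the
 * lock to be held at LKM_EXMODE; reading requires at least
 * LKM_PRMODE. Both are enforced with BUG_ON below. */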
void user_dlm_write_lvb(struct inode *inode,
			const char *val,
			unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb = lockres->l_lksb.lvb;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < LKM_EXMODE);
	memcpy(lvb, val, len);

	spin_unlock(&lockres->l_lock);
}
void user_dlm_read_lvb(struct inode *inode,
		       char *val,
		       unsigned int len)
{
	struct user_lock_res *lockres = &DLMFS_I(inode)->ip_lockres;
	char *lvb = lockres->l_lksb.lvb;

	BUG_ON(len > DLM_LVB_LEN);

	spin_lock(&lockres->l_lock);

	BUG_ON(lockres->l_level < LKM_PRMODE);
	memcpy(val, lvb, len);

	spin_unlock(&lockres->l_lock);
}
void user_dlm_lock_res_init(struct user_lock_res *lockres,
			    struct dentry *dentry)
{
	memset(lockres, 0, sizeof(*lockres));

	spin_lock_init(&lockres->l_lock);
	init_waitqueue_head(&lockres->l_event);
	lockres->l_level = LKM_IVMODE;
	lockres->l_requested = LKM_IVMODE;
	lockres->l_blocking = LKM_IVMODE;

	/* should have been checked before getting here. */
	BUG_ON(dentry->d_name.len >= USER_DLM_LOCK_ID_MAX_LEN);

	memcpy(lockres->l_name,
	       dentry->d_name.name,
	       dentry->d_name.len);
}
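
/* Tear down the DLM side of a lockres. Waits out any in-flight lock
 * calls, refuses to proceed while local holders remain (-EBUSY), and
 * then unlocks with LKM_VALBLK so the final lock value block is
 * pushed to the DLM. Returns 0 if the lock was destroyed or was
 * never attached. */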
int user_dlm_destroy_lock(struct user_lock_res *lockres)
{
	int status = -EBUSY;
	struct dlm_ctxt *dlm = dlm_ctxt_from_user_lockres(lockres);

	mlog(0, "asked to destroy %s\n", lockres->l_name);

	spin_lock(&lockres->l_lock);
	while (lockres->l_flags & USER_LOCK_BUSY) {
		spin_unlock(&lockres->l_lock);

		mlog(0, "lock %s is busy\n", lockres->l_name);

		user_wait_on_busy_lock(lockres);

		spin_lock(&lockres->l_lock);
	}

	if (lockres->l_ro_holders || lockres->l_ex_holders) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "lock %s has holders\n", lockres->l_name);
		goto bail;
	}

	status = 0;
	if (!(lockres->l_flags & USER_LOCK_ATTACHED)) {
		spin_unlock(&lockres->l_lock);
		mlog(0, "lock %s is not attached\n", lockres->l_name);
		goto bail;
	}

	lockres->l_flags &= ~USER_LOCK_ATTACHED;
	lockres->l_flags |= USER_LOCK_BUSY;
	lockres->l_flags |= USER_LOCK_IN_TEARDOWN;
	spin_unlock(&lockres->l_lock);

	mlog(0, "unlocking lockres %s\n", lockres->l_name);
	status = dlmunlock(dlm,
			   &lockres->l_lksb,
			   LKM_VALBLK,
			   user_unlock_ast,
			   lockres);
	if (status != DLM_NORMAL) {
		user_log_dlm_error("dlmunlock", status, lockres);
		status = -EINVAL;
		goto bail;
	}

	user_wait_on_busy_lock(lockres);

	status = 0;
bail:
	return status;
}
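
/* The caller's name (in dlmfs, the name of the directory being
 * created) becomes the DLM domain name, and the domain key is a
 * crc32 of that name, so nodes using the same name join the same
 * domain. */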
struct dlm_ctxt *user_dlm_register_context(struct qstr *name)
{
	struct dlm_ctxt *dlm;
	u32 dlm_key;
	char *domain;

	domain = kmalloc(name->len + 1, GFP_KERNEL);
	if (!domain) {
		mlog_errno(-ENOMEM);
		return ERR_PTR(-ENOMEM);
	}

	dlm_key = crc32_le(0, name->name, name->len);

	snprintf(domain, name->len + 1, "%.*s", name->len, name->name);

	dlm = dlm_register_domain(domain, dlm_key);
	if (IS_ERR(dlm))
		mlog_errno(PTR_ERR(dlm));

	kfree(domain);
	return dlm;
}
void user_dlm_unregister_context(struct dlm_ctxt *dlm)
{
	dlm_unregister_domain(dlm);
}