/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * dlmcommon.h
 *
 * Copyright (C) 2004 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 */

#ifndef DLMCOMMON_H
#define DLMCOMMON_H

#include <linux/kref.h>

#define DLM_HB_NODE_DOWN_PRI		(0xf000000)
#define DLM_HB_NODE_UP_PRI		(0x8000000)

#define DLM_LOCKID_NAME_MAX		32

#define DLM_DOMAIN_NAME_MAX_LEN		255
#define DLM_LOCK_RES_OWNER_UNKNOWN	O2NM_MAX_NODES
#define DLM_THREAD_SHUFFLE_INTERVAL	5	// flush everything every 5 passes
#define DLM_THREAD_MS			200	// flush at least every 200 ms

#define DLM_HASH_BITS	7
#define DLM_HASH_SIZE	(1 << DLM_HASH_BITS)
#define DLM_HASH_MASK	(DLM_HASH_SIZE - 1)

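/*
 * Illustrative sketch, not a definition used elsewhere: a lookup maps
 * a lock name into one of the DLM_HASH_SIZE bucket list heads hanging
 * off dlm->resources.  Assuming full_name_hash() is the hash in use,
 * the bucket selection would look like:
 *
 *	unsigned int hash = full_name_hash(name, len);
 *	struct list_head *bucket = &dlm->resources[hash & DLM_HASH_MASK];
 *
 * followed by a walk of the bucket comparing name and length.
 */
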
enum dlm_ast_type {
	DLM_AST = 0,
	DLM_BAST,
	DLM_ASTUNLOCK
};


#define LKM_VALID_FLAGS (LKM_VALBLK | LKM_CONVERT | LKM_UNLOCK | \
			 LKM_CANCEL | LKM_INVVALBLK | LKM_FORCE | \
			 LKM_RECOVERY | LKM_LOCAL | LKM_NOQUEUE)

#define DLM_RECOVERY_LOCK_NAME		"$RECOVERY"
#define DLM_RECOVERY_LOCK_NAME_LEN	9

static inline int dlm_is_recovery_lock(const char *lock_name, int name_len)
{
	if (name_len == DLM_RECOVERY_LOCK_NAME_LEN &&
	    memcmp(lock_name, DLM_RECOVERY_LOCK_NAME, name_len) == 0)
		return 1;
	return 0;
}

#define DLM_RECO_STATE_ACTIVE	0x0001

struct dlm_recovery_ctxt
{
	struct list_head resources;
	struct list_head received;
	struct list_head node_data;
	u8  new_master;
	u8  dead_node;
	u16 state;
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	wait_queue_head_t event;
};

enum dlm_ctxt_state {
	DLM_CTXT_NEW = 0,
	DLM_CTXT_JOINED,
	DLM_CTXT_IN_SHUTDOWN,
	DLM_CTXT_LEAVING,
};

struct dlm_ctxt
{
	struct list_head list;
	struct list_head *resources;
	struct list_head dirty_list;
	struct list_head purge_list;
	struct list_head pending_asts;
	struct list_head pending_basts;
	unsigned int purge_count;
	spinlock_t spinlock;
	spinlock_t ast_lock;
	char *name;
	u8 node_num;
	u32 key;
	u8  joining_node;
	wait_queue_head_t dlm_join_events;
	unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	struct dlm_recovery_ctxt reco;
	spinlock_t master_lock;
	struct list_head master_list;
	struct list_head mle_hb_events;

	/* these give a really vague idea of the system load */
	atomic_t local_resources;
	atomic_t remote_resources;
	atomic_t unknown_resources;

	/* NOTE: Next three are protected by dlm_domain_lock */
	struct kref dlm_refs;
	enum dlm_ctxt_state dlm_state;
	unsigned int num_joins;

	struct o2hb_callback_func dlm_hb_up;
	struct o2hb_callback_func dlm_hb_down;
	struct task_struct *dlm_thread_task;
	struct task_struct *dlm_reco_thread_task;
	wait_queue_head_t dlm_thread_wq;
	wait_queue_head_t dlm_reco_thread_wq;
	wait_queue_head_t ast_wq;
	wait_queue_head_t migration_wq;

	struct work_struct dispatched_work;
	struct list_head work_list;
	spinlock_t work_lock;
	struct list_head dlm_domain_handlers;
	struct list_head dlm_eviction_callbacks;
};

/* these keventd work queue items are for less-frequently
 * called functions that cannot be directly called from the
 * net message handlers for some reason, usually because
 * they need to send net messages of their own. */
void dlm_dispatch_work(void *data);

struct dlm_lock_resource;
struct dlm_work_item;

typedef void (dlm_workfunc_t)(struct dlm_work_item *, void *);

struct dlm_request_all_locks_priv
{
	u8 reco_master;
	u8 dead_node;
};

struct dlm_mig_lockres_priv
{
	struct dlm_lock_resource *lockres;
	u8 real_master;
};

struct dlm_assert_master_priv
{
	struct dlm_lock_resource *lockres;
	u8 request_from;
	u32 flags;
	unsigned ignore_higher:1;
};


struct dlm_work_item
{
	struct list_head list;
	dlm_workfunc_t *func;
	struct dlm_ctxt *dlm;
	void *data;
	union {
		struct dlm_request_all_locks_priv ral;
		struct dlm_mig_lockres_priv ml;
		struct dlm_assert_master_priv am;
	} u;
};

static inline void dlm_init_work_item(struct dlm_ctxt *dlm,
				      struct dlm_work_item *i,
				      dlm_workfunc_t *f, void *data)
{
	memset(i, 0, sizeof(*i));
	i->func = f;
	INIT_LIST_HEAD(&i->list);
	i->data = data;
	i->dlm = dlm;  /* must have already done a dlm_grab on this! */
}

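/*
 * Illustrative usage sketch (some_worker_func is a placeholder, not
 * a real symbol): a net message handler that must send messages of
 * its own would typically defer via a work item, roughly:
 *
 *	item = kcalloc(1, sizeof(*item), GFP_NOFS);
 *	dlm_init_work_item(dlm, item, some_worker_func, NULL);
 *	spin_lock(&dlm->work_lock);
 *	list_add_tail(&item->list, &dlm->work_list);
 *	spin_unlock(&dlm->work_lock);
 *	schedule_work(&dlm->dispatched_work);
 *
 * where the caller already holds a reference from dlm_grab(), per
 * the comment in dlm_init_work_item above.
 */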


static inline void __dlm_set_joining_node(struct dlm_ctxt *dlm,
					  u8 node)
{
	assert_spin_locked(&dlm->spinlock);

	dlm->joining_node = node;
	wake_up(&dlm->dlm_join_events);
}

#define DLM_LOCK_RES_UNINITED		0x00000001
#define DLM_LOCK_RES_RECOVERING		0x00000002
#define DLM_LOCK_RES_READY		0x00000004
#define DLM_LOCK_RES_DIRTY		0x00000008
#define DLM_LOCK_RES_IN_PROGRESS	0x00000010
#define DLM_LOCK_RES_MIGRATING		0x00000020

#define DLM_PURGE_INTERVAL_MS	(8 * 1000)

struct dlm_lock_resource
{
	/* WARNING: Please see the comment in dlm_init_lockres before
	 * adding fields here. */
	struct list_head list;
	struct kref refs;

	/* please keep these next 3 in this order
	 * some funcs want to iterate over all lists */
	struct list_head granted;
	struct list_head converting;
	struct list_head blocked;

	struct list_head dirty;
	struct list_head recovering;	// dlm_recovery_ctxt.resources list

	/* unused lock resources have their last_used stamped and are
	 * put on a list for the dlm thread to run. */
	struct list_head purge;
	unsigned long last_used;

	unsigned migration_pending:1;
	atomic_t asts_reserved;
	spinlock_t spinlock;
	wait_queue_head_t wq;
	u8  owner;	// node which owns the lock resource, or unknown
	u16 state;
	struct qstr lockname;
	char lvb[DLM_LVB_LEN];
};

struct dlm_migratable_lock
{
	__be64 cookie;

	/* these 3 are just padding for the in-memory structure, but
	 * list and flags are actually used when sent over the wire */
	__be16 pad1;
	u8 list;	// 0=granted, 1=converting, 2=blocked
	u8 flags;

	s8 type;
	s8 convert_type;
	s8 highest_blocked;
	u8 node;
};  // 16 bytes

struct dlm_lock
{
	struct dlm_migratable_lock ml;

	struct list_head list;
	struct list_head ast_list;
	struct list_head bast_list;
	struct dlm_lock_resource *lockres;
	spinlock_t spinlock;
	struct kref lock_refs;

	// ast and bast must be callable while holding a spinlock!
	dlm_astlockfunc_t *ast;
	dlm_bastlockfunc_t *bast;
	void *astdata;
	struct dlm_lockstatus *lksb;
	unsigned ast_pending:1,
		 bast_pending:1,
		 convert_pending:1,
		 lock_pending:1,
		 cancel_pending:1,
		 unlock_pending:1,
		 lksb_kernel_allocated:1;
};


#define DLM_LKSB_UNUSED1	0x01
#define DLM_LKSB_PUT_LVB	0x02
#define DLM_LKSB_GET_LVB	0x04
#define DLM_LKSB_UNUSED2	0x08
#define DLM_LKSB_UNUSED3	0x10
#define DLM_LKSB_UNUSED4	0x20
#define DLM_LKSB_UNUSED5	0x40
#define DLM_LKSB_UNUSED6	0x80


enum dlm_lockres_list {
	DLM_GRANTED_LIST = 0,
	DLM_CONVERTING_LIST,
	DLM_BLOCKED_LIST
};

static inline struct list_head *
dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx)
{
	struct list_head *ret = NULL;
	if (idx == DLM_GRANTED_LIST)
		ret = &res->granted;
	else if (idx == DLM_CONVERTING_LIST)
		ret = &res->converting;
	else if (idx == DLM_BLOCKED_LIST)
		ret = &res->blocked;
	else
		BUG();
	return ret;
}

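/*
 * Illustrative sketch: because granted/converting/blocked are kept
 * adjacent and in enum order on the lockres, a caller can walk all
 * three queues with this helper:
 *
 *	int i;
 *	struct list_head *queue;
 *	for (i = DLM_GRANTED_LIST; i <= DLM_BLOCKED_LIST; i++) {
 *		queue = dlm_list_idx_to_ptr(res, i);
 *		(walk each struct dlm_lock on this queue)
 *	}
 */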



struct dlm_node_iter
{
	unsigned long node_map[BITS_TO_LONGS(O2NM_MAX_NODES)];
	int curnode;
};


enum {
	DLM_MASTER_REQUEST_MSG	 = 500,
	DLM_UNUSED_MSG1,	 /* 501 */
	DLM_ASSERT_MASTER_MSG,	 /* 502 */
	DLM_CREATE_LOCK_MSG,	 /* 503 */
	DLM_CONVERT_LOCK_MSG,	 /* 504 */
	DLM_PROXY_AST_MSG,	 /* 505 */
	DLM_UNLOCK_LOCK_MSG,	 /* 506 */
	DLM_UNUSED_MSG2,	 /* 507 */
	DLM_MIGRATE_REQUEST_MSG, /* 508 */
	DLM_MIG_LOCKRES_MSG,	 /* 509 */
	DLM_QUERY_JOIN_MSG,	 /* 510 */
	DLM_ASSERT_JOINED_MSG,	 /* 511 */
	DLM_CANCEL_JOIN_MSG,	 /* 512 */
	DLM_EXIT_DOMAIN_MSG,	 /* 513 */
	DLM_MASTER_REQUERY_MSG,	 /* 514 */
	DLM_LOCK_REQUEST_MSG,	 /* 515 */
	DLM_RECO_DATA_DONE_MSG,	 /* 516 */
	DLM_BEGIN_RECO_MSG,	 /* 517 */
	DLM_FINALIZE_RECO_MSG	 /* 518 */
};

struct dlm_reco_node_data
{
	int state;
	u8 node_num;
	struct list_head list;
};

enum {
	DLM_RECO_NODE_DATA_DEAD = -1,
	DLM_RECO_NODE_DATA_INIT = 0,
	DLM_RECO_NODE_DATA_REQUESTING,
	DLM_RECO_NODE_DATA_REQUESTED,
	DLM_RECO_NODE_DATA_RECEIVING,
	DLM_RECO_NODE_DATA_DONE,
	DLM_RECO_NODE_DATA_FINALIZE_SENT,
};


enum {
	DLM_MASTER_RESP_NO = 0,
	DLM_MASTER_RESP_YES,
	DLM_MASTER_RESP_MAYBE,
	DLM_MASTER_RESP_ERROR
};


struct dlm_master_request
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_ASSERT_MASTER_MLE_CLEANUP		0x00000001
#define DLM_ASSERT_MASTER_REQUERY		0x00000002
#define DLM_ASSERT_MASTER_FINISH_MIGRATION	0x00000004
struct dlm_assert_master
{
	u8 node_idx;
	u8 namelen;
	__be16 pad1;
	__be32 flags;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_migrate_request
{
	u8 master;
	u8 new_master;
	u8 namelen;
	u8 pad1;
	__be32 pad2;
	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_master_requery
{
	u8 pad1;
	u8 pad2;
	u8 node_idx;
	u8 namelen;
	__be32 pad3;
	u8 name[O2NM_MAX_NAME_LEN];
};

#define DLM_MRES_RECOVERY	0x01
#define DLM_MRES_MIGRATION	0x02
#define DLM_MRES_ALL_DONE	0x04

/*
 * We would like to get one whole lockres into a single network
 * message whenever possible.  Generally speaking, there will be
 * at most one dlm_lock on a lockres for each node in the cluster,
 * plus (infrequently) any additional locks coming in from userdlm.
 *
 * struct _dlm_lockres_page
 * {
 *	dlm_migratable_lockres mres;
 *	dlm_migratable_lock ml[DLM_MAX_MIGRATABLE_LOCKS];
 *	u8 pad[DLM_MIG_LOCKRES_RESERVED];
 * };
 *
 * from ../cluster/tcp.h
 *    NET_MAX_PAYLOAD_BYTES  (4096 - sizeof(net_msg))
 *    (roughly 4080 bytes)
 * and sizeof(dlm_migratable_lockres) = 112 bytes
 * and sizeof(dlm_migratable_lock) = 16 bytes
 *
 * Choosing DLM_MAX_MIGRATABLE_LOCKS=240 and
 * DLM_MIG_LOCKRES_RESERVED=128 means we have this:
 *
 *  (DLM_MAX_MIGRATABLE_LOCKS * sizeof(dlm_migratable_lock)) +
 *     sizeof(dlm_migratable_lockres) + DLM_MIG_LOCKRES_RESERVED =
 *        NET_MAX_PAYLOAD_BYTES
 *  (240 * 16) + 112 + 128 = 4080
 *
 * So a lockres would need more than 240 locks before it would
 * use more than one network packet to recover.  Not too bad.
 */
#define DLM_MAX_MIGRATABLE_LOCKS	240

struct dlm_migratable_lockres
{
	u8 master;
	u8 lockname_len;
	u8 num_locks;		// locks sent in this structure
	u8 flags;
	__be32 total_locks;	// locks to be sent for this migration cookie
	__be64 mig_cookie;	// cookie for this lockres migration
				// or zero if not needed
	// 16 bytes
	u8 lockname[DLM_LOCKID_NAME_MAX];
	// 48 bytes
	u8 lvb[DLM_LVB_LEN];
	// 112 bytes
	struct dlm_migratable_lock ml[0];  // 16 bytes each, begins at byte 112
};
#define DLM_MIG_LOCKRES_MAX_LEN  \
	(sizeof(struct dlm_migratable_lockres) + \
	 (sizeof(struct dlm_migratable_lock) * \
	  DLM_MAX_MIGRATABLE_LOCKS) )

/* from above, 128 bytes
 * for some undetermined future use */
#define DLM_MIG_LOCKRES_RESERVED	(NET_MAX_PAYLOAD_BYTES - \
					 DLM_MIG_LOCKRES_MAX_LEN)

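/*
 * Sketch of a compile-time cross-check (a suggestion, not something
 * the dlm currently does): the arithmetic above depends on the wire
 * structure sizes staying fixed, which could be made explicit in a
 * .c file with:
 *
 *	BUILD_BUG_ON(sizeof(struct dlm_migratable_lock) != 16);
 *	BUILD_BUG_ON(sizeof(struct dlm_migratable_lockres) != 112);
 */
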
struct dlm_create_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];
};

struct dlm_convert_lock
{
	__be64 cookie;

	__be32 flags;
	u8 pad1;
	u8 node_idx;
	s8 requested_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_CONVERT_LOCK_MAX_LEN  (sizeof(struct dlm_convert_lock)+DLM_LVB_LEN)

struct dlm_unlock_lock
{
	__be64 cookie;

	__be32 flags;
	__be16 pad1;
	u8 node_idx;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_UNLOCK_LOCK_MAX_LEN  (sizeof(struct dlm_unlock_lock)+DLM_LVB_LEN)

struct dlm_proxy_ast
{
	__be64 cookie;

	__be32 flags;
	u8 node_idx;
	u8 type;
	u8 blocked_type;
	u8 namelen;

	u8 name[O2NM_MAX_NAME_LEN];

	s8 lvb[0];
};
#define DLM_PROXY_AST_MAX_LEN  (sizeof(struct dlm_proxy_ast)+DLM_LVB_LEN)

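/*
 * Illustrative note: dlm_convert_lock, dlm_unlock_lock and
 * dlm_proxy_ast all end in a zero-length lvb[] member, so each
 * struct describes only the fixed header of a variable-length
 * message.  A sender with lock value block contents to ship (for
 * instance when an LVB flag such as DLM_LKSB_PUT_LVB is involved)
 * would send the *_MAX_LEN size:
 *
 *	sizeof(struct dlm_proxy_ast) + DLM_LVB_LEN	(lvb appended)
 *	sizeof(struct dlm_proxy_ast)			(no lvb)
 */
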
#define DLM_MOD_KEY (0x666c6172)
enum dlm_query_join_response {
	JOIN_DISALLOW = 0,
	JOIN_OK,
	JOIN_OK_NO_MAP,
};

struct dlm_lock_request
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

struct dlm_reco_data_done
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;

	/* unused for now */
	/* eventually we can use this to attempt
	 * lvb recovery based on each node's info */
	u8 reco_lvb[DLM_LVB_LEN];
};

struct dlm_begin_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};


struct dlm_query_join_request
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_assert_joined
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_cancel_join
{
	u8 node_idx;
	u8 pad1[2];
	u8 name_len;
	u8 domain[O2NM_MAX_NAME_LEN];
};

struct dlm_exit_domain
{
	u8 node_idx;
	u8 pad1[3];
};

struct dlm_finalize_reco
{
	u8 node_idx;
	u8 dead_node;
	__be16 pad1;
	__be32 pad2;
};

static inline enum dlm_status
__dlm_lockres_state_to_status(struct dlm_lock_resource *res)
{
	enum dlm_status status = DLM_NORMAL;

	assert_spin_locked(&res->spinlock);

	if (res->state & DLM_LOCK_RES_RECOVERING)
		status = DLM_RECOVERING;
	else if (res->state & DLM_LOCK_RES_MIGRATING)
		status = DLM_MIGRATING;
	else if (res->state & DLM_LOCK_RES_IN_PROGRESS)
		status = DLM_FORWARD;

	return status;
}

struct dlm_lock * dlm_new_lock(int type, u8 node, u64 cookie,
			       struct dlm_lockstatus *lksb);
void dlm_lock_get(struct dlm_lock *lock);
void dlm_lock_put(struct dlm_lock *lock);

void dlm_lock_attach_lockres(struct dlm_lock *lock,
			     struct dlm_lock_resource *res);

int dlm_create_lock_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_convert_lock_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data);

void dlm_revert_pending_convert(struct dlm_lock_resource *res,
				struct dlm_lock *lock);
void dlm_revert_pending_lock(struct dlm_lock_resource *res,
			     struct dlm_lock *lock);

int dlm_unlock_lock_handler(struct o2net_msg *msg, u32 len, void *data);
void dlm_commit_pending_cancel(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);
void dlm_commit_pending_unlock(struct dlm_lock_resource *res,
			       struct dlm_lock *lock);

int dlm_launch_thread(struct dlm_ctxt *dlm);
void dlm_complete_thread(struct dlm_ctxt *dlm);
int dlm_launch_recovery_thread(struct dlm_ctxt *dlm);
void dlm_complete_recovery_thread(struct dlm_ctxt *dlm);
void dlm_wait_for_recovery(struct dlm_ctxt *dlm);
int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node);

void dlm_put(struct dlm_ctxt *dlm);
struct dlm_ctxt *dlm_grab(struct dlm_ctxt *dlm);
int dlm_domain_fully_joined(struct dlm_ctxt *dlm);

void __dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res);
void dlm_lockres_calc_usage(struct dlm_ctxt *dlm,
			    struct dlm_lock_resource *res);
void dlm_purge_lockres(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *lockres);
void dlm_lockres_get(struct dlm_lock_resource *res);
void dlm_lockres_put(struct dlm_lock_resource *res);
void __dlm_unhash_lockres(struct dlm_lock_resource *res);
void __dlm_insert_lockres(struct dlm_ctxt *dlm,
			  struct dlm_lock_resource *res);
struct dlm_lock_resource * __dlm_lookup_lockres(struct dlm_ctxt *dlm,
						const char *name,
						unsigned int len);
struct dlm_lock_resource * dlm_lookup_lockres(struct dlm_ctxt *dlm,
					      const char *name,
					      unsigned int len);

int dlm_is_host_down(int errno);
void dlm_change_lockres_owner(struct dlm_ctxt *dlm,
			      struct dlm_lock_resource *res,
			      u8 owner);
struct dlm_lock_resource * dlm_get_lock_resource(struct dlm_ctxt *dlm,
						 const char *lockid,
						 int flags);
struct dlm_lock_resource *dlm_new_lockres(struct dlm_ctxt *dlm,
					  const char *name,
					  unsigned int namelen);

void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
void dlm_do_local_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
int dlm_do_remote_ast(struct dlm_ctxt *dlm,
		      struct dlm_lock_resource *res,
		      struct dlm_lock *lock);
void dlm_do_local_bast(struct dlm_ctxt *dlm,
		       struct dlm_lock_resource *res,
		       struct dlm_lock *lock,
		       int blocked_type);
int dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm,
			   struct dlm_lock_resource *res,
			   struct dlm_lock *lock,
			   int msg_type,
			   int blocked_type, int flags);
static inline int dlm_send_proxy_bast(struct dlm_ctxt *dlm,
				      struct dlm_lock_resource *res,
				      struct dlm_lock *lock,
				      int blocked_type)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_BAST,
				      blocked_type, 0);
}

static inline int dlm_send_proxy_ast(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_lock *lock,
				     int flags)
{
	return dlm_send_proxy_ast_msg(dlm, res, lock, DLM_AST,
				      0, flags);
}

void dlm_print_one_lock_resource(struct dlm_lock_resource *res);
void __dlm_print_one_lock_resource(struct dlm_lock_resource *res);

u8 dlm_nm_this_node(struct dlm_ctxt *dlm);
void dlm_kick_thread(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
void __dlm_dirty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);


int dlm_nm_init(struct dlm_ctxt *dlm);
int dlm_heartbeat_init(struct dlm_ctxt *dlm);
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data);
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data);

int dlm_lockres_is_dirty(struct dlm_ctxt *dlm, struct dlm_lock_resource *res);
int dlm_migrate_lockres(struct dlm_ctxt *dlm,
			struct dlm_lock_resource *res,
			u8 target);
int dlm_finish_migration(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 u8 old_master);
void dlm_lockres_release_ast(struct dlm_ctxt *dlm,
			     struct dlm_lock_resource *res);
void __dlm_lockres_reserve_ast(struct dlm_lock_resource *res);

int dlm_master_request_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_migrate_request_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data);
int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data);

int dlm_dispatch_assert_master(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res,
			       int ignore_higher,
			       u8 request_from,
			       u32 flags);


int dlm_send_one_lockres(struct dlm_ctxt *dlm,
			 struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to,
			 u8 flags);
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res);

/* will exit holding res->spinlock, but may drop in function */
void __dlm_wait_on_lockres_flags(struct dlm_lock_resource *res, int flags);
void __dlm_wait_on_lockres_flags_set(struct dlm_lock_resource *res, int flags);

/* will exit holding res->spinlock, but may drop in function */
static inline void __dlm_wait_on_lockres(struct dlm_lock_resource *res)
{
	__dlm_wait_on_lockres_flags(res, (DLM_LOCK_RES_IN_PROGRESS|
					  DLM_LOCK_RES_RECOVERING|
					  DLM_LOCK_RES_MIGRATING));
}


int dlm_init_mle_cache(void);
void dlm_destroy_mle_cache(void);
void dlm_hb_event_notify_attached(struct dlm_ctxt *dlm, int idx, int node_up);
void dlm_clean_master_list(struct dlm_ctxt *dlm,
			   u8 dead_node);
int dlm_lock_basts_flushed(struct dlm_ctxt *dlm, struct dlm_lock *lock);


static inline const char * dlm_lock_mode_name(int mode)
{
	switch (mode) {
	case LKM_EXMODE:
		return "EX";
	case LKM_PRMODE:
		return "PR";
	case LKM_NLMODE:
		return "NL";
	}
	return "UNKNOWN";
}


static inline int dlm_lock_compatible(int existing, int request)
{
	/* NO_LOCK compatible with all */
	if (request == LKM_NLMODE ||
	    existing == LKM_NLMODE)
		return 1;

	/* EX incompatible with all non-NO_LOCK */
	if (request == LKM_EXMODE)
		return 0;

	/* request must be PR, which is compatible with PR */
	if (existing == LKM_PRMODE)
		return 1;

	return 0;
}

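/*
 * For reference, dlm_lock_compatible() above encodes this matrix
 * (1 = compatible):
 *
 *	                request
 *	existing     NL   PR   EX
 *	    NL        1    1    1
 *	    PR        1    1    0
 *	    EX        1    0    0
 */
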
static inline int dlm_lock_on_list(struct list_head *head,
				   struct dlm_lock *lock)
{
	struct list_head *iter;
	struct dlm_lock *tmplock;

	list_for_each(iter, head) {
		tmplock = list_entry(iter, struct dlm_lock, list);
		if (tmplock == lock)
			return 1;
	}
	return 0;
}


static inline enum dlm_status dlm_err_to_dlm_status(int err)
{
	enum dlm_status ret;
	if (err == -ENOMEM)
		ret = DLM_SYSERR;
	else if (err == -ETIMEDOUT || o2net_link_down(err, NULL))
		ret = DLM_NOLOCKMGR;
	else if (err == -EINVAL)
		ret = DLM_BADPARAM;
	else if (err == -ENAMETOOLONG)
		ret = DLM_IVBUFLEN;
	else
		ret = DLM_BADARGS;
	return ret;
}


static inline void dlm_node_iter_init(unsigned long *map,
				      struct dlm_node_iter *iter)
{
	memcpy(iter->node_map, map, sizeof(iter->node_map));
	iter->curnode = -1;
}

static inline int dlm_node_iter_next(struct dlm_node_iter *iter)
{
	int bit;
	bit = find_next_bit(iter->node_map, O2NM_MAX_NODES, iter->curnode+1);
	if (bit >= O2NM_MAX_NODES) {
		iter->curnode = O2NM_MAX_NODES;
		return -ENOENT;
	}
	iter->curnode = bit;
	return bit;
}

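/*
 * Illustrative usage sketch: callers would typically snapshot a node
 * map under the proper lock, then walk it without the lock held:
 *
 *	struct dlm_node_iter iter;
 *	int node;
 *
 *	spin_lock(&dlm->spinlock);
 *	dlm_node_iter_init(dlm->domain_map, &iter);
 *	spin_unlock(&dlm->spinlock);
 *	while ((node = dlm_node_iter_next(&iter)) >= 0) {
 *		(send a message to, or otherwise handle, this node)
 *	}
 */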


#endif /* DLMCOMMON_H */