/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/connector.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_tag_magic.h>
#include <linux/drbd_limits.h>
#include <linux/compiler.h>
#include <linux/kthread.h>
static unsigned short *tl_add_blob(unsigned short *, enum drbd_tags, const void *, int);
static unsigned short *tl_add_str(unsigned short *, enum drbd_tags, const char *);
static unsigned short *tl_add_int(unsigned short *, enum drbd_tags, const void *);
/* see get_sb_bdev and bd_claim */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Generate the tag_list to struct functions */
#define NL_PACKET(name, number, fields) \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) __attribute__ ((unused)); \
static int name ## _from_tags(struct drbd_conf *mdev, \
	unsigned short *tags, struct name *arg) \
{ \
	int tag; \
	int dlen; \
	\
	while ((tag = get_unaligned(tags++)) != TT_END) { \
		dlen = get_unaligned(tags++); \
		switch (tag_number(tag)) { \
		fields \
		default: \
			if (tag & T_MANDATORY) { \
				dev_err(DEV, "Unknown tag: %d\n", tag_number(tag)); \
				return 0; \
			} \
		} \
		tags = (unsigned short *)((char *)tags + dlen); \
	} \
	return 1; \
}
#define NL_INTEGER(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INTEGER ); */ \
		arg->member = get_unaligned((int *)(tags)); \
		break;
#define NL_INT64(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_INT64 ); */ \
		arg->member = get_unaligned((u64 *)(tags)); \
		break;
#define NL_BIT(pn, pr, member) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_BIT ); */ \
		arg->member = *(char *)(tags) ? 1 : 0; \
		break;
#define NL_STRING(pn, pr, member, len) \
	case pn: /* D_ASSERT( tag_type(tag) == TT_STRING ); */ \
		if (dlen > len) { \
			dev_err(DEV, "arg too long: %s (%u wanted, max len: %u bytes)\n", \
				#member, dlen, (unsigned int)len); \
			return 0; \
		} \
		arg->member ## _len = dlen; \
		memcpy(arg->member, tags, min_t(size_t, dlen, len)); \
		break;
#include <linux/drbd_nl.h>
/* Generate the struct to tag_list functions */
#define NL_PACKET(name, number, fields) \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) __attribute__ ((unused)); \
static unsigned short* \
name ## _to_tags(struct drbd_conf *mdev, \
	struct name *arg, unsigned short *tags) \
{ \
	fields \
	return tags; \
}

#define NL_INTEGER(pn, pr, member) \
	put_unaligned(pn | pr | TT_INTEGER, tags++); \
	put_unaligned(sizeof(int), tags++); \
	put_unaligned(arg->member, (int *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(int));
#define NL_INT64(pn, pr, member) \
	put_unaligned(pn | pr | TT_INT64, tags++); \
	put_unaligned(sizeof(u64), tags++); \
	put_unaligned(arg->member, (u64 *)tags); \
	tags = (unsigned short *)((char *)tags+sizeof(u64));
#define NL_BIT(pn, pr, member) \
	put_unaligned(pn | pr | TT_BIT, tags++); \
	put_unaligned(sizeof(char), tags++); \
	*(char *)tags = arg->member; \
	tags = (unsigned short *)((char *)tags+sizeof(char));
#define NL_STRING(pn, pr, member, len) \
	put_unaligned(pn | pr | TT_STRING, tags++); \
	put_unaligned(arg->member ## _len, tags++); \
	memcpy(tags, arg->member, arg->member ## _len); \
	tags = (unsigned short *)((char *)tags + arg->member ## _len);
#include <linux/drbd_nl.h>
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name);
void drbd_nl_send_reply(struct cn_msg *, int);
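
/* Run the configured userspace helper with DRBD_PEER_AF/DRBD_PEER_ADDRESS
 * in its environment and "minor-<n>" as argument; the helper's exit
 * status, as delivered by call_usermodehelper(), is returned. */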
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev)) {
		switch (((struct sockaddr *)mdev->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);

	drbd_bcast_ev_helper(mdev, cmd);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
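
/* Ask the fence-peer helper to fence the peer, and translate its exit
 * code into the peer disk state (nps) that we should request from the
 * state engine. */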
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;
	union drbd_state ns;

	nps = drbd_try_outdate_peer(mdev);

	/* Not using
	   drbd_request_state(mdev, NS(pdsk, nps));
	   here, because we might have been able to re-establish the
	   connection in the meantime. This can only partially be solved in
	   the state engine's is_valid_state() and
	   is_valid_state_transition() functions.

	   nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
	   therefore we have to have the pre state change check here.
	*/
	spin_lock_irq(&mdev->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &mdev->flags)) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->req_lock);

	return 0;
}
void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev); /* Detect a dead peer ASAP */

	mutex_lock(&mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto fail;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk  = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto fail;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto fail;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev)) {
			mdev->net_conf->want_lose = 0;
			put_net_conf(mdev);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
	mutex_unlock(&mdev->state_mutex);
	return rv;
}
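
/* Look up the drbd_conf for a minor; optionally create and register the
 * device first, being careful about racing creators. */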
static struct drbd_conf *ensure_mdev(int minor, int create)
{
	struct drbd_conf *mdev;

	if (minor >= minor_count)
		return NULL;

	mdev = minor_to_mdev(minor);

	if (!mdev && create) {
		struct gendisk *disk = NULL;
		mdev = drbd_new_device(minor);

		spin_lock_irq(&drbd_pp_lock);
		if (minor_table[minor] == NULL) {
			minor_table[minor] = mdev;
			disk = mdev->vdisk;
			mdev = NULL;
		} /* else: we lost the race */
		spin_unlock_irq(&drbd_pp_lock);

		if (disk) /* we won the race above */
			/* in case we ever add a drbd_delete_device(),
			 * don't forget the del_gendisk! */
			add_disk(disk);
		else /* we lost the race above */
			drbd_free_mdev(mdev);

		mdev = minor_to_mdev(minor);
	}

	return mdev;
}
static int drbd_nl_primary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	struct primary primary_args;

	memset(&primary_args, 0, sizeof(struct primary));
	if (!primary_from_tags(mdev, nlp->tag_list, &primary_args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	reply->ret_code =
		drbd_set_role(mdev, R_PRIMARY, primary_args.primary_force);

	return 0;
}
static int drbd_nl_secondary(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_set_role(mdev, R_SECONDARY, 0);

	return 0;
}
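
/* For internal/flexible-internal meta data the super block sits at the
 * end of the device, with the activity log and bitmap addressed by
 * negative sector offsets relative to it; external meta data starts at
 * offset 0 of the meta device instead (see the switch below). */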
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_MAX_SIZE;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
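
/* Pretty-print a KB value: repeatedly divide by 1024 (with rounding)
 * until the number fits in four digits, e.g. 25000 -> "24 MB". */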
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	ERR_IF(mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elemens */
	return 0;
}
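
/* Propagate I/O limits (max_hw_sectors, max segments, segment boundary,
 * read-ahead) from the backing device's queue to our request queue. */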
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
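
/* Re-evaluate the maximum BIO size we may use, taking the local queue
 * limits and what the peer advertised (or what its protocol version
 * implies) into account. */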
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because starting with 8.3.8 the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->agreed_pro_version < 94) {
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		} else if (mdev->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one. Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void drbd_reconfig_start(struct drbd_conf *mdev)
{
	wait_event(mdev->state_wait, !test_and_set_bit(CONFIG_PENDING, &mdev->flags));
	wait_event(mdev->state_wait, !test_bit(DEVICE_DYING, &mdev->flags));
	drbd_thread_start(&mdev->worker);
	drbd_flush_workqueue(mdev);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void drbd_reconfig_done(struct drbd_conf *mdev)
{
	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		set_bit(DEVICE_DYING, &mdev->flags);
		drbd_thread_stop_nowait(&mdev->worker);
	} else
		clear_bit(CONFIG_PENDING, &mdev->flags);
	spin_unlock_irq(&mdev->req_lock);
	wake_up(&mdev->state_wait);
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (lc_try_lock(mdev->act_log)) {
		drbd_al_shrink(mdev);
		lc_unlock(mdev->act_log);
	} else {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	spin_lock_irq(&mdev->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);

	spin_unlock_irq(&mdev->req_lock);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
/* does always return 0;
 * interesting return code is in reply->ret_code */
static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;
	int logical_block_size;

	drbd_reconfig_start(mdev);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error. Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, cqueue thread context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	if (!disk_conf_from_tags(mdev, nlp->tag_list, &nbc->dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev)) {
		int prot = mdev->net_conf->wire_protocol;
		put_net_conf(mdev);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device. TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor! (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
		     "at least %llu sectors needed for this meta-disk type\n",
		     (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	/* allocate a second IO page if logical_block_size != 512 */
	logical_block_size = bdev_logical_block_size(nbc->md_bdev);
	if (logical_block_size == 0)
		logical_block_size = MD_SECTOR_SIZE;

	if (logical_block_size != MD_SECTOR_SIZE) {
		if (!mdev->md_io_tmpp) {
			struct page *page = alloc_page(GFP_NOIO);
			if (!page)
				goto force_diskless_dec;

			dev_warn(DEV, "Meta data's bdev logical_block_size = %d != %d\n",
			     logical_block_size, MD_SECTOR_SIZE);
			dev_warn(DEV, "Workaround engaged (has performance impact).\n");

			mdev->md_io_tmpp = page;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK) < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;
	spin_unlock_irq(&mdev->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	enum drbd_ret_code retcode;
	int ret;
	struct detach dt = {};

	if (!detach_from_tags(mdev, nlp->tag_list, &dt)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		goto out;
	}

	if (dt.detach_force) {
		drbd_force_state(mdev, NS(disk, D_FAILED));
		reply->ret_code = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);

	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	reply->ret_code = retcode;
out:
	return 0;
}
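
/* Configure the replication link ("drbdsetup ... net"): fill in the
 * defaults, sanity-check the settings against role and fencing policy,
 * reject address pairs already used by other minors, set up crypto
 * transforms and hash tables, and finally move to C_UNCONNECTED. */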
static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	int i, ns;
	enum drbd_ret_code retcode;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	struct hlist_head *new_tl_hash = NULL;
	struct hlist_head *new_ee_hash = NULL;
	struct drbd_conf *odev;
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;

	drbd_reconfig_start(mdev);

	if (mdev->state.conn > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries    = 0;
	new_conf->wire_protocol    = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;

	if (!net_conf_from_tags(mdev, nlp->tag_list, new_conf)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	if (get_ldev(mdev)) {
		enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
		if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
		retcode = ERR_DISCARD;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev || odev == mdev)
			continue;
		if (get_net_conf(odev)) {
			taken_addr = (struct sockaddr *)&odev->net_conf->my_addr;
			if (new_conf->my_addr_len == odev->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&odev->net_conf->peer_addr;
			if (new_conf->peer_addr_len == odev->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(odev);
		}
	}
	if (retcode != NO_ERROR)
		goto fail;

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	ns = new_conf->max_epoch_size/8;
	if (mdev->tl_hash_s != ns) {
		new_tl_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_tl_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	ns = new_conf->max_buffers/8;
	if (new_conf->two_primaries && (mdev->ee_hash_s != ns)) {
		new_ee_hash = kzalloc(ns*sizeof(void *), GFP_KERNEL);
		if (!new_ee_hash) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	drbd_flush_workqueue(mdev);
	spin_lock_irq(&mdev->req_lock);
	if (mdev->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&mdev->req_lock);
		goto fail;
	}
	mdev->net_conf = new_conf;

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;

	if (new_tl_hash) {
		kfree(mdev->tl_hash);
		mdev->tl_hash_s = mdev->net_conf->max_epoch_size/8;
		mdev->tl_hash = new_tl_hash;
	}

	if (new_ee_hash) {
		kfree(mdev->ee_hash);
		mdev->ee_hash_s = mdev->net_conf->max_buffers/8;
		mdev->ee_hash = new_ee_hash;
	}

	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = tfm;

	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = integrity_r_tfm;

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);
	mdev->int_dig_out = int_dig_out;
	mdev->int_dig_in = int_dig_in;
	mdev->int_dig_vv = int_dig_vv;
	retcode = _drbd_set_state(_NS(mdev, conn, C_UNCONNECTED), CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->req_lock);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_tl_hash);
	kfree(new_ee_hash);
	kfree(new_conf);

	reply->ret_code = retcode;
	drbd_reconfig_done(mdev);
	return 0;
}
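
/* Tear down the connection ("drbdsetup ... disconnect"), if necessary
 * negotiating who ends up outdated, and wait until we have left
 * C_DISCONNECTING. */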
static int drbd_nl_disconnect(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;
	struct disconnect dc;

	memset(&dc, 0, sizeof(struct disconnect));
	if (!disconnect_from_tags(mdev, nlp->tag_list, &dc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (dc.force) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn >= C_WF_CONNECTION)
			_drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), CS_HARD, NULL);
		spin_unlock_irq(&mdev->req_lock);
		goto done;
	}

	retcode = _drbd_request_state(mdev, NS(conn, C_DISCONNECTING), CS_ORDERED);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
						       pdsk, D_OUTDATED));
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = _drbd_request_state(mdev, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED),
					      CS_ORDERED);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(mdev->state_wait,
				     mdev->state.conn != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_md_sync(mdev);
	reply->ret_code = retcode;
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
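
/* Resize the device ("drbdsetup ... resize"): only allowed while at most
 * C_CONNECTED and with at least one Primary; a grown device triggers a
 * resync of the new area via resync_after_online_grow() on the peer. */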
static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			  struct drbd_nl_cfg_reply *reply)
{
	struct resize rs;
	int retcode = NO_ERROR;
	enum determine_dev_size dd;
	enum dds_flags ddsf;

	memset(&rs, 0, sizeof(struct resize));
	if (!resize_from_tags(mdev, nlp->tag_list, &rs)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	reply->ret_code = retcode;
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
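
/* Update the syncer configuration ("drbdsetup ... syncer"): swap the
 * checksum/verify digest algorithms only while no resync or verify is
 * running, resize the resync fifo plan, and re-check the AL size. */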
static int drbd_nl_syncer_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (nlp->flags & DRBD_NL_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	if (!syncer_conf_from_tags(mdev, nlp->tag_list, &sc)) {
		retcode = ERR_MANDATORY_TAG;
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = bitmap_parse(sc.cpu_mask, 32,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	ERR_IF (sc.rate < 1) sc.rate = 1;
	ERR_IF (sc.al_extents < 7) sc.al_extents = 127; /* arbitrary minimum */
#define AL_MAX ((MD_AL_MAX_SIZE-1) * AL_EXTENTS_PT)
	if (sc.al_extents > AL_MAX) {
		dev_err(DEV, "sc.al_extents > %d\n", AL_MAX);
		sc.al_extents = AL_MAX;
	}
#undef AL_MAX

	/* to avoid spurious errors when configuring minors before configuring
	 * the minors they depend on: if necessary, first create the minor we
	 * depend on */
	if (sc.after >= 0)
		ensure_mdev(sc.after, 1);

	/* most sanity checks done, try to assign the new sync-after
	 * dependency. need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);

		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev);
		mdev->receiver.reset_cpu_mask = 1;
		mdev->asender.reset_cpu_mask = 1;
		mdev->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	reply->ret_code = retcode;
	return 0;
}
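
/* Start a full resync as sync target ("drbdsetup ... invalidate"); if we
 * are not connected, simply mark the local disk Inconsistent. */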
static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}
	drbd_resume_io(mdev);

	reply->ret_code = retcode;
	return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				   struct drbd_nl_cfg_reply *reply)
{
	int retcode;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);

	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyways. Just make that
			   into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
					"set_n_write from invalidate_peer",
					BM_LOCKED_SET_ALLOWED))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}
	drbd_resume_io(mdev);

	reply->ret_code = retcode;
	return 0;
}
static int drbd_nl_pause_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;

	if (drbd_request_state(mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;

	reply->ret_code = retcode;
	return 0;
}
static int drbd_nl_resume_sync(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			       struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	union drbd_state s;

	if (drbd_request_state(mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

	reply->ret_code = retcode;
	return 0;
}
static int drbd_nl_suspend_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(susp, 1));

	return 0;
}
static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (reply->ret_code == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev, fail_frozen_disk_io);
	}
	drbd_resume_io(mdev);

	return 0;
}
static int drbd_nl_outdate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			   struct drbd_nl_cfg_reply *reply)
{
	reply->ret_code = drbd_request_state(mdev, NS(disk, D_OUTDATED));

	return 0;
}
static int drbd_nl_get_config(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = disk_conf_to_tags(mdev, &mdev->ldev->dc, tl);
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		tl = net_conf_to_tags(mdev, mdev->net_conf, tl);
		put_net_conf(mdev);
	}
	tl = syncer_conf_to_tags(mdev, &mdev->sync_conf, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
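/* Report the current device state; while a resync is running, also
 * include the syncer progress in the tag list. */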
static int drbd_nl_get_state(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl = reply->tag_list;
	union drbd_state s = mdev->state;
	unsigned long rs_left;
	unsigned int res;

	tl = get_state_to_tags(mdev, (struct get_state *)&s, tl);

	/* no local ref, no bitmap, no syncer progress. */
	if (s.conn >= C_SYNC_SOURCE && s.conn <= C_PAUSED_SYNC_T) {
		if (get_ldev(mdev)) {
			drbd_get_syncer_progress(mdev, &rs_left, &res);
			tl = tl_add_int(tl, T_sync_progress, &res);
			put_ldev(mdev);
		}
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
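/* Export the on-disk UUID set and the metadata flags to drbdsetup.
 * Only meaningful while we can get a reference on the local disk. */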
static int drbd_nl_get_uuids(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			     struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;

	tl = reply->tag_list;

	if (get_ldev(mdev)) {
		tl = tl_add_blob(tl, T_uuids, mdev->ldev->md.uuid, UI_SIZE*sizeof(u64));
		tl = tl_add_int(tl, T_uuids_flags, &mdev->ldev->md.flags);
		put_ldev(mdev);
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
/**
 * drbd_nl_get_timeout_flag() - Used by drbdsetup to find out which timeout value to use
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 */
static int drbd_nl_get_timeout_flag(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
				    struct drbd_nl_cfg_reply *reply)
{
	unsigned short *tl;
	char rv;

	tl = reply->tag_list;

	rv = mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
	     test_bit(USE_DEGR_WFC_T, &mdev->flags) ? UT_DEGRADED : UT_DEFAULT;

	tl = tl_add_blob(tl, T_use_degraded, &rv, sizeof(rv));
	put_unaligned(TT_END, tl++); /* Close the tag list */

	return (int)((char *)tl - (char *)reply->tag_list);
}
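/* Start online verify. A start sector may be given in the tag list;
 * by default verification resumes from the last known position. */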
static int drbd_nl_start_ov(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			    struct drbd_nl_cfg_reply *reply)
{
	/* default to resume from last known position, if possible */
	struct start_ov args =
		{ .start_sector = mdev->ov_start_sector };

	if (!start_ov_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = args.start_sector & ~BM_SECT_PER_BIT;
	reply->ret_code = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
	return 0;
}
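/**
 * drbd_nl_new_c_uuid() - Generate a new current UUID on user request
 * @mdev:	DRBD device.
 * @nlp:	Netlink/connector packet from drbdsetup
 * @reply:	Reply packet for drbdsetup
 *
 * With clear_bm set on a freshly created, connected device, this also
 * clears the bitmap and marks both disks UpToDate, skipping the
 * initial sync.
 */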
static int drbd_nl_new_c_uuid(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
			      struct drbd_nl_cfg_reply *reply)
{
	int retcode = NO_ERROR;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid args;

	memset(&args, 0, sizeof(struct new_c_uuid));
	if (!new_c_uuid_from_tags(mdev, nlp->tag_list, &args)) {
		reply->ret_code = ERR_MANDATORY_TAG;
		return 0;
	}

	mutex_lock(&mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(&mdev->state_mutex);

	reply->ret_code = retcode;
	return 0;
}
struct cn_handler_struct {
	int (*function)(struct drbd_conf *,
			struct drbd_nl_cfg_req *,
			struct drbd_nl_cfg_reply *);
	int reply_body_size;
};
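/* Dispatch table for connector requests, indexed by packet type.
 * A non-zero reply_body_size reserves room for a tag list in the reply;
 * handlers with reply_body_size 0 answer with P_return_code_only. */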
static struct cn_handler_struct cnd_table[] = {
	[ P_primary ]		= { &drbd_nl_primary,		0 },
	[ P_secondary ]		= { &drbd_nl_secondary,		0 },
	[ P_disk_conf ]		= { &drbd_nl_disk_conf,		0 },
	[ P_detach ]		= { &drbd_nl_detach,		0 },
	[ P_net_conf ]		= { &drbd_nl_net_conf,		0 },
	[ P_disconnect ]	= { &drbd_nl_disconnect,	0 },
	[ P_resize ]		= { &drbd_nl_resize,		0 },
	[ P_syncer_conf ]	= { &drbd_nl_syncer_conf,	0 },
	[ P_invalidate ]	= { &drbd_nl_invalidate,	0 },
	[ P_invalidate_peer ]	= { &drbd_nl_invalidate_peer,	0 },
	[ P_pause_sync ]	= { &drbd_nl_pause_sync,	0 },
	[ P_resume_sync ]	= { &drbd_nl_resume_sync,	0 },
	[ P_suspend_io ]	= { &drbd_nl_suspend_io,	0 },
	[ P_resume_io ]		= { &drbd_nl_resume_io,		0 },
	[ P_outdate ]		= { &drbd_nl_outdate,		0 },
	[ P_get_config ]	= { &drbd_nl_get_config,
				    sizeof(struct syncer_conf_tag_len_struct) +
				    sizeof(struct disk_conf_tag_len_struct) +
				    sizeof(struct net_conf_tag_len_struct) },
	[ P_get_state ]		= { &drbd_nl_get_state,
				    sizeof(struct get_state_tag_len_struct) +
				    sizeof(struct sync_progress_tag_len_struct) },
	[ P_get_uuids ]		= { &drbd_nl_get_uuids,
				    sizeof(struct get_uuids_tag_len_struct) },
	[ P_get_timeout_flag ]	= { &drbd_nl_get_timeout_flag,
				    sizeof(struct get_timeout_flag_tag_len_struct) },
	[ P_start_ov ]		= { &drbd_nl_start_ov,		0 },
	[ P_new_c_uuid ]	= { &drbd_nl_new_c_uuid,	0 },
};
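/* Entry point for all requests arriving on the drbd connector socket:
 * validate the caller and the packet type, look up the handler in
 * cnd_table, run it, and send the assembled reply back via netlink.
 * Errors detected before a handler runs are answered with a plain
 * return-code-only reply. */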
static void drbd_connector_callback(struct cn_msg *req, struct netlink_skb_parms *nsp)
{
	struct drbd_nl_cfg_req *nlp = (struct drbd_nl_cfg_req *)req->data;
	struct cn_handler_struct *cm;
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	struct drbd_conf *mdev;
	int retcode, rr;
	int reply_size = sizeof(struct cn_msg)
		+ sizeof(struct drbd_nl_cfg_reply)
		+ sizeof(short int);

	if (!try_module_get(THIS_MODULE)) {
		printk(KERN_ERR "drbd: try_module_get() failed!\n");
		return;
	}

	if (!capable(CAP_SYS_ADMIN)) {
		retcode = ERR_PERM;
		goto fail;
	}

	mdev = ensure_mdev(nlp->drbd_minor,
			(nlp->flags & DRBD_NL_CREATE_DEVICE));
	if (!mdev) {
		retcode = ERR_MINOR_INVALID;
		goto fail;
	}

	if (nlp->packet_type >= P_nl_after_last_packet ||
	    nlp->packet_type == P_return_code_only) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	cm = cnd_table + nlp->packet_type;

	/* This may happen if packet number is 0: */
	if (cm->function == NULL) {
		retcode = ERR_PACKET_NR;
		goto fail;
	}

	reply_size += cm->reply_body_size;

	/* allocation not in the IO path, cqueue thread context */
	cn_reply = kzalloc(reply_size, GFP_KERNEL);
	if (!cn_reply) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	reply = (struct drbd_nl_cfg_reply *) cn_reply->data;

	reply->packet_type =
		cm->reply_body_size ? nlp->packet_type : P_return_code_only;
	reply->minor = nlp->drbd_minor;
	reply->ret_code = NO_ERROR; /* Might be modified by cm->function. */
	/* reply->tag_list; might be modified by cm->function. */

	rr = cm->function(mdev, nlp, reply);

	cn_reply->id = req->id;
	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) + rr;
	cn_reply->flags = 0;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_KERNEL);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);

	kfree(cn_reply);
	module_put(THIS_MODULE);
	return;
fail:
	drbd_nl_send_reply(req, retcode);
	module_put(THIS_MODULE);
}
static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
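/* The helpers below emit tag/length/value triples: a 16 bit tag
 * (encoding tag number, type and the mandatory flag), a 16 bit length,
 * and the payload itself. __tl_add_blob() additionally truncates the
 * payload to the per-tag maximum length from tag_descriptions[]. */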
static unsigned short *
__tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
	unsigned short len, int nul_terminated)
{
	unsigned short l = tag_descriptions[tag_number(tag)].max_len;
	len = (len < l) ? len : l;
	put_unaligned(tag, tl++);
	put_unaligned(len, tl++);
	memcpy(tl, data, len);
	tl = (unsigned short *)((char *)tl + len);
	if (nul_terminated)
		*((char *)tl - 1) = 0;

	return tl;
}
static unsigned short *
tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data, int len)
{
	return __tl_add_blob(tl, tag, data, len, 0);
}
static unsigned short *
tl_add_str(unsigned short *tl, enum drbd_tags tag, const char *str)
{
	return __tl_add_blob(tl, tag, str, strlen(str)+1, 0);
}
static unsigned short *
tl_add_int(unsigned short *tl, enum drbd_tags tag, const void *val)
{
	put_unaligned(tag, tl++);
	switch (tag_type(tag)) {
	case TT_INTEGER:
		put_unaligned(sizeof(int), tl++);
		put_unaligned(*(int *)val, (int *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(int));
		break;
	case TT_INT64:
		put_unaligned(sizeof(u64), tl++);
		put_unaligned(*(u64 *)val, (u64 *)tl);
		tl = (unsigned short *)((char *)tl + sizeof(u64));
		break;
	default:
		/* someone did something stupid. */
		;
	}
	return tl;
}
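/* Broadcast a state change to userspace listeners. Unlike the
 * drbd_nl_* handlers above, this is not a reply to a request: the
 * message is built on the stack and sent with its own sequence number. */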
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state state)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct get_state_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_state() got called\n"); */

	tl = get_state_to_tags(mdev, (struct get_state *)&state, tl);

	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_get_state;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
void drbd_bcast_ev_helper(struct drbd_conf *mdev, char *helper_name)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct call_helper_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;

	/* dev_warn(DEV, "drbd_bcast_ev_helper() got called\n"); */

	tl = tl_add_str(tl, T_helper, helper_name);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_call_helper;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
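/* Broadcast the data of an epoch entry, e.g. on online-verify failure:
 * the reason string, the seen and calculated digests, sector and block
 * id, and up to the first 32k of the data itself. The reply buffer is
 * allocated with GFP_NOIO since this runs in receiver context, which
 * may be in the writeout path of the peer node. */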
void drbd_bcast_ee(struct drbd_conf *mdev,
		const char *reason, const int dgs,
		const char *seen_hash, const char *calc_hash,
		const struct drbd_epoch_entry *e)
{
	struct cn_msg *cn_reply;
	struct drbd_nl_cfg_reply *reply;
	unsigned short *tl;
	struct page *page;
	unsigned len;

	if (!e)
		return;
	if (!reason || !reason[0])
		return;

	/* apparently we have to memcpy twice, first to prepare the data for the
	 * struct cn_msg, then within cn_netlink_send from the cn_msg to the
	 * netlink skb. */
	/* receiver thread context, which is not in the writeout path (of this node),
	 * but may be in the writeout path of the _other_ node.
	 * GFP_NOIO to avoid potential "distributed deadlock". */
	cn_reply = kzalloc(
		sizeof(struct cn_msg)+
		sizeof(struct drbd_nl_cfg_reply)+
		sizeof(struct dump_ee_tag_len_struct)+
		sizeof(short int),
		GFP_NOIO);

	if (!cn_reply) {
		dev_err(DEV, "could not kmalloc buffer for drbd_bcast_ee, sector %llu, size %u\n",
			(unsigned long long)e->sector, e->size);
		return;
	}

	reply = (struct drbd_nl_cfg_reply *)cn_reply->data;
	tl = reply->tag_list;

	tl = tl_add_str(tl, T_dump_ee_reason, reason);
	tl = tl_add_blob(tl, T_seen_digest, seen_hash, dgs);
	tl = tl_add_blob(tl, T_calc_digest, calc_hash, dgs);
	tl = tl_add_int(tl, T_ee_sector, &e->sector);
	tl = tl_add_int(tl, T_ee_block_id, &e->block_id);

	/* dump the first 32k */
	len = min_t(unsigned, e->size, 32 << 10);
	put_unaligned(T_ee_data, tl++);
	put_unaligned(len, tl++);

	page = e->pages;
	page_chain_for_each(page) {
		void *d = kmap_atomic(page);
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		memcpy(tl, d, l);
		kunmap_atomic(d);
		tl = (unsigned short *)((char *)tl + l);
		len -= l;
		if (!len)
			break;
	}
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_dump_ee;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	kfree(cn_reply);
}
void drbd_bcast_sync_progress(struct drbd_conf *mdev)
{
	char buffer[sizeof(struct cn_msg)+
		    sizeof(struct drbd_nl_cfg_reply)+
		    sizeof(struct sync_progress_tag_len_struct)+
		    sizeof(short int)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	unsigned short *tl = reply->tag_list;
	unsigned long rs_left;
	unsigned int res;

	/* no local ref, no bitmap, no syncer progress, no broadcast. */
	if (!get_ldev(mdev))
		return;
	drbd_get_syncer_progress(mdev, &rs_left, &res);
	put_ldev(mdev);

	tl = tl_add_int(tl, T_sync_progress, &res);
	put_unaligned(TT_END, tl++); /* Close the tag list */

	cn_reply->id.idx = CN_IDX_DRBD;
	cn_reply->id.val = CN_VAL_DRBD;

	cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
	cn_reply->ack = 0; /* not used here. */
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
		(int)((char *)tl - (char *)reply->tag_list);
	cn_reply->flags = 0;

	reply->packet_type = P_sync_progress;
	reply->minor = mdev_to_minor(mdev);
	reply->ret_code = NO_ERROR;

	cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
int __init drbd_nl_init(void)
{
	static struct cb_id cn_id_drbd;
	int err, try = 10;

	cn_id_drbd.val = CN_VAL_DRBD;
	do {
		cn_id_drbd.idx = cn_idx;
		err = cn_add_callback(&cn_id_drbd, "cn_drbd", &drbd_connector_callback);
		if (!err)
			break;
		cn_idx = (cn_idx + CN_IDX_STEP);
	} while (try--);

	if (err) {
		printk(KERN_ERR "drbd: cn_drbd failed to register\n");
		return err;
	}

	return 0;
}
void drbd_nl_cleanup(void)
{
	static struct cb_id cn_id_drbd;

	cn_id_drbd.idx = cn_idx;
	cn_id_drbd.val = CN_VAL_DRBD;

	cn_del_callback(&cn_id_drbd);
}
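/* Send a minimal reply that carries nothing but the return code.
 * Used for all error paths that are detected before a handler could
 * build a full tag-list reply. */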
void drbd_nl_send_reply(struct cn_msg *req, int ret_code)
{
	char buffer[sizeof(struct cn_msg)+sizeof(struct drbd_nl_cfg_reply)];
	struct cn_msg *cn_reply = (struct cn_msg *) buffer;
	struct drbd_nl_cfg_reply *reply =
		(struct drbd_nl_cfg_reply *)cn_reply->data;
	int rr;

	memset(buffer, 0, sizeof(buffer));
	cn_reply->id = req->id;

	cn_reply->seq = req->seq;
	cn_reply->ack = req->ack + 1;
	cn_reply->len = sizeof(struct drbd_nl_cfg_reply);
	cn_reply->flags = 0;

	reply->packet_type = P_return_code_only;
	reply->minor = ((struct drbd_nl_cfg_req *)req->data)->drbd_minor;
	reply->ret_code = ret_code;

	rr = cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
	if (rr && rr != -ESRCH)
		printk(KERN_INFO "drbd: cn_netlink_send()=%d\n", rr);
}