/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>

// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

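/* Including genl_magic_func.h here (after the handler prototypes above) is
 * what expands the generated netlink plumbing for this family: the nested
 * attribute policies, the *_from_attrs() helpers used throughout this file,
 * and the drbd_genl_family/ops tables that reference the drbd_adm_*
 * handlers.  (Summary of the genl_magic framework; the actual definitions
 * live in drbd_genl.h.) */
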
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";

/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;

static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail would be no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}

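/* Typical use, as seen in drbd_adm_prepare() below: attach a human readable
 * explanation to the pending reply before returning an error code, e.g.
 *
 *	drbd_msg_put_info("unknown minor");
 *	return ERR_MINOR_INVALID;
 */
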
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}

static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}

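/* Sketch of the calling convention the drbd_adm_* handlers below follow
 * (prepare and finish are always paired):
 *
 *	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;		(no reply skb, nothing to report on)
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	...work on adm_ctx.mdev and/or adm_ctx.tconn...
 * out:
 *	drbd_adm_finish(info, retcode);
 *	return 0;
 */
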
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}

int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

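/* Note on the exit code handling in drbd_khelper() and conn_khelper():
 * call_usermodehelper() is invoked with wait == 1 (UMH_WAIT_PROC on kernels
 * of this vintage), so a non-negative return value is a wait(2)-style
 * status; that is why the helper's exit code is extracted as
 * (ret >> 8) & 0xff.  A negative value means the helper could not be run at
 * all, and is deliberately ignored. */
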
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}

int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}

static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}

	return fp;
}

bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}

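/* Quick reference for the fence-peer helper exit codes handled above:
 *   3: peer is inconsistent         -> pdsk = Inconsistent
 *   4: peer was fenced/outdated     -> pdsk = Outdated
 *   5: peer was down                -> pdsk = Outdated, only if our disk is UpToDate
 *   6: peer is active primary       -> outdate our own disk instead
 *   7: peer was stonithed           -> pdsk = Outdated (expects fencing == Stonith)
 * Anything else marks the helper as broken and leaves IO frozen. */
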
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}

enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->net_conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->want_lose = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->net_conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}

static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}

/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}

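/* Worked examples (input is KB): ppsize(buf, 4) yields "4 KB";
 * ppsize(buf, 1048576) yields "1024 MB" (one shift, no round-up);
 * ppsize(buf, 15000) yields "15 MB", because the !!(size & (1<<9)) term
 * rounds the truncated 14.6 MB up. */
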
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}

/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}

sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}

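/* In short: while connected, the agreed size is min(my capacity, peer's
 * capacity); otherwise we fall back to the last agreed size, clipped by
 * whatever sizes are still known.  An explicit user-requested size can only
 * shrink the result, never grow it beyond what is achievable. */
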
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}

static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}

*mdev
)
1008 int now
, new, local
, peer
;
1010 now
= queue_max_hw_sectors(mdev
->rq_queue
) << 9;
1011 local
= mdev
->local_max_bio_size
; /* Eventually last known value, from volatile memory */
1012 peer
= mdev
->peer_max_bio_size
; /* Eventually last known value, from meta data */
1014 if (get_ldev_if_state(mdev
, D_ATTACHING
)) {
1015 local
= queue_max_hw_sectors(mdev
->ldev
->backing_bdev
->bd_disk
->queue
) << 9;
1016 mdev
->local_max_bio_size
= local
;
1020 /* We may ignore peer limits if the peer is modern enough.
1021 Because new from 8.3.8 onwards the peer can use multiple
1022 BIOs for a single peer_request */
1023 if (mdev
->state
.conn
>= C_CONNECTED
) {
1024 if (mdev
->tconn
->agreed_pro_version
< 94)
1025 peer
= mdev
->peer_max_bio_size
;
1026 else if (mdev
->tconn
->agreed_pro_version
== 94)
1027 peer
= DRBD_MAX_SIZE_H80_PACKET
;
1028 else /* drbd 8.3.8 onwards */
1029 peer
= DRBD_MAX_BIO_SIZE
;
1032 new = min_t(int, local
, peer
);
1034 if (mdev
->state
.role
== R_PRIMARY
&& new < now
)
1035 dev_err(DEV
, "ASSERT FAILED new < now; (%d < %d)\n", new, now
);
1038 dev_info(DEV
, "max BIO size = %u\n", new);
1040 drbd_setup_queue_param(mdev
, new);
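/* The peer limit chosen above boils down to:
 *   agreed_pro_version <  94: trust the value the peer last advertised
 *   agreed_pro_version == 94: clamp to DRBD_MAX_SIZE_H80_PACKET
 *   agreed_pro_version >  94: DRBD_MAX_BIO_SIZE, the peer can split BIOs
 */
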
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in drbd_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}

/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}

static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf;
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

/* FIXME freeze IO, cluster wide.
 *
 * We should make sure no-one uses
 * some half-updated struct when we
 * assign it later. */

	new_disk_conf = kmalloc(sizeof(*new_disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memcpy(new_disk_conf, &mdev->ldev->dc, sizeof(*new_disk_conf));
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(new_disk_conf->al_extents >= DRBD_AL_EXTENTS_MIN))
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(new_disk_conf->al_extents <= DRBD_AL_EXTENTS_MAX))
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, new_disk_conf->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size   = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

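	/* On the arithmetic above: c_plan_ahead is configured in 0.1 second
	 * units, and SLEEP_TIME is (presumably) HZ/10 jiffies, so fifo_size
	 * works out to one fifo slot per 100ms of the configured planning
	 * horizon. */
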
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphor on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *new_disk_conf;

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(new_disk_conf);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_disk_conf_defaults(&nbc->dc);

	err = disk_conf_from_attrs(&nbc->dc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

;
1283 * meta_dev_idx >= 0: external fixed size, possibly multiple
1284 * drbd sharing one meta device. TODO in that case, paranoia
1285 * check that [md_bdev, meta_dev_idx] is not yet used by some
1286 * other drbd minor! (if you use drbd.conf + drbdadm, that
1287 * should check it for you already; but if you don't, or
1288 * someone fooled it, we need to double check here)
1290 bdev
= blkdev_get_by_path(nbc
->dc
.meta_dev
,
1291 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
,
1292 ((int)nbc
->dc
.meta_dev_idx
< 0) ?
1293 (void *)mdev
: (void *)drbd_m_holder
);
1295 dev_err(DEV
, "open(\"%s\") failed with %ld\n", nbc
->dc
.meta_dev
,
1297 retcode
= ERR_OPEN_MD_DISK
;
1300 nbc
->md_bdev
= bdev
;
1302 if ((nbc
->backing_bdev
== nbc
->md_bdev
) !=
1303 (nbc
->dc
.meta_dev_idx
== DRBD_MD_INDEX_INTERNAL
||
1304 nbc
->dc
.meta_dev_idx
== DRBD_MD_INDEX_FLEX_INT
)) {
1305 retcode
= ERR_MD_IDX_INVALID
;
1309 resync_lru
= lc_create("resync", drbd_bm_ext_cache
,
1310 1, 61, sizeof(struct bm_extent
),
1311 offsetof(struct bm_extent
, lce
));
1313 retcode
= ERR_NOMEM
;
1317 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1318 drbd_md_set_sector_offsets(mdev
, nbc
);
1320 if (drbd_get_max_capacity(nbc
) < nbc
->dc
.disk_size
) {
1321 dev_err(DEV
, "max capacity %llu smaller than disk size %llu\n",
1322 (unsigned long long) drbd_get_max_capacity(nbc
),
1323 (unsigned long long) nbc
->dc
.disk_size
);
1324 retcode
= ERR_DISK_TO_SMALL
;
1328 if ((int)nbc
->dc
.meta_dev_idx
< 0) {
1329 max_possible_sectors
= DRBD_MAX_SECTORS_FLEX
;
1330 /* at least one MB, otherwise it does not make sense */
1331 min_md_device_sectors
= (2<<10);
1333 max_possible_sectors
= DRBD_MAX_SECTORS
;
1334 min_md_device_sectors
= MD_RESERVED_SECT
* (nbc
->dc
.meta_dev_idx
+ 1);
1337 if (drbd_get_capacity(nbc
->md_bdev
) < min_md_device_sectors
) {
1338 retcode
= ERR_MD_DISK_TO_SMALL
;
1339 dev_warn(DEV
, "refusing attach: md-device too small, "
1340 "at least %llu sectors needed for this meta-disk type\n",
1341 (unsigned long long) min_md_device_sectors
);
1345 /* Make sure the new disk is big enough
1346 * (we may currently be R_PRIMARY with no local disk...) */
1347 if (drbd_get_max_capacity(nbc
) <
1348 drbd_get_capacity(mdev
->this_bdev
)) {
1349 retcode
= ERR_DISK_TO_SMALL
;
1353 nbc
->known_size
= drbd_get_capacity(nbc
->backing_bdev
);
1355 if (nbc
->known_size
> max_possible_sectors
) {
1356 dev_warn(DEV
, "==> truncating very big lower level device "
1357 "to currently maximum possible %llu sectors <==\n",
1358 (unsigned long long) max_possible_sectors
);
1359 if ((int)nbc
->dc
.meta_dev_idx
>= 0)
1360 dev_warn(DEV
, "==>> using internal or flexible "
1361 "meta data may help <<==\n");
	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, &nbc->dc)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	drbd_reconsider_max_bio_size(mdev);

);
1458 /* If I am currently not R_PRIMARY,
1459 * but meta data primary indicator is set,
1460 * I just now recover from a hard crash,
1461 * and have been R_PRIMARY before that crash.
1463 * Now, if I had no connection before that crash
1464 * (have been degraded R_PRIMARY), chances are that
1465 * I won't find my peer now either.
1467 * In that case, and _only_ in that case,
1468 * we use the degr-wfc-timeout instead of the default,
1469 * so we can automatically recover from a crash of a
1470 * degraded but active "cluster" after a certain timeout.
1472 clear_bit(USE_DEGR_WFC_T
, &mdev
->flags
);
1473 if (mdev
->state
.role
!= R_PRIMARY
&&
1474 drbd_md_test_flag(mdev
->ldev
, MDF_PRIMARY_IND
) &&
1475 !drbd_md_test_flag(mdev
->ldev
, MDF_CONNECTED_IND
))
1476 set_bit(USE_DEGR_WFC_T
, &mdev
->flags
);
1478 dd
= drbd_determine_dev_size(mdev
, 0);
1479 if (dd
== dev_size_error
) {
1480 retcode
= ERR_NOMEM_BITMAP
;
1481 goto force_diskless_dec
;
1482 } else if (dd
== grew
)
1483 set_bit(RESYNC_AFTER_NEG
, &mdev
->flags
);
1485 if (drbd_md_test_flag(mdev
->ldev
, MDF_FULL_SYNC
)) {
1486 dev_info(DEV
, "Assuming that all blocks are out of sync "
1487 "(aka FullSync)\n");
1488 if (drbd_bitmap_io(mdev
, &drbd_bmio_set_n_write
,
1489 "set_n_write from attaching", BM_LOCKED_MASK
)) {
1490 retcode
= ERR_IO_MD_DISK
;
1491 goto force_diskless_dec
;
1494 if (drbd_bitmap_io(mdev
, &drbd_bm_read
,
1495 "read from attaching", BM_LOCKED_MASK
)) {
1496 retcode
= ERR_IO_MD_DISK
;
1497 goto force_diskless_dec
;
1501 if (cp_discovered
) {
1502 drbd_al_apply_to_bm(mdev
);
1503 if (drbd_bitmap_io(mdev
, &drbd_bm_write
,
1504 "crashed primary apply AL", BM_LOCKED_MASK
)) {
1505 retcode
= ERR_IO_MD_DISK
;
1506 goto force_diskless_dec
;
1510 if (_drbd_bm_total_weight(mdev
) == drbd_bm_bits(mdev
))
1511 drbd_suspend_al(mdev
); /* IO is still suspended here... */
1513 spin_lock_irq(&mdev
->tconn
->req_lock
);
1514 os
= drbd_read_state(mdev
);
1516 /* If MDF_CONSISTENT is not set go into inconsistent state,
1517 otherwise investigate MDF_WasUpToDate...
1518 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1519 otherwise into D_CONSISTENT state.
1521 if (drbd_md_test_flag(mdev
->ldev
, MDF_CONSISTENT
)) {
1522 if (drbd_md_test_flag(mdev
->ldev
, MDF_WAS_UP_TO_DATE
))
1523 ns
.disk
= D_CONSISTENT
;
1525 ns
.disk
= D_OUTDATED
;
1527 ns
.disk
= D_INCONSISTENT
;
1530 if (drbd_md_test_flag(mdev
->ldev
, MDF_PEER_OUT_DATED
))
1531 ns
.pdsk
= D_OUTDATED
;
1533 if ( ns
.disk
== D_CONSISTENT
&&
1534 (ns
.pdsk
== D_OUTDATED
|| mdev
->ldev
->dc
.fencing
== FP_DONT_CARE
))
1535 ns
.disk
= D_UP_TO_DATE
;
1537 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1538 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1539 this point, because drbd_request_state() modifies these
1542 /* In case we are C_CONNECTED postpone any decision on the new disk
1543 state after the negotiation phase. */
1544 if (mdev
->state
.conn
== C_CONNECTED
) {
1545 mdev
->new_state_tmp
.i
= ns
.i
;
1547 ns
.disk
= D_NEGOTIATING
;
1549 /* We expect to receive up-to-date UUIDs soon.
1550 To avoid a race in receive_state, free p_uuid while
1551 holding req_lock. I.e. atomic with the state change */
1552 kfree(mdev
->p_uuid
);
1553 mdev
->p_uuid
= NULL
;
1556 rv
= _drbd_set_state(mdev
, ns
, CS_VERBOSE
, NULL
);
1557 spin_unlock_irq(&mdev
->tconn
->req_lock
);
1559 if (rv
< SS_SUCCESS
)
1560 goto force_diskless_dec
;
1562 if (mdev
->state
.role
== R_PRIMARY
)
1563 mdev
->ldev
->md
.uuid
[UI_CURRENT
] |= (u64
)1;
1565 mdev
->ldev
->md
.uuid
[UI_CURRENT
] &= ~(u64
)1;
1567 drbd_md_mark_dirty(mdev
);
1570 kobject_uevent(&disk_to_dev(mdev
->vdisk
)->kobj
, KOBJ_CHANGE
);
1572 conn_reconfig_done(mdev
->tconn
);
1573 drbd_adm_finish(info
, retcode
);
1579 drbd_force_state(mdev
, NS(disk
, D_FAILED
));
1582 conn_reconfig_done(mdev
->tconn
);
1584 if (nbc
->backing_bdev
)
1585 blkdev_put(nbc
->backing_bdev
,
1586 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
);
1588 blkdev_put(nbc
->md_bdev
,
1589 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
);
1592 lc_destroy(resync_lru
);
1595 drbd_adm_finish(info
, retcode
);
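/* Rough outline of the attach sequence implemented above:
 *  1. allocate and parse the new disk config (nbc), sanity check it
 *  2. open and exclusively claim the backing and meta data devices
 *  3. transition to D_ATTACHING, read the meta data, check UUIDs and sizes
 *  4. point of no return: hand nbc over to mdev->ldev
 *  5. load or initialize the bitmap, possibly apply the activity log
 *  6. derive the new disk state from the MDF_* flags and commit it
 * Failures before step 4 clean up locally (fail:); afterwards the state
 * engine cleans up via the force_diskless path. */
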
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	int ret;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}

	return rv;
}

static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->agreed_pro_version < 100 &&
	    tconn->cstate == C_WF_REPORT_PARAMS &&
	    new_conf->wire_protocol != old_conf->wire_protocol)
		return ERR_NEED_APV_100;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}

static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}

struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	void *int_dig_in;
	void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;
	int hash_size;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}
	if (crypto->integrity_tfm) {
		hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
		crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_in)
			return ERR_NOMEM;
		crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_vv)
			return ERR_NOMEM;
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	kfree(crypto->int_dig_in);
	kfree(crypto->int_dig_vv);
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}

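/* The four transforms collected in struct crypto serve separate purposes:
 * csums_tfm for checksum based resync, verify_tfm for online verify,
 * integrity_tfm for per-packet data integrity checking, and cram_hmac_tfm
 * for CRAM-HMAC peer authentication; int_dig_in/int_dig_vv are scratch
 * buffers sized to integrity_tfm's digest size. */
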
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };
	bool change_integrity_alg;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->net_conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	change_integrity_alg = strcmp(old_conf->integrity_alg,
				      new_conf->integrity_alg);

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	kfree(tconn->int_dig_in);
	tconn->int_dig_in = crypto.int_dig_in;
	kfree(tconn->int_dig_vv);
	tconn->int_dig_vv = crypto.int_dig_vv;
	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (change_integrity_alg) {
		/* Do this without trying to take tconn->data.mutex again.  */
		if (__drbd_send_protocol(tconn))
			goto fail;
	}

	/* FIXME Changing cram_hmac while the connection is established is useless */
	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->net_conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->net_conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}

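/* The update above follows the usual RCU publish pattern: build a complete
 * new net_conf off to the side, rcu_assign_pointer() it into place, and free
 * the old one only after a grace period.  Transforms that are in active use
 * by a running resync (rsr) or online verify (ovr) are deliberately left
 * untouched. */
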
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_rwsem here.  All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->net_conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->net_conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->int_dig_in = crypto.int_dig_in;
	tconn->int_dig_vv = crypto.int_dig_vv;
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	mutex_unlock(&tconn->net_conf_update);

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				 "unexpected rv2=%d in conn_try_disconnect()\n",
				 rv2);
	}

	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
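/* This handler is what a userland "disconnect" ends up in; with drbd 8.4
 * tools that would be something like "drbdsetup disconnect <resource>"
 * (the exact invocation depends on the drbdsetup version), where a
 * --force flag maps to parms.force_disconnect and thus to CS_HARD above. */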
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
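/* Decision logic above, spelled out: if the roles differ, the Primary
 * becomes sync source.  If both sides have the same role, the
 * DISCARD_CONCURRENT flag, which the handshake sets on exactly one of
 * the two nodes, breaks the tie, so precisely one node starts the
 * resync as source while the other waits in C_WF_SYNC_UUID. */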
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		put_ldev(mdev);
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
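/* rs.resize_size of 0 means "use all of the backing device".  When the
 * resize happens while connected, the peer learns the new size through
 * drbd_send_uuids()/drbd_send_sizes() above, and the grown area is
 * brought in sync via resync_after_online_grow() once both sides agree. */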
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	set_res_opts_defaults(r);
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	int *rs_plan_s = NULL;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	kfree(rs_plan_s);
	free_cpumask_var(new_cpu_mask);

	drbd_adm_finish(info, retcode);
	return 0;
}
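/* The cpu_mask string is parsed as a hex bitmap by __bitmap_parse();
 * e.g. a mask of "3" pins the receiver, asender and worker threads of
 * this tconn to CPUs 0 and 1.  On UP kernels (nr_cpu_ids == 1) the mask
 * is silently ignored, see above. */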
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
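/* The SS_NEED_CONNECTION loop above copes with a connection that goes
 * away while we ask for C_STARTING_SYNC_T: once we are no longer
 * connected, the invalidate degenerates to marking the local disk
 * D_INCONSISTENT under req_lock; as long as the state keeps changing
 * under us, we simply retry the request. */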
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
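/* Reminder: the NLA_PUT* macros used here and in nla_put_status_info()
 * below jump to a local "nla_put_failure" label when the skb runs out of
 * tailroom, which is why each of these functions must provide that label
 * and roll back any open nest with nla_nest_cancel(). */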
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	if (got_ldev)
		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
			goto nla_put_failure;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	down_read(&drbd_cfg_rwsem);
	/* revalidate iterator position */
	list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry(tconn->all_tconn.next,
					struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	up_read(&drbd_cfg_rwsem);
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
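/* Dump flow in short: a request without a DRBD_NLA_CFG_CONTEXT attribute
 * walks all resources; one that names a resource via T_ctx_conn_name is
 * restricted to that resource (cb->args[2] serves as the filter mark).
 * Every netlink_recvmsg() round trip re-enters get_one_status() with the
 * cursor kept in cb->args[], until an empty skb terminates the dump. */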
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned:
		 * round down to bitmap-bit granularity */
		mdev->ov_start_sector = parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
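/* Alignment example for the code above: with the default 4KiB bitmap
 * granularity BM_SECT_PER_BIT is 8, so a requested start sector of 13
 * is rounded down to 8 before w_make_ov_request() takes over. */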
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
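/* "Skip initial sync" sketch, hedged: on a freshly created, connected
 * device pair (UI_CURRENT still UUID_JUST_CREATED on both sides),
 * userland may issue something like
 *	drbdsetup new-current-uuid <minor> --clear-bitmap
 * on one node; both peers then jump straight to D_UP_TO_DATE without
 * copying any data. */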
static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	down_write(&drbd_cfg_rwsem);
	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code
adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		drbd_delete_device(mdev);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	retcode = adm_delete_minor(adm_ctx.mdev);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	down_read(&drbd_cfg_rwsem);
	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out_unlock;
		}
	}
	up_read(&drbd_cfg_rwsem);

	/* disconnect; may stop the receiver;
	 * must not hold the drbd_cfg_rwsem */
	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	down_read(&drbd_cfg_rwsem);
	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out_unlock;
		}
	}
	up_read(&drbd_cfg_rwsem);

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait().
	 * This needs to be done without holding drbd_cfg_rwsem. */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	down_write(&drbd_cfg_rwsem);
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			up_write(&drbd_cfg_rwsem);
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	up_write(&drbd_cfg_rwsem);
	goto out;
out_unlock:
	up_read(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
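/* Teardown order above is deliberate: demote every volume first (no
 * Primary may remain), then disconnect (which stops the receiver), then
 * detach each volume, and only delete minors and the connection itself
 * once the worker thread has really stopped and nothing can reach them
 * anymore. */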
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}
	up_write(&drbd_cfg_rwsem);

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = 0;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}
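/* An -ESRCH result from the multicast send just means "no listeners",
 * which is perfectly normal when no userland process subscribed to DRBD
 * events; it is therefore deliberately not reported above. */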