4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 drbd is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 2, or (at your option)
15 drbd is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with drbd; see the file COPYING. If not, write to
22 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 #include <linux/module.h>
27 #include <linux/drbd.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
41 #include <net/genetlink.h>
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
47 int drbd_adm_add_minor(struct sk_buff
*skb
, struct genl_info
*info
);
48 int drbd_adm_delete_minor(struct sk_buff
*skb
, struct genl_info
*info
);
50 int drbd_adm_new_resource(struct sk_buff
*skb
, struct genl_info
*info
);
51 int drbd_adm_del_resource(struct sk_buff
*skb
, struct genl_info
*info
);
52 int drbd_adm_down(struct sk_buff
*skb
, struct genl_info
*info
);
54 int drbd_adm_set_role(struct sk_buff
*skb
, struct genl_info
*info
);
55 int drbd_adm_attach(struct sk_buff
*skb
, struct genl_info
*info
);
56 int drbd_adm_disk_opts(struct sk_buff
*skb
, struct genl_info
*info
);
57 int drbd_adm_detach(struct sk_buff
*skb
, struct genl_info
*info
);
58 int drbd_adm_connect(struct sk_buff
*skb
, struct genl_info
*info
);
59 int drbd_adm_net_opts(struct sk_buff
*skb
, struct genl_info
*info
);
60 int drbd_adm_resize(struct sk_buff
*skb
, struct genl_info
*info
);
61 int drbd_adm_start_ov(struct sk_buff
*skb
, struct genl_info
*info
);
62 int drbd_adm_new_c_uuid(struct sk_buff
*skb
, struct genl_info
*info
);
63 int drbd_adm_disconnect(struct sk_buff
*skb
, struct genl_info
*info
);
64 int drbd_adm_invalidate(struct sk_buff
*skb
, struct genl_info
*info
);
65 int drbd_adm_invalidate_peer(struct sk_buff
*skb
, struct genl_info
*info
);
66 int drbd_adm_pause_sync(struct sk_buff
*skb
, struct genl_info
*info
);
67 int drbd_adm_resume_sync(struct sk_buff
*skb
, struct genl_info
*info
);
68 int drbd_adm_suspend_io(struct sk_buff
*skb
, struct genl_info
*info
);
69 int drbd_adm_resume_io(struct sk_buff
*skb
, struct genl_info
*info
);
70 int drbd_adm_outdate(struct sk_buff
*skb
, struct genl_info
*info
);
71 int drbd_adm_resource_opts(struct sk_buff
*skb
, struct genl_info
*info
);
72 int drbd_adm_get_status(struct sk_buff
*skb
, struct genl_info
*info
);
73 int drbd_adm_get_timeout_type(struct sk_buff
*skb
, struct genl_info
*info
);
75 int drbd_adm_get_status_all(struct sk_buff
*skb
, struct netlink_callback
*cb
);
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
80 /* used blkdev_get_by_path, to claim our meta data device(s) */
81 static char *drbd_m_holder
= "Hands off! this is DRBD's meta data device.";
83 /* Configuration is strictly serialized, because generic netlink message
84 * processing is strictly serialized by the genl_lock().
85 * Which means we can use one static global drbd_config_context struct.
87 static struct drbd_config_context
{
88 /* assigned from drbd_genlmsghdr */
90 /* assigned from request attributes, if present */
92 #define VOLUME_UNSPECIFIED (-1U)
93 /* pointer into the request skb,
94 * limited lifetime! */
98 struct sk_buff
*reply_skb
;
99 /* pointer into reply buffer */
100 struct drbd_genlmsghdr
*reply_dh
;
101 /* resolved from attributes, if possible */
102 struct drbd_conf
*mdev
;
103 struct drbd_tconn
*tconn
;
106 static void drbd_adm_send_reply(struct sk_buff
*skb
, struct genl_info
*info
)
108 genlmsg_end(skb
, genlmsg_data(nlmsg_data(nlmsg_hdr(skb
))));
109 if (genlmsg_reply(skb
, info
))
110 printk(KERN_ERR
"drbd: error sending genl reply\n");
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
114 * reason it could fail was no space in skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info
)
117 struct sk_buff
*skb
= adm_ctx
.reply_skb
;
121 if (!info
|| !info
[0])
124 nla
= nla_nest_start(skb
, DRBD_NLA_CFG_REPLY
);
128 err
= nla_put_string(skb
, T_info_text
, info
);
130 nla_nest_cancel(skb
, nla
);
133 nla_nest_end(skb
, nla
);
137 /* This would be a good candidate for a "pre_doit" hook,
138 * and per-family private info->pointers.
139 * But we need to stay compatible with older kernels.
140 * If it returns successfully, adm_ctx members are valid.
142 #define DRBD_ADM_NEED_MINOR 1
143 #define DRBD_ADM_NEED_CONN 2
144 static int drbd_adm_prepare(struct sk_buff
*skb
, struct genl_info
*info
,
147 struct drbd_genlmsghdr
*d_in
= info
->userhdr
;
148 const u8 cmd
= info
->genlhdr
->cmd
;
151 memset(&adm_ctx
, 0, sizeof(adm_ctx
));
153 /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154 if (cmd
!= DRBD_ADM_GET_STATUS
155 && security_netlink_recv(skb
, CAP_SYS_ADMIN
))
158 adm_ctx
.reply_skb
= genlmsg_new(NLMSG_GOODSIZE
, GFP_KERNEL
);
159 if (!adm_ctx
.reply_skb
) {
164 adm_ctx
.reply_dh
= genlmsg_put_reply(adm_ctx
.reply_skb
,
165 info
, &drbd_genl_family
, 0, cmd
);
166 /* put of a few bytes into a fresh skb of >= 4k will always succeed.
168 if (!adm_ctx
.reply_dh
) {
173 adm_ctx
.reply_dh
->minor
= d_in
->minor
;
174 adm_ctx
.reply_dh
->ret_code
= NO_ERROR
;
176 if (info
->attrs
[DRBD_NLA_CFG_CONTEXT
]) {
178 /* parse and validate only */
179 err
= drbd_cfg_context_from_attrs(NULL
, info
);
183 /* It was present, and valid,
184 * copy it over to the reply skb. */
185 err
= nla_put_nohdr(adm_ctx
.reply_skb
,
186 info
->attrs
[DRBD_NLA_CFG_CONTEXT
]->nla_len
,
187 info
->attrs
[DRBD_NLA_CFG_CONTEXT
]);
191 /* and assign stuff to the global adm_ctx */
192 nla
= nested_attr_tb
[__nla_type(T_ctx_volume
)];
193 adm_ctx
.volume
= nla
? nla_get_u32(nla
) : VOLUME_UNSPECIFIED
;
194 nla
= nested_attr_tb
[__nla_type(T_ctx_conn_name
)];
196 adm_ctx
.conn_name
= nla_data(nla
);
198 adm_ctx
.volume
= VOLUME_UNSPECIFIED
;
200 adm_ctx
.minor
= d_in
->minor
;
201 adm_ctx
.mdev
= minor_to_mdev(d_in
->minor
);
202 adm_ctx
.tconn
= conn_get_by_name(adm_ctx
.conn_name
);
204 if (!adm_ctx
.mdev
&& (flags
& DRBD_ADM_NEED_MINOR
)) {
205 drbd_msg_put_info("unknown minor");
206 return ERR_MINOR_INVALID
;
208 if (!adm_ctx
.tconn
&& (flags
& DRBD_ADM_NEED_CONN
)) {
209 drbd_msg_put_info("unknown connection");
210 return ERR_INVALID_REQUEST
;
213 /* some more paranoia, if the request was over-determined */
214 if (adm_ctx
.mdev
&& adm_ctx
.tconn
&&
215 adm_ctx
.mdev
->tconn
!= adm_ctx
.tconn
) {
216 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
217 adm_ctx
.minor
, adm_ctx
.conn_name
, adm_ctx
.mdev
->tconn
->name
);
218 drbd_msg_put_info("minor exists in different connection");
219 return ERR_INVALID_REQUEST
;
222 adm_ctx
.volume
!= VOLUME_UNSPECIFIED
&&
223 adm_ctx
.volume
!= adm_ctx
.mdev
->vnr
) {
224 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
225 adm_ctx
.minor
, adm_ctx
.volume
,
226 adm_ctx
.mdev
->vnr
, adm_ctx
.mdev
->tconn
->name
);
227 drbd_msg_put_info("minor exists as different volume");
228 return ERR_INVALID_REQUEST
;
234 nlmsg_free(adm_ctx
.reply_skb
);
235 adm_ctx
.reply_skb
= NULL
;
239 static int drbd_adm_finish(struct genl_info
*info
, int retcode
)
242 const char *conn_name
= NULL
;
245 kref_put(&adm_ctx
.tconn
->kref
, &conn_destroy
);
246 adm_ctx
.tconn
= NULL
;
249 if (!adm_ctx
.reply_skb
)
252 adm_ctx
.reply_dh
->ret_code
= retcode
;
254 nla
= info
->attrs
[DRBD_NLA_CFG_CONTEXT
];
256 nla
= nla_find_nested(nla
, __nla_type(T_ctx_conn_name
));
258 conn_name
= nla_data(nla
);
261 drbd_adm_send_reply(adm_ctx
.reply_skb
, info
);
265 static void setup_khelper_env(struct drbd_tconn
*tconn
, char **envp
)
271 nc
= rcu_dereference(tconn
->net_conf
);
273 switch (((struct sockaddr
*)nc
->peer_addr
)->sa_family
) {
276 snprintf(envp
[4], 60, "DRBD_PEER_ADDRESS=%pI6",
277 &((struct sockaddr_in6
*)nc
->peer_addr
)->sin6_addr
);
281 snprintf(envp
[4], 60, "DRBD_PEER_ADDRESS=%pI4",
282 &((struct sockaddr_in
*)nc
->peer_addr
)->sin_addr
);
286 snprintf(envp
[4], 60, "DRBD_PEER_ADDRESS=%pI4",
287 &((struct sockaddr_in
*)nc
->peer_addr
)->sin_addr
);
289 snprintf(envp
[3], 20, "DRBD_PEER_AF=%s", afs
);
294 int drbd_khelper(struct drbd_conf
*mdev
, char *cmd
)
296 char *envp
[] = { "HOME=/",
298 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
299 (char[20]) { }, /* address family */
300 (char[60]) { }, /* address */
303 char *argv
[] = {usermode_helper
, cmd
, mb
, NULL
};
307 snprintf(mb
, 12, "minor-%d", mdev_to_minor(mdev
));
308 setup_khelper_env(mdev
->tconn
, envp
);
310 /* The helper may take some time.
311 * write out any unsynced meta data changes now */
314 dev_info(DEV
, "helper command: %s %s %s\n", usermode_helper
, cmd
, mb
);
315 sib
.sib_reason
= SIB_HELPER_PRE
;
316 sib
.helper_name
= cmd
;
317 drbd_bcast_event(mdev
, &sib
);
318 ret
= call_usermodehelper(usermode_helper
, argv
, envp
, 1);
320 dev_warn(DEV
, "helper command: %s %s %s exit code %u (0x%x)\n",
321 usermode_helper
, cmd
, mb
,
322 (ret
>> 8) & 0xff, ret
);
324 dev_info(DEV
, "helper command: %s %s %s exit code %u (0x%x)\n",
325 usermode_helper
, cmd
, mb
,
326 (ret
>> 8) & 0xff, ret
);
327 sib
.sib_reason
= SIB_HELPER_POST
;
328 sib
.helper_exit_code
= ret
;
329 drbd_bcast_event(mdev
, &sib
);
331 if (ret
< 0) /* Ignore any ERRNOs we got. */
337 static void conn_md_sync(struct drbd_tconn
*tconn
)
339 struct drbd_conf
*mdev
;
343 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
) {
344 kref_get(&mdev
->kref
);
347 kref_put(&mdev
->kref
, &drbd_minor_destroy
);
353 int conn_khelper(struct drbd_tconn
*tconn
, char *cmd
)
355 char *envp
[] = { "HOME=/",
357 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
358 (char[20]) { }, /* address family */
359 (char[60]) { }, /* address */
361 char *argv
[] = {usermode_helper
, cmd
, tconn
->name
, NULL
};
364 setup_khelper_env(tconn
, envp
);
367 conn_info(tconn
, "helper command: %s %s %s\n", usermode_helper
, cmd
, tconn
->name
);
368 /* TODO: conn_bcast_event() ?? */
370 ret
= call_usermodehelper(usermode_helper
, argv
, envp
, 1);
372 conn_warn(tconn
, "helper command: %s %s %s exit code %u (0x%x)\n",
373 usermode_helper
, cmd
, tconn
->name
,
374 (ret
>> 8) & 0xff, ret
);
376 conn_info(tconn
, "helper command: %s %s %s exit code %u (0x%x)\n",
377 usermode_helper
, cmd
, tconn
->name
,
378 (ret
>> 8) & 0xff, ret
);
379 /* TODO: conn_bcast_event() ?? */
381 if (ret
< 0) /* Ignore any ERRNOs we got. */
387 static enum drbd_fencing_p
highest_fencing_policy(struct drbd_tconn
*tconn
)
389 enum drbd_fencing_p fp
= FP_NOT_AVAIL
;
390 struct drbd_conf
*mdev
;
394 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
) {
395 if (get_ldev_if_state(mdev
, D_CONSISTENT
)) {
396 fp
= max_t(enum drbd_fencing_p
, fp
,
397 rcu_dereference(mdev
->ldev
->disk_conf
)->fencing
);
406 bool conn_try_outdate_peer(struct drbd_tconn
*tconn
)
408 union drbd_state mask
= { };
409 union drbd_state val
= { };
410 enum drbd_fencing_p fp
;
414 if (tconn
->cstate
>= C_WF_REPORT_PARAMS
) {
415 conn_err(tconn
, "Expected cstate < C_WF_REPORT_PARAMS\n");
419 fp
= highest_fencing_policy(tconn
);
422 conn_warn(tconn
, "Not fencing peer, I'm not even Consistent myself.\n");
429 r
= conn_khelper(tconn
, "fence-peer");
431 switch ((r
>>8) & 0xff) {
432 case 3: /* peer is inconsistent */
433 ex_to_string
= "peer is inconsistent or worse";
435 val
.pdsk
= D_INCONSISTENT
;
437 case 4: /* peer got outdated, or was already outdated */
438 ex_to_string
= "peer was fenced";
440 val
.pdsk
= D_OUTDATED
;
442 case 5: /* peer was down */
443 if (conn_highest_disk(tconn
) == D_UP_TO_DATE
) {
444 /* we will(have) create(d) a new UUID anyways... */
445 ex_to_string
= "peer is unreachable, assumed to be dead";
447 val
.pdsk
= D_OUTDATED
;
449 ex_to_string
= "peer unreachable, doing nothing since disk != UpToDate";
452 case 6: /* Peer is primary, voluntarily outdate myself.
453 * This is useful when an unconnected R_SECONDARY is asked to
454 * become R_PRIMARY, but finds the other peer being active. */
455 ex_to_string
= "peer is active";
456 conn_warn(tconn
, "Peer is primary, outdating myself.\n");
458 val
.disk
= D_OUTDATED
;
461 if (fp
!= FP_STONITH
)
462 conn_err(tconn
, "fence-peer() = 7 && fencing != Stonith !!!\n");
463 ex_to_string
= "peer was stonithed";
465 val
.pdsk
= D_OUTDATED
;
468 /* The script is broken ... */
469 conn_err(tconn
, "fence-peer helper broken, returned %d\n", (r
>>8)&0xff);
470 return false; /* Eventually leave IO frozen */
473 conn_info(tconn
, "fence-peer helper returned %d (%s)\n",
474 (r
>>8) & 0xff, ex_to_string
);
479 conn_request_state(tconn, mask, val, CS_VERBOSE);
480 here, because we might were able to re-establish the connection in the
482 spin_lock_irq(&tconn
->req_lock
);
483 if (tconn
->cstate
< C_WF_REPORT_PARAMS
)
484 _conn_request_state(tconn
, mask
, val
, CS_VERBOSE
);
485 spin_unlock_irq(&tconn
->req_lock
);
487 return conn_highest_pdsk(tconn
) <= D_OUTDATED
;
490 static int _try_outdate_peer_async(void *data
)
492 struct drbd_tconn
*tconn
= (struct drbd_tconn
*)data
;
494 conn_try_outdate_peer(tconn
);
496 kref_put(&tconn
->kref
, &conn_destroy
);
500 void conn_try_outdate_peer_async(struct drbd_tconn
*tconn
)
502 struct task_struct
*opa
;
504 kref_get(&tconn
->kref
);
505 opa
= kthread_run(_try_outdate_peer_async
, tconn
, "drbd_async_h");
507 conn_err(tconn
, "out of mem, failed to invoke fence-peer helper\n");
508 kref_put(&tconn
->kref
, &conn_destroy
);
513 drbd_set_role(struct drbd_conf
*mdev
, enum drbd_role new_role
, int force
)
515 const int max_tries
= 4;
516 enum drbd_state_rv rv
= SS_UNKNOWN_ERROR
;
520 union drbd_state mask
, val
;
522 if (new_role
== R_PRIMARY
)
523 request_ping(mdev
->tconn
); /* Detect a dead peer ASAP */
525 mutex_lock(mdev
->state_mutex
);
527 mask
.i
= 0; mask
.role
= R_MASK
;
528 val
.i
= 0; val
.role
= new_role
;
530 while (try++ < max_tries
) {
531 rv
= _drbd_request_state(mdev
, mask
, val
, CS_WAIT_COMPLETE
);
533 /* in case we first succeeded to outdate,
534 * but now suddenly could establish a connection */
535 if (rv
== SS_CW_FAILED_BY_PEER
&& mask
.pdsk
!= 0) {
541 if (rv
== SS_NO_UP_TO_DATE_DISK
&& force
&&
542 (mdev
->state
.disk
< D_UP_TO_DATE
&&
543 mdev
->state
.disk
>= D_INCONSISTENT
)) {
545 val
.disk
= D_UP_TO_DATE
;
550 if (rv
== SS_NO_UP_TO_DATE_DISK
&&
551 mdev
->state
.disk
== D_CONSISTENT
&& mask
.pdsk
== 0) {
552 D_ASSERT(mdev
->state
.pdsk
== D_UNKNOWN
);
554 if (conn_try_outdate_peer(mdev
->tconn
)) {
555 val
.disk
= D_UP_TO_DATE
;
561 if (rv
== SS_NOTHING_TO_DO
)
563 if (rv
== SS_PRIMARY_NOP
&& mask
.pdsk
== 0) {
564 if (!conn_try_outdate_peer(mdev
->tconn
) && force
) {
565 dev_warn(DEV
, "Forced into split brain situation!\n");
567 val
.pdsk
= D_OUTDATED
;
572 if (rv
== SS_TWO_PRIMARIES
) {
573 /* Maybe the peer is detected as dead very soon...
574 retry at most once more in this case. */
577 nc
= rcu_dereference(mdev
->tconn
->net_conf
);
578 timeo
= nc
? (nc
->ping_timeo
+ 1) * HZ
/ 10 : 1;
580 schedule_timeout_interruptible(timeo
);
585 if (rv
< SS_SUCCESS
) {
586 rv
= _drbd_request_state(mdev
, mask
, val
,
587 CS_VERBOSE
+ CS_WAIT_COMPLETE
);
598 dev_warn(DEV
, "Forced to consider local data as UpToDate!\n");
600 /* Wait until nothing is on the fly :) */
601 wait_event(mdev
->misc_wait
, atomic_read(&mdev
->ap_pending_cnt
) == 0);
603 if (new_role
== R_SECONDARY
) {
604 set_disk_ro(mdev
->vdisk
, true);
605 if (get_ldev(mdev
)) {
606 mdev
->ldev
->md
.uuid
[UI_CURRENT
] &= ~(u64
)1;
610 mutex_lock(&mdev
->tconn
->conf_update
);
611 nc
= mdev
->tconn
->net_conf
;
613 nc
->discard_my_data
= 0; /* without copy; single bit op is atomic */
614 mutex_unlock(&mdev
->tconn
->conf_update
);
616 set_disk_ro(mdev
->vdisk
, false);
617 if (get_ldev(mdev
)) {
618 if (((mdev
->state
.conn
< C_CONNECTED
||
619 mdev
->state
.pdsk
<= D_FAILED
)
620 && mdev
->ldev
->md
.uuid
[UI_BITMAP
] == 0) || forced
)
621 drbd_uuid_new_current(mdev
);
623 mdev
->ldev
->md
.uuid
[UI_CURRENT
] |= (u64
)1;
628 /* writeout of activity log covered areas of the bitmap
629 * to stable storage done in after state change already */
631 if (mdev
->state
.conn
>= C_WF_REPORT_PARAMS
) {
632 /* if this was forced, we should consider sync */
634 drbd_send_uuids(mdev
);
635 drbd_send_state(mdev
);
640 kobject_uevent(&disk_to_dev(mdev
->vdisk
)->kobj
, KOBJ_CHANGE
);
642 mutex_unlock(mdev
->state_mutex
);
646 static const char *from_attrs_err_to_txt(int err
)
648 return err
== -ENOMSG
? "required attribute missing" :
649 err
== -EOPNOTSUPP
? "unknown mandatory attribute" :
650 err
== -EEXIST
? "can not change invariant setting" :
651 "invalid attribute value";
654 int drbd_adm_set_role(struct sk_buff
*skb
, struct genl_info
*info
)
656 struct set_role_parms parms
;
658 enum drbd_ret_code retcode
;
660 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_MINOR
);
661 if (!adm_ctx
.reply_skb
)
663 if (retcode
!= NO_ERROR
)
666 memset(&parms
, 0, sizeof(parms
));
667 if (info
->attrs
[DRBD_NLA_SET_ROLE_PARMS
]) {
668 err
= set_role_parms_from_attrs(&parms
, info
);
670 retcode
= ERR_MANDATORY_TAG
;
671 drbd_msg_put_info(from_attrs_err_to_txt(err
));
676 if (info
->genlhdr
->cmd
== DRBD_ADM_PRIMARY
)
677 retcode
= drbd_set_role(adm_ctx
.mdev
, R_PRIMARY
, parms
.assume_uptodate
);
679 retcode
= drbd_set_role(adm_ctx
.mdev
, R_SECONDARY
, 0);
681 drbd_adm_finish(info
, retcode
);
685 /* initializes the md.*_offset members, so we are able to find
686 * the on disk meta data */
687 static void drbd_md_set_sector_offsets(struct drbd_conf
*mdev
,
688 struct drbd_backing_dev
*bdev
)
690 sector_t md_size_sect
= 0;
694 meta_dev_idx
= rcu_dereference(bdev
->disk_conf
)->meta_dev_idx
;
696 switch (meta_dev_idx
) {
698 /* v07 style fixed size indexed meta data */
699 bdev
->md
.md_size_sect
= MD_RESERVED_SECT
;
700 bdev
->md
.md_offset
= drbd_md_ss__(mdev
, bdev
);
701 bdev
->md
.al_offset
= MD_AL_OFFSET
;
702 bdev
->md
.bm_offset
= MD_BM_OFFSET
;
704 case DRBD_MD_INDEX_FLEX_EXT
:
705 /* just occupy the full device; unit: sectors */
706 bdev
->md
.md_size_sect
= drbd_get_capacity(bdev
->md_bdev
);
707 bdev
->md
.md_offset
= 0;
708 bdev
->md
.al_offset
= MD_AL_OFFSET
;
709 bdev
->md
.bm_offset
= MD_BM_OFFSET
;
711 case DRBD_MD_INDEX_INTERNAL
:
712 case DRBD_MD_INDEX_FLEX_INT
:
713 bdev
->md
.md_offset
= drbd_md_ss__(mdev
, bdev
);
714 /* al size is still fixed */
715 bdev
->md
.al_offset
= -MD_AL_SECTORS
;
716 /* we need (slightly less than) ~ this much bitmap sectors: */
717 md_size_sect
= drbd_get_capacity(bdev
->backing_bdev
);
718 md_size_sect
= ALIGN(md_size_sect
, BM_SECT_PER_EXT
);
719 md_size_sect
= BM_SECT_TO_EXT(md_size_sect
);
720 md_size_sect
= ALIGN(md_size_sect
, 8);
722 /* plus the "drbd meta data super block",
723 * and the activity log; */
724 md_size_sect
+= MD_BM_OFFSET
;
726 bdev
->md
.md_size_sect
= md_size_sect
;
727 /* bitmap offset is adjusted by 'super' block size */
728 bdev
->md
.bm_offset
= -md_size_sect
+ MD_AL_OFFSET
;
734 /* input size is expected to be in KB */
735 char *ppsize(char *buf
, unsigned long long size
)
737 /* Needs 9 bytes at max including trailing NUL:
738 * -1ULL ==> "16384 EB" */
739 static char units
[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
741 while (size
>= 10000 && base
< sizeof(units
)-1) {
743 size
= (size
>> 10) + !!(size
& (1<<9));
746 sprintf(buf
, "%u %cB", (unsigned)size
, units
[base
]);
751 /* there is still a theoretical deadlock when called from receiver
752 * on an D_INCONSISTENT R_PRIMARY:
753 * remote READ does inc_ap_bio, receiver would need to receive answer
754 * packet from remote to dec_ap_bio again.
755 * receiver receive_sizes(), comes here,
756 * waits for ap_bio_cnt == 0. -> deadlock.
757 * but this cannot happen, actually, because:
758 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
759 * (not connected, or bad/no disk on peer):
760 * see drbd_fail_request_early, ap_bio_cnt is zero.
761 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
762 * peer may not initiate a resize.
764 /* Note these are not to be confused with
765 * drbd_adm_suspend_io/drbd_adm_resume_io,
766 * which are (sub) state changes triggered by admin (drbdsetup),
767 * and can be long lived.
768 * This changes an mdev->flag, is triggered by drbd internals,
769 * and should be short-lived. */
770 void drbd_suspend_io(struct drbd_conf
*mdev
)
772 set_bit(SUSPEND_IO
, &mdev
->flags
);
773 if (drbd_suspended(mdev
))
775 wait_event(mdev
->misc_wait
, !atomic_read(&mdev
->ap_bio_cnt
));
778 void drbd_resume_io(struct drbd_conf
*mdev
)
780 clear_bit(SUSPEND_IO
, &mdev
->flags
);
781 wake_up(&mdev
->misc_wait
);
785 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
786 * @mdev: DRBD device.
788 * Returns 0 on success, negative return values indicate errors.
789 * You should call drbd_md_sync() after calling this function.
791 enum determine_dev_size
drbd_determine_dev_size(struct drbd_conf
*mdev
, enum dds_flags flags
) __must_hold(local
)
793 sector_t prev_first_sect
, prev_size
; /* previous meta location */
794 sector_t la_size
, u_size
;
798 int md_moved
, la_size_changed
;
799 enum determine_dev_size rv
= unchanged
;
802 * application request passes inc_ap_bio,
803 * but then cannot get an AL-reference.
804 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
807 * Suspend IO right here.
808 * still lock the act_log to not trigger ASSERTs there.
810 drbd_suspend_io(mdev
);
812 /* no wait necessary anymore, actually we could assert that */
813 wait_event(mdev
->al_wait
, lc_try_lock(mdev
->act_log
));
815 prev_first_sect
= drbd_md_first_sector(mdev
->ldev
);
816 prev_size
= mdev
->ldev
->md
.md_size_sect
;
817 la_size
= mdev
->ldev
->md
.la_size_sect
;
819 /* TODO: should only be some assert here, not (re)init... */
820 drbd_md_set_sector_offsets(mdev
, mdev
->ldev
);
823 u_size
= rcu_dereference(mdev
->ldev
->disk_conf
)->disk_size
;
825 size
= drbd_new_dev_size(mdev
, mdev
->ldev
, u_size
, flags
& DDSF_FORCED
);
827 if (drbd_get_capacity(mdev
->this_bdev
) != size
||
828 drbd_bm_capacity(mdev
) != size
) {
830 err
= drbd_bm_resize(mdev
, size
, !(flags
& DDSF_NO_RESYNC
));
832 /* currently there is only one error: ENOMEM! */
833 size
= drbd_bm_capacity(mdev
)>>1;
835 dev_err(DEV
, "OUT OF MEMORY! "
836 "Could not allocate bitmap!\n");
838 dev_err(DEV
, "BM resizing failed. "
839 "Leaving size unchanged at size = %lu KB\n",
840 (unsigned long)size
);
844 /* racy, see comments above. */
845 drbd_set_my_capacity(mdev
, size
);
846 mdev
->ldev
->md
.la_size_sect
= size
;
847 dev_info(DEV
, "size = %s (%llu KB)\n", ppsize(ppb
, size
>>1),
848 (unsigned long long)size
>>1);
850 if (rv
== dev_size_error
)
853 la_size_changed
= (la_size
!= mdev
->ldev
->md
.la_size_sect
);
855 md_moved
= prev_first_sect
!= drbd_md_first_sector(mdev
->ldev
)
856 || prev_size
!= mdev
->ldev
->md
.md_size_sect
;
858 if (la_size_changed
|| md_moved
) {
861 drbd_al_shrink(mdev
); /* All extents inactive. */
862 dev_info(DEV
, "Writing the whole bitmap, %s\n",
863 la_size_changed
&& md_moved
? "size changed and md moved" :
864 la_size_changed
? "size changed" : "md moved");
865 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
866 err
= drbd_bitmap_io(mdev
, &drbd_bm_write
,
867 "size changed", BM_LOCKED_MASK
);
872 drbd_md_mark_dirty(mdev
);
880 lc_unlock(mdev
->act_log
);
881 wake_up(&mdev
->al_wait
);
882 drbd_resume_io(mdev
);
888 drbd_new_dev_size(struct drbd_conf
*mdev
, struct drbd_backing_dev
*bdev
,
889 sector_t u_size
, int assume_peer_has_space
)
891 sector_t p_size
= mdev
->p_size
; /* partner's disk size. */
892 sector_t la_size
= bdev
->md
.la_size_sect
; /* last agreed size. */
893 sector_t m_size
; /* my size */
896 m_size
= drbd_get_max_capacity(bdev
);
898 if (mdev
->state
.conn
< C_CONNECTED
&& assume_peer_has_space
) {
899 dev_warn(DEV
, "Resize while not connected was forced by the user!\n");
903 if (p_size
&& m_size
) {
904 size
= min_t(sector_t
, p_size
, m_size
);
908 if (m_size
&& m_size
< size
)
910 if (p_size
&& p_size
< size
)
921 dev_err(DEV
, "Both nodes diskless!\n");
925 dev_err(DEV
, "Requested disk size is too big (%lu > %lu)\n",
926 (unsigned long)u_size
>>1, (unsigned long)size
>>1);
935 * drbd_check_al_size() - Ensures that the AL is of the right size
936 * @mdev: DRBD device.
938 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
939 * failed, and 0 on success. You should call drbd_md_sync() after you called
942 static int drbd_check_al_size(struct drbd_conf
*mdev
, struct disk_conf
*dc
)
944 struct lru_cache
*n
, *t
;
945 struct lc_element
*e
;
950 mdev
->act_log
->nr_elements
== dc
->al_extents
)
955 n
= lc_create("act_log", drbd_al_ext_cache
, AL_UPDATES_PER_TRANSACTION
,
956 dc
->al_extents
, sizeof(struct lc_element
), 0);
959 dev_err(DEV
, "Cannot allocate act_log lru!\n");
962 spin_lock_irq(&mdev
->al_lock
);
964 for (i
= 0; i
< t
->nr_elements
; i
++) {
965 e
= lc_element_by_index(t
, i
);
967 dev_err(DEV
, "refcnt(%d)==%d\n",
968 e
->lc_number
, e
->refcnt
);
974 spin_unlock_irq(&mdev
->al_lock
);
976 dev_err(DEV
, "Activity log still in use!\n");
983 drbd_md_mark_dirty(mdev
); /* we changed mdev->act_log->nr_elemens */
987 static void drbd_setup_queue_param(struct drbd_conf
*mdev
, unsigned int max_bio_size
)
989 struct request_queue
* const q
= mdev
->rq_queue
;
990 int max_hw_sectors
= max_bio_size
>> 9;
991 int max_segments
= 0;
993 if (get_ldev_if_state(mdev
, D_ATTACHING
)) {
994 struct request_queue
* const b
= mdev
->ldev
->backing_bdev
->bd_disk
->queue
;
996 max_hw_sectors
= min(queue_max_hw_sectors(b
), max_bio_size
>> 9);
998 max_segments
= rcu_dereference(mdev
->ldev
->disk_conf
)->max_bio_bvecs
;
1003 blk_queue_logical_block_size(q
, 512);
1004 blk_queue_max_hw_sectors(q
, max_hw_sectors
);
1005 /* This is the workaround for "bio would need to, but cannot, be split" */
1006 blk_queue_max_segments(q
, max_segments
? max_segments
: BLK_MAX_SEGMENTS
);
1007 blk_queue_segment_boundary(q
, PAGE_CACHE_SIZE
-1);
1009 if (get_ldev_if_state(mdev
, D_ATTACHING
)) {
1010 struct request_queue
* const b
= mdev
->ldev
->backing_bdev
->bd_disk
->queue
;
1012 blk_queue_stack_limits(q
, b
);
1014 if (q
->backing_dev_info
.ra_pages
!= b
->backing_dev_info
.ra_pages
) {
1015 dev_info(DEV
, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
1016 q
->backing_dev_info
.ra_pages
,
1017 b
->backing_dev_info
.ra_pages
);
1018 q
->backing_dev_info
.ra_pages
= b
->backing_dev_info
.ra_pages
;
1024 void drbd_reconsider_max_bio_size(struct drbd_conf
*mdev
)
1026 int now
, new, local
, peer
;
1028 now
= queue_max_hw_sectors(mdev
->rq_queue
) << 9;
1029 local
= mdev
->local_max_bio_size
; /* Eventually last known value, from volatile memory */
1030 peer
= mdev
->peer_max_bio_size
; /* Eventually last known value, from meta data */
1032 if (get_ldev_if_state(mdev
, D_ATTACHING
)) {
1033 local
= queue_max_hw_sectors(mdev
->ldev
->backing_bdev
->bd_disk
->queue
) << 9;
1034 mdev
->local_max_bio_size
= local
;
1038 /* We may ignore peer limits if the peer is modern enough.
1039 Because new from 8.3.8 onwards the peer can use multiple
1040 BIOs for a single peer_request */
1041 if (mdev
->state
.conn
>= C_CONNECTED
) {
1042 if (mdev
->tconn
->agreed_pro_version
< 94)
1043 peer
= mdev
->peer_max_bio_size
;
1044 else if (mdev
->tconn
->agreed_pro_version
== 94)
1045 peer
= DRBD_MAX_SIZE_H80_PACKET
;
1046 else /* drbd 8.3.8 onwards */
1047 peer
= DRBD_MAX_BIO_SIZE
;
1050 new = min_t(int, local
, peer
);
1052 if (mdev
->state
.role
== R_PRIMARY
&& new < now
)
1053 dev_err(DEV
, "ASSERT FAILED new < now; (%d < %d)\n", new, now
);
1056 dev_info(DEV
, "max BIO size = %u\n", new);
1058 drbd_setup_queue_param(mdev
, new);
1061 /* Starts the worker thread */
1062 static void conn_reconfig_start(struct drbd_tconn
*tconn
)
1064 drbd_thread_start(&tconn
->worker
);
1065 conn_flush_workqueue(tconn
);
1068 /* if still unconfigured, stops worker again. */
1069 static void conn_reconfig_done(struct drbd_tconn
*tconn
)
1072 spin_lock_irq(&tconn
->req_lock
);
1073 stop_threads
= conn_all_vols_unconf(tconn
);
1074 spin_unlock_irq(&tconn
->req_lock
);
1076 /* asender is implicitly stopped by receiver
1077 * in conn_disconnect() */
1078 drbd_thread_stop(&tconn
->receiver
);
1079 drbd_thread_stop(&tconn
->worker
);
1083 /* Make sure IO is suspended before calling this function(). */
1084 static void drbd_suspend_al(struct drbd_conf
*mdev
)
1088 if (!lc_try_lock(mdev
->act_log
)) {
1089 dev_warn(DEV
, "Failed to lock al in drbd_suspend_al()\n");
1093 drbd_al_shrink(mdev
);
1094 spin_lock_irq(&mdev
->tconn
->req_lock
);
1095 if (mdev
->state
.conn
< C_CONNECTED
)
1096 s
= !test_and_set_bit(AL_SUSPENDED
, &mdev
->flags
);
1097 spin_unlock_irq(&mdev
->tconn
->req_lock
);
1098 lc_unlock(mdev
->act_log
);
1101 dev_info(DEV
, "Suspended AL updates\n");
1105 static bool should_set_defaults(struct genl_info
*info
)
1107 unsigned flags
= ((struct drbd_genlmsghdr
*)info
->userhdr
)->flags
;
1108 return 0 != (flags
& DRBD_GENL_F_SET_DEFAULTS
);
1111 static void enforce_disk_conf_limits(struct disk_conf
*dc
)
1113 if (dc
->al_extents
< DRBD_AL_EXTENTS_MIN
)
1114 dc
->al_extents
= DRBD_AL_EXTENTS_MIN
;
1115 if (dc
->al_extents
> DRBD_AL_EXTENTS_MAX
)
1116 dc
->al_extents
= DRBD_AL_EXTENTS_MAX
;
1118 if (dc
->c_plan_ahead
> DRBD_C_PLAN_AHEAD_MAX
)
1119 dc
->c_plan_ahead
= DRBD_C_PLAN_AHEAD_MAX
;
1122 int drbd_adm_disk_opts(struct sk_buff
*skb
, struct genl_info
*info
)
1124 enum drbd_ret_code retcode
;
1125 struct drbd_conf
*mdev
;
1126 struct disk_conf
*new_disk_conf
, *old_disk_conf
;
1127 struct fifo_buffer
*old_plan
= NULL
, *new_plan
= NULL
;
1130 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_MINOR
);
1131 if (!adm_ctx
.reply_skb
)
1133 if (retcode
!= NO_ERROR
)
1136 mdev
= adm_ctx
.mdev
;
1138 /* we also need a disk
1139 * to change the options on */
1140 if (!get_ldev(mdev
)) {
1141 retcode
= ERR_NO_DISK
;
1145 new_disk_conf
= kmalloc(sizeof(struct disk_conf
), GFP_KERNEL
);
1146 if (!new_disk_conf
) {
1147 retcode
= ERR_NOMEM
;
1151 mutex_lock(&mdev
->tconn
->conf_update
);
1152 old_disk_conf
= mdev
->ldev
->disk_conf
;
1153 *new_disk_conf
= *old_disk_conf
;
1154 if (should_set_defaults(info
))
1155 set_disk_conf_defaults(new_disk_conf
);
1157 err
= disk_conf_from_attrs_for_change(new_disk_conf
, info
);
1158 if (err
&& err
!= -ENOMSG
) {
1159 retcode
= ERR_MANDATORY_TAG
;
1160 drbd_msg_put_info(from_attrs_err_to_txt(err
));
1163 if (!expect(new_disk_conf
->resync_rate
>= 1))
1164 new_disk_conf
->resync_rate
= 1;
1166 enforce_disk_conf_limits(new_disk_conf
);
1168 fifo_size
= (new_disk_conf
->c_plan_ahead
* 10 * SLEEP_TIME
) / HZ
;
1169 if (fifo_size
!= mdev
->rs_plan_s
->size
) {
1170 new_plan
= fifo_alloc(fifo_size
);
1172 dev_err(DEV
, "kmalloc of fifo_buffer failed");
1173 retcode
= ERR_NOMEM
;
1178 wait_event(mdev
->al_wait
, lc_try_lock(mdev
->act_log
));
1179 drbd_al_shrink(mdev
);
1180 err
= drbd_check_al_size(mdev
, new_disk_conf
);
1181 lc_unlock(mdev
->act_log
);
1182 wake_up(&mdev
->al_wait
);
1185 retcode
= ERR_NOMEM
;
1189 write_lock_irq(&global_state_lock
);
1190 retcode
= drbd_resync_after_valid(mdev
, new_disk_conf
->resync_after
);
1191 if (retcode
== NO_ERROR
) {
1192 rcu_assign_pointer(mdev
->ldev
->disk_conf
, new_disk_conf
);
1193 drbd_resync_after_changed(mdev
);
1195 write_unlock_irq(&global_state_lock
);
1197 if (retcode
!= NO_ERROR
)
1201 old_plan
= mdev
->rs_plan_s
;
1202 rcu_assign_pointer(mdev
->rs_plan_s
, new_plan
);
1205 mutex_unlock(&mdev
->tconn
->conf_update
);
1208 if (mdev
->state
.conn
>= C_CONNECTED
)
1209 drbd_send_sync_param(mdev
);
1212 kfree(old_disk_conf
);
1217 mutex_unlock(&mdev
->tconn
->conf_update
);
1219 kfree(new_disk_conf
);
1224 drbd_adm_finish(info
, retcode
);
1228 int drbd_adm_attach(struct sk_buff
*skb
, struct genl_info
*info
)
1230 struct drbd_conf
*mdev
;
1232 enum drbd_ret_code retcode
;
1233 enum determine_dev_size dd
;
1234 sector_t max_possible_sectors
;
1235 sector_t min_md_device_sectors
;
1236 struct drbd_backing_dev
*nbc
= NULL
; /* new_backing_conf */
1237 struct disk_conf
*new_disk_conf
= NULL
;
1238 struct block_device
*bdev
;
1239 struct lru_cache
*resync_lru
= NULL
;
1240 struct fifo_buffer
*new_plan
= NULL
;
1241 union drbd_state ns
, os
;
1242 enum drbd_state_rv rv
;
1243 struct net_conf
*nc
;
1244 int cp_discovered
= 0;
1246 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_MINOR
);
1247 if (!adm_ctx
.reply_skb
)
1249 if (retcode
!= NO_ERROR
)
1252 mdev
= adm_ctx
.mdev
;
1253 conn_reconfig_start(mdev
->tconn
);
1255 /* if you want to reconfigure, please tear down first */
1256 if (mdev
->state
.disk
> D_DISKLESS
) {
1257 retcode
= ERR_DISK_CONFIGURED
;
1260 /* It may just now have detached because of IO error. Make sure
1261 * drbd_ldev_destroy is done already, we may end up here very fast,
1262 * e.g. if someone calls attach from the on-io-error handler,
1263 * to realize a "hot spare" feature (not that I'd recommend that) */
1264 wait_event(mdev
->misc_wait
, !atomic_read(&mdev
->local_cnt
));
1266 /* allocation not in the IO path, drbdsetup context */
1267 nbc
= kzalloc(sizeof(struct drbd_backing_dev
), GFP_KERNEL
);
1269 retcode
= ERR_NOMEM
;
1272 new_disk_conf
= kzalloc(sizeof(struct disk_conf
), GFP_KERNEL
);
1273 if (!new_disk_conf
) {
1274 retcode
= ERR_NOMEM
;
1277 nbc
->disk_conf
= new_disk_conf
;
1279 set_disk_conf_defaults(new_disk_conf
);
1280 err
= disk_conf_from_attrs(new_disk_conf
, info
);
1282 retcode
= ERR_MANDATORY_TAG
;
1283 drbd_msg_put_info(from_attrs_err_to_txt(err
));
1287 enforce_disk_conf_limits(new_disk_conf
);
1289 new_plan
= fifo_alloc((new_disk_conf
->c_plan_ahead
* 10 * SLEEP_TIME
) / HZ
);
1291 retcode
= ERR_NOMEM
;
1295 if (new_disk_conf
->meta_dev_idx
< DRBD_MD_INDEX_FLEX_INT
) {
1296 retcode
= ERR_MD_IDX_INVALID
;
1301 nc
= rcu_dereference(mdev
->tconn
->net_conf
);
1303 if (new_disk_conf
->fencing
== FP_STONITH
&& nc
->wire_protocol
== DRBD_PROT_A
) {
1305 retcode
= ERR_STONITH_AND_PROT_A
;
1311 bdev
= blkdev_get_by_path(new_disk_conf
->backing_dev
,
1312 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
, mdev
);
1314 dev_err(DEV
, "open(\"%s\") failed with %ld\n", new_disk_conf
->backing_dev
,
1316 retcode
= ERR_OPEN_DISK
;
1319 nbc
->backing_bdev
= bdev
;
1322 * meta_dev_idx >= 0: external fixed size, possibly multiple
1323 * drbd sharing one meta device. TODO in that case, paranoia
1324 * check that [md_bdev, meta_dev_idx] is not yet used by some
1325 * other drbd minor! (if you use drbd.conf + drbdadm, that
1326 * should check it for you already; but if you don't, or
1327 * someone fooled it, we need to double check here)
1329 bdev
= blkdev_get_by_path(new_disk_conf
->meta_dev
,
1330 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
,
1331 (new_disk_conf
->meta_dev_idx
< 0) ?
1332 (void *)mdev
: (void *)drbd_m_holder
);
1334 dev_err(DEV
, "open(\"%s\") failed with %ld\n", new_disk_conf
->meta_dev
,
1336 retcode
= ERR_OPEN_MD_DISK
;
1339 nbc
->md_bdev
= bdev
;
1341 if ((nbc
->backing_bdev
== nbc
->md_bdev
) !=
1342 (new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_INTERNAL
||
1343 new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_FLEX_INT
)) {
1344 retcode
= ERR_MD_IDX_INVALID
;
1348 resync_lru
= lc_create("resync", drbd_bm_ext_cache
,
1349 1, 61, sizeof(struct bm_extent
),
1350 offsetof(struct bm_extent
, lce
));
1352 retcode
= ERR_NOMEM
;
1356 /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1357 drbd_md_set_sector_offsets(mdev
, nbc
);
1359 if (drbd_get_max_capacity(nbc
) < new_disk_conf
->disk_size
) {
1360 dev_err(DEV
, "max capacity %llu smaller than disk size %llu\n",
1361 (unsigned long long) drbd_get_max_capacity(nbc
),
1362 (unsigned long long) new_disk_conf
->disk_size
);
1363 retcode
= ERR_DISK_TOO_SMALL
;
1367 if (new_disk_conf
->meta_dev_idx
< 0) {
1368 max_possible_sectors
= DRBD_MAX_SECTORS_FLEX
;
1369 /* at least one MB, otherwise it does not make sense */
1370 min_md_device_sectors
= (2<<10);
1372 max_possible_sectors
= DRBD_MAX_SECTORS
;
1373 min_md_device_sectors
= MD_RESERVED_SECT
* (new_disk_conf
->meta_dev_idx
+ 1);
1376 if (drbd_get_capacity(nbc
->md_bdev
) < min_md_device_sectors
) {
1377 retcode
= ERR_MD_DISK_TOO_SMALL
;
1378 dev_warn(DEV
, "refusing attach: md-device too small, "
1379 "at least %llu sectors needed for this meta-disk type\n",
1380 (unsigned long long) min_md_device_sectors
);
1384 /* Make sure the new disk is big enough
1385 * (we may currently be R_PRIMARY with no local disk...) */
1386 if (drbd_get_max_capacity(nbc
) <
1387 drbd_get_capacity(mdev
->this_bdev
)) {
1388 retcode
= ERR_DISK_TOO_SMALL
;
1392 nbc
->known_size
= drbd_get_capacity(nbc
->backing_bdev
);
1394 if (nbc
->known_size
> max_possible_sectors
) {
1395 dev_warn(DEV
, "==> truncating very big lower level device "
1396 "to currently maximum possible %llu sectors <==\n",
1397 (unsigned long long) max_possible_sectors
);
1398 if (new_disk_conf
->meta_dev_idx
>= 0)
1399 dev_warn(DEV
, "==>> using internal or flexible "
1400 "meta data may help <<==\n");
1403 drbd_suspend_io(mdev
);
1404 /* also wait for the last barrier ack. */
1405 wait_event(mdev
->misc_wait
, !atomic_read(&mdev
->ap_pending_cnt
) || drbd_suspended(mdev
));
1406 /* and for any other previously queued work */
1407 drbd_flush_workqueue(mdev
);
1409 rv
= _drbd_request_state(mdev
, NS(disk
, D_ATTACHING
), CS_VERBOSE
);
1410 retcode
= rv
; /* FIXME: Type mismatch. */
1411 drbd_resume_io(mdev
);
1412 if (rv
< SS_SUCCESS
)
1415 if (!get_ldev_if_state(mdev
, D_ATTACHING
))
1416 goto force_diskless
;
1418 drbd_md_set_sector_offsets(mdev
, nbc
);
1420 if (!mdev
->bitmap
) {
1421 if (drbd_bm_init(mdev
)) {
1422 retcode
= ERR_NOMEM
;
1423 goto force_diskless_dec
;
1427 retcode
= drbd_md_read(mdev
, nbc
);
1428 if (retcode
!= NO_ERROR
)
1429 goto force_diskless_dec
;
1431 if (mdev
->state
.conn
< C_CONNECTED
&&
1432 mdev
->state
.role
== R_PRIMARY
&&
1433 (mdev
->ed_uuid
& ~((u64
)1)) != (nbc
->md
.uuid
[UI_CURRENT
] & ~((u64
)1))) {
1434 dev_err(DEV
, "Can only attach to data with current UUID=%016llX\n",
1435 (unsigned long long)mdev
->ed_uuid
);
1436 retcode
= ERR_DATA_NOT_CURRENT
;
1437 goto force_diskless_dec
;
1440 /* Since we are diskless, fix the activity log first... */
1441 if (drbd_check_al_size(mdev
, new_disk_conf
)) {
1442 retcode
= ERR_NOMEM
;
1443 goto force_diskless_dec
;
1446 /* Prevent shrinking of consistent devices ! */
1447 if (drbd_md_test_flag(nbc
, MDF_CONSISTENT
) &&
1448 drbd_new_dev_size(mdev
, nbc
, nbc
->disk_conf
->disk_size
, 0) < nbc
->md
.la_size_sect
) {
1449 dev_warn(DEV
, "refusing to truncate a consistent device\n");
1450 retcode
= ERR_DISK_TOO_SMALL
;
1451 goto force_diskless_dec
;
1454 if (!drbd_al_read_log(mdev
, nbc
)) {
1455 retcode
= ERR_IO_MD_DISK
;
1456 goto force_diskless_dec
;
1459 /* Reset the "barriers don't work" bits here, then force meta data to
1460 * be written, to ensure we determine if barriers are supported. */
1461 if (new_disk_conf
->md_flushes
)
1462 clear_bit(MD_NO_FUA
, &mdev
->flags
);
1464 set_bit(MD_NO_FUA
, &mdev
->flags
);
1466 /* Point of no return reached.
1467 * Devices and memory are no longer released by error cleanup below.
1468 * now mdev takes over responsibility, and the state engine should
1469 * clean it up somewhere. */
1470 D_ASSERT(mdev
->ldev
== NULL
);
1472 mdev
->resync
= resync_lru
;
1473 mdev
->rs_plan_s
= new_plan
;
1476 new_disk_conf
= NULL
;
1479 mdev
->write_ordering
= WO_bdev_flush
;
1480 drbd_bump_write_ordering(mdev
, WO_bdev_flush
);
1482 if (drbd_md_test_flag(mdev
->ldev
, MDF_CRASHED_PRIMARY
))
1483 set_bit(CRASHED_PRIMARY
, &mdev
->flags
);
1485 clear_bit(CRASHED_PRIMARY
, &mdev
->flags
);
1487 if (drbd_md_test_flag(mdev
->ldev
, MDF_PRIMARY_IND
) &&
1488 !(mdev
->state
.role
== R_PRIMARY
&& mdev
->tconn
->susp_nod
)) {
1489 set_bit(CRASHED_PRIMARY
, &mdev
->flags
);
1498 drbd_reconsider_max_bio_size(mdev
);
1500 /* If I am currently not R_PRIMARY,
1501 * but meta data primary indicator is set,
1502 * I just now recover from a hard crash,
1503 * and have been R_PRIMARY before that crash.
1505 * Now, if I had no connection before that crash
1506 * (have been degraded R_PRIMARY), chances are that
1507 * I won't find my peer now either.
1509 * In that case, and _only_ in that case,
1510 * we use the degr-wfc-timeout instead of the default,
1511 * so we can automatically recover from a crash of a
1512 * degraded but active "cluster" after a certain timeout.
1514 clear_bit(USE_DEGR_WFC_T
, &mdev
->flags
);
1515 if (mdev
->state
.role
!= R_PRIMARY
&&
1516 drbd_md_test_flag(mdev
->ldev
, MDF_PRIMARY_IND
) &&
1517 !drbd_md_test_flag(mdev
->ldev
, MDF_CONNECTED_IND
))
1518 set_bit(USE_DEGR_WFC_T
, &mdev
->flags
);
1520 dd
= drbd_determine_dev_size(mdev
, 0);
1521 if (dd
== dev_size_error
) {
1522 retcode
= ERR_NOMEM_BITMAP
;
1523 goto force_diskless_dec
;
1524 } else if (dd
== grew
)
1525 set_bit(RESYNC_AFTER_NEG
, &mdev
->flags
);
1527 if (drbd_md_test_flag(mdev
->ldev
, MDF_FULL_SYNC
)) {
1528 dev_info(DEV
, "Assuming that all blocks are out of sync "
1529 "(aka FullSync)\n");
1530 if (drbd_bitmap_io(mdev
, &drbd_bmio_set_n_write
,
1531 "set_n_write from attaching", BM_LOCKED_MASK
)) {
1532 retcode
= ERR_IO_MD_DISK
;
1533 goto force_diskless_dec
;
1536 if (drbd_bitmap_io(mdev
, &drbd_bm_read
,
1537 "read from attaching", BM_LOCKED_MASK
)) {
1538 retcode
= ERR_IO_MD_DISK
;
1539 goto force_diskless_dec
;
1543 if (cp_discovered
) {
1544 drbd_al_apply_to_bm(mdev
);
1545 if (drbd_bitmap_io(mdev
, &drbd_bm_write
,
1546 "crashed primary apply AL", BM_LOCKED_MASK
)) {
1547 retcode
= ERR_IO_MD_DISK
;
1548 goto force_diskless_dec
;
1552 if (_drbd_bm_total_weight(mdev
) == drbd_bm_bits(mdev
))
1553 drbd_suspend_al(mdev
); /* IO is still suspended here... */
1555 spin_lock_irq(&mdev
->tconn
->req_lock
);
1556 os
= drbd_read_state(mdev
);
1558 /* If MDF_CONSISTENT is not set go into inconsistent state,
1559 otherwise investigate MDF_WasUpToDate...
1560 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1561 otherwise into D_CONSISTENT state.
1563 if (drbd_md_test_flag(mdev
->ldev
, MDF_CONSISTENT
)) {
1564 if (drbd_md_test_flag(mdev
->ldev
, MDF_WAS_UP_TO_DATE
))
1565 ns
.disk
= D_CONSISTENT
;
1567 ns
.disk
= D_OUTDATED
;
1569 ns
.disk
= D_INCONSISTENT
;
1572 if (drbd_md_test_flag(mdev
->ldev
, MDF_PEER_OUT_DATED
))
1573 ns
.pdsk
= D_OUTDATED
;
1576 if (ns
.disk
== D_CONSISTENT
&&
1577 (ns
.pdsk
== D_OUTDATED
|| rcu_dereference(mdev
->ldev
->disk_conf
)->fencing
== FP_DONT_CARE
))
1578 ns
.disk
= D_UP_TO_DATE
;
1581 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1582 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1583 this point, because drbd_request_state() modifies these
1586 /* In case we are C_CONNECTED postpone any decision on the new disk
1587 state after the negotiation phase. */
1588 if (mdev
->state
.conn
== C_CONNECTED
) {
1589 mdev
->new_state_tmp
.i
= ns
.i
;
1591 ns
.disk
= D_NEGOTIATING
;
1593 /* We expect to receive up-to-date UUIDs soon.
1594 To avoid a race in receive_state, free p_uuid while
1595 holding req_lock. I.e. atomic with the state change */
1596 kfree(mdev
->p_uuid
);
1597 mdev
->p_uuid
= NULL
;
1600 rv
= _drbd_set_state(mdev
, ns
, CS_VERBOSE
, NULL
);
1601 spin_unlock_irq(&mdev
->tconn
->req_lock
);
1603 if (rv
< SS_SUCCESS
)
1604 goto force_diskless_dec
;
1606 if (mdev
->state
.role
== R_PRIMARY
)
1607 mdev
->ldev
->md
.uuid
[UI_CURRENT
] |= (u64
)1;
1609 mdev
->ldev
->md
.uuid
[UI_CURRENT
] &= ~(u64
)1;
1611 drbd_md_mark_dirty(mdev
);
1614 kobject_uevent(&disk_to_dev(mdev
->vdisk
)->kobj
, KOBJ_CHANGE
);
1616 conn_reconfig_done(mdev
->tconn
);
1617 drbd_adm_finish(info
, retcode
);
1623 drbd_force_state(mdev
, NS(disk
, D_FAILED
));
1626 conn_reconfig_done(mdev
->tconn
);
1628 if (nbc
->backing_bdev
)
1629 blkdev_put(nbc
->backing_bdev
,
1630 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
);
1632 blkdev_put(nbc
->md_bdev
,
1633 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
);
1636 kfree(new_disk_conf
);
1637 lc_destroy(resync_lru
);
1641 drbd_adm_finish(info
, retcode
);
1645 static int adm_detach(struct drbd_conf
*mdev
)
1647 enum drbd_state_rv retcode
;
1649 drbd_suspend_io(mdev
); /* so no-one is stuck in drbd_al_begin_io */
1650 retcode
= drbd_request_state(mdev
, NS(disk
, D_FAILED
));
1651 /* D_FAILED will transition to DISKLESS. */
1652 ret
= wait_event_interruptible(mdev
->misc_wait
,
1653 mdev
->state
.disk
!= D_FAILED
);
1654 drbd_resume_io(mdev
);
1655 if ((int)retcode
== (int)SS_IS_DISKLESS
)
1656 retcode
= SS_NOTHING_TO_DO
;
1662 /* Detaching the disk is a process in multiple stages. First we need to lock
1663 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1664 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1665 * internal references as well.
1666 * Only then we have finally detached. */
1667 int drbd_adm_detach(struct sk_buff
*skb
, struct genl_info
*info
)
1669 enum drbd_ret_code retcode
;
1671 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_MINOR
);
1672 if (!adm_ctx
.reply_skb
)
1674 if (retcode
!= NO_ERROR
)
1677 retcode
= adm_detach(adm_ctx
.mdev
);
1679 drbd_adm_finish(info
, retcode
);
1683 static bool conn_resync_running(struct drbd_tconn
*tconn
)
1685 struct drbd_conf
*mdev
;
1690 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
) {
1691 if (mdev
->state
.conn
== C_SYNC_SOURCE
||
1692 mdev
->state
.conn
== C_SYNC_TARGET
||
1693 mdev
->state
.conn
== C_PAUSED_SYNC_S
||
1694 mdev
->state
.conn
== C_PAUSED_SYNC_T
) {
1704 static bool conn_ov_running(struct drbd_tconn
*tconn
)
1706 struct drbd_conf
*mdev
;
1711 idr_for_each_entry(&tconn
->volumes
, mdev
, vnr
) {
1712 if (mdev
->state
.conn
== C_VERIFY_S
||
1713 mdev
->state
.conn
== C_VERIFY_T
) {
1723 static enum drbd_ret_code
1724 _check_net_options(struct drbd_tconn
*tconn
, struct net_conf
*old_conf
, struct net_conf
*new_conf
)
1726 struct drbd_conf
*mdev
;
1729 if (old_conf
&& tconn
->cstate
== C_WF_REPORT_PARAMS
&& tconn
->agreed_pro_version
< 100) {
1730 if (new_conf
->wire_protocol
!= old_conf
->wire_protocol
)
1731 return ERR_NEED_APV_100
;
1733 if (new_conf
->two_primaries
!= old_conf
->two_primaries
)
1734 return ERR_NEED_APV_100
;
1736 if (!new_conf
->integrity_alg
!= !old_conf
->integrity_alg
)
1737 return ERR_NEED_APV_100
;
1739 if (strcmp(new_conf
->integrity_alg
, old_conf
->integrity_alg
))
1740 return ERR_NEED_APV_100
;
1743 if (!new_conf
->two_primaries
&&
1744 conn_highest_role(tconn
) == R_PRIMARY
&&
1745 conn_highest_peer(tconn
) == R_PRIMARY
)
1746 return ERR_NEED_ALLOW_TWO_PRI
;
1748 if (new_conf
->two_primaries
&&
1749 (new_conf
->wire_protocol
!= DRBD_PROT_C
))
1750 return ERR_NOT_PROTO_C
;
1752 idr_for_each_entry(&tconn
->volumes
, mdev
, i
) {
1753 if (get_ldev(mdev
)) {
1754 enum drbd_fencing_p fp
= rcu_dereference(mdev
->ldev
->disk_conf
)->fencing
;
1756 if (new_conf
->wire_protocol
== DRBD_PROT_A
&& fp
== FP_STONITH
)
1757 return ERR_STONITH_AND_PROT_A
;
1759 if (mdev
->state
.role
== R_PRIMARY
&& new_conf
->discard_my_data
)
1763 if (new_conf
->on_congestion
!= OC_BLOCK
&& new_conf
->wire_protocol
!= DRBD_PROT_A
)
1764 return ERR_CONG_NOT_PROTO_A
;
1769 static enum drbd_ret_code
1770 check_net_options(struct drbd_tconn
*tconn
, struct net_conf
*new_conf
)
1772 static enum drbd_ret_code rv
;
1773 struct drbd_conf
*mdev
;
1777 rv
= _check_net_options(tconn
, rcu_dereference(tconn
->net_conf
), new_conf
);
1780 /* tconn->volumes protected by genl_lock() here */
1781 idr_for_each_entry(&tconn
->volumes
, mdev
, i
) {
1782 if (!mdev
->bitmap
) {
1783 if(drbd_bm_init(mdev
))
1792 struct crypto_hash
*verify_tfm
;
1793 struct crypto_hash
*csums_tfm
;
1794 struct crypto_hash
*cram_hmac_tfm
;
1795 struct crypto_hash
*integrity_tfm
;
1801 alloc_hash(struct crypto_hash
**tfm
, char *tfm_name
, int err_alg
)
1806 *tfm
= crypto_alloc_hash(tfm_name
, 0, CRYPTO_ALG_ASYNC
);
1815 static enum drbd_ret_code
1816 alloc_crypto(struct crypto
*crypto
, struct net_conf
*new_conf
)
1818 char hmac_name
[CRYPTO_MAX_ALG_NAME
];
1819 enum drbd_ret_code rv
;
1822 rv
= alloc_hash(&crypto
->csums_tfm
, new_conf
->csums_alg
,
1826 rv
= alloc_hash(&crypto
->verify_tfm
, new_conf
->verify_alg
,
1830 rv
= alloc_hash(&crypto
->integrity_tfm
, new_conf
->integrity_alg
,
1834 if (new_conf
->cram_hmac_alg
[0] != 0) {
1835 snprintf(hmac_name
, CRYPTO_MAX_ALG_NAME
, "hmac(%s)",
1836 new_conf
->cram_hmac_alg
);
1838 rv
= alloc_hash(&crypto
->cram_hmac_tfm
, hmac_name
,
1841 if (crypto
->integrity_tfm
) {
1842 hash_size
= crypto_hash_digestsize(crypto
->integrity_tfm
);
1843 crypto
->int_dig_in
= kmalloc(hash_size
, GFP_KERNEL
);
1844 if (!crypto
->int_dig_in
)
1846 crypto
->int_dig_vv
= kmalloc(hash_size
, GFP_KERNEL
);
1847 if (!crypto
->int_dig_vv
)
1854 static void free_crypto(struct crypto
*crypto
)
1856 kfree(crypto
->int_dig_in
);
1857 kfree(crypto
->int_dig_vv
);
1858 crypto_free_hash(crypto
->cram_hmac_tfm
);
1859 crypto_free_hash(crypto
->integrity_tfm
);
1860 crypto_free_hash(crypto
->csums_tfm
);
1861 crypto_free_hash(crypto
->verify_tfm
);
1864 int drbd_adm_net_opts(struct sk_buff
*skb
, struct genl_info
*info
)
1866 enum drbd_ret_code retcode
;
1867 struct drbd_tconn
*tconn
;
1868 struct net_conf
*old_conf
, *new_conf
= NULL
;
1870 int ovr
; /* online verify running */
1871 int rsr
; /* re-sync running */
1872 struct crypto crypto
= { };
1874 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_CONN
);
1875 if (!adm_ctx
.reply_skb
)
1877 if (retcode
!= NO_ERROR
)
1880 tconn
= adm_ctx
.tconn
;
1882 new_conf
= kzalloc(sizeof(struct net_conf
), GFP_KERNEL
);
1884 retcode
= ERR_NOMEM
;
1888 conn_reconfig_start(tconn
);
1890 mutex_lock(&tconn
->data
.mutex
);
1891 mutex_lock(&tconn
->conf_update
);
1892 old_conf
= tconn
->net_conf
;
1895 drbd_msg_put_info("net conf missing, try connect");
1896 retcode
= ERR_INVALID_REQUEST
;
1900 *new_conf
= *old_conf
;
1901 if (should_set_defaults(info
))
1902 set_net_conf_defaults(new_conf
);
1904 err
= net_conf_from_attrs_for_change(new_conf
, info
);
1905 if (err
&& err
!= -ENOMSG
) {
1906 retcode
= ERR_MANDATORY_TAG
;
1907 drbd_msg_put_info(from_attrs_err_to_txt(err
));
1911 retcode
= check_net_options(tconn
, new_conf
);
1912 if (retcode
!= NO_ERROR
)
1915 /* re-sync running */
1916 rsr
= conn_resync_running(tconn
);
1917 if (rsr
&& strcmp(new_conf
->csums_alg
, old_conf
->csums_alg
)) {
1918 retcode
= ERR_CSUMS_RESYNC_RUNNING
;
1922 /* online verify running */
1923 ovr
= conn_ov_running(tconn
);
1924 if (ovr
&& strcmp(new_conf
->verify_alg
, old_conf
->verify_alg
)) {
1925 retcode
= ERR_VERIFY_RUNNING
;
1929 retcode
= alloc_crypto(&crypto
, new_conf
);
1930 if (retcode
!= NO_ERROR
)
1933 rcu_assign_pointer(tconn
->net_conf
, new_conf
);
1936 crypto_free_hash(tconn
->csums_tfm
);
1937 tconn
->csums_tfm
= crypto
.csums_tfm
;
1938 crypto
.csums_tfm
= NULL
;
1941 crypto_free_hash(tconn
->verify_tfm
);
1942 tconn
->verify_tfm
= crypto
.verify_tfm
;
1943 crypto
.verify_tfm
= NULL
;
1946 kfree(tconn
->int_dig_in
);
1947 tconn
->int_dig_in
= crypto
.int_dig_in
;
1948 kfree(tconn
->int_dig_vv
);
1949 tconn
->int_dig_vv
= crypto
.int_dig_vv
;
1950 crypto_free_hash(tconn
->integrity_tfm
);
1951 tconn
->integrity_tfm
= crypto
.integrity_tfm
;
1952 if (tconn
->cstate
>= C_WF_REPORT_PARAMS
&& tconn
->agreed_pro_version
>= 100)
1953 /* Do this without trying to take tconn->data.mutex again. */
1954 __drbd_send_protocol(tconn
, P_PROTOCOL_UPDATE
);
1956 crypto_free_hash(tconn
->cram_hmac_tfm
);
1957 tconn
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
1959 mutex_unlock(&tconn
->conf_update
);
1960 mutex_unlock(&tconn
->data
.mutex
);
1964 if (tconn
->cstate
>= C_WF_REPORT_PARAMS
)
1965 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn
)));
1970 mutex_unlock(&tconn
->conf_update
);
1971 mutex_unlock(&tconn
->data
.mutex
);
1972 free_crypto(&crypto
);
1975 conn_reconfig_done(tconn
);
1977 drbd_adm_finish(info
, retcode
);
1981 int drbd_adm_connect(struct sk_buff
*skb
, struct genl_info
*info
)
1983 struct drbd_conf
*mdev
;
1984 struct net_conf
*old_conf
, *new_conf
= NULL
;
1985 struct crypto crypto
= { };
1986 struct drbd_tconn
*oconn
;
1987 struct drbd_tconn
*tconn
;
1988 struct sockaddr
*new_my_addr
, *new_peer_addr
, *taken_addr
;
1989 enum drbd_ret_code retcode
;
1993 retcode
= drbd_adm_prepare(skb
, info
, DRBD_ADM_NEED_CONN
);
1994 if (!adm_ctx
.reply_skb
)
1996 if (retcode
!= NO_ERROR
)
1999 tconn
= adm_ctx
.tconn
;
2000 conn_reconfig_start(tconn
);
2002 if (tconn
->cstate
> C_STANDALONE
) {
2003 retcode
= ERR_NET_CONFIGURED
;
2007 /* allocation not in the IO path, cqueue thread context */
2008 new_conf
= kzalloc(sizeof(*new_conf
), GFP_KERNEL
);
2010 retcode
= ERR_NOMEM
;
2014 set_net_conf_defaults(new_conf
);
2016 err
= net_conf_from_attrs(new_conf
, info
);
2018 retcode
= ERR_MANDATORY_TAG
;
2019 drbd_msg_put_info(from_attrs_err_to_txt(err
));
2023 retcode
= check_net_options(tconn
, new_conf
);
2024 if (retcode
!= NO_ERROR
)
2029 new_my_addr
= (struct sockaddr
*)&new_conf
->my_addr
;
2030 new_peer_addr
= (struct sockaddr
*)&new_conf
->peer_addr
;
2032 /* No need for _rcu here. All reconfiguration is
2033 * strictly serialized on genl_lock(). We are protected against
2034 * concurrent reconfiguration/addition/deletion */
2035 list_for_each_entry(oconn
, &drbd_tconns
, all_tconn
) {
2036 struct net_conf
*nc
;
2041 nc
= rcu_dereference(oconn
->net_conf
);
2043 taken_addr
= (struct sockaddr
*)&nc
->my_addr
;
2044 if (new_conf
->my_addr_len
== nc
->my_addr_len
&&
2045 !memcmp(new_my_addr
, taken_addr
, new_conf
->my_addr_len
))
2046 retcode
= ERR_LOCAL_ADDR
;
2048 taken_addr
= (struct sockaddr
*)&nc
->peer_addr
;
2049 if (new_conf
->peer_addr_len
== nc
->peer_addr_len
&&
2050 !memcmp(new_peer_addr
, taken_addr
, new_conf
->peer_addr_len
))
2051 retcode
= ERR_PEER_ADDR
;
2054 if (retcode
!= NO_ERROR
)
2058 retcode
= alloc_crypto(&crypto
, new_conf
);
2059 if (retcode
!= NO_ERROR
)
2062 ((char *)new_conf
->shared_secret
)[SHARED_SECRET_MAX
-1] = 0;
2064 conn_flush_workqueue(tconn
);
2066 mutex_lock(&tconn
->conf_update
);
2067 old_conf
= tconn
->net_conf
;
2069 retcode
= ERR_NET_CONFIGURED
;
2070 mutex_unlock(&tconn
->conf_update
);
2073 rcu_assign_pointer(tconn
->net_conf
, new_conf
);
2075 conn_free_crypto(tconn
);
2076 tconn
->int_dig_in
= crypto
.int_dig_in
;
2077 tconn
->int_dig_vv
= crypto
.int_dig_vv
;
2078 tconn
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
2079 tconn
->integrity_tfm
= crypto
.integrity_tfm
;
2080 tconn
->csums_tfm
= crypto
.csums_tfm
;
2081 tconn
->verify_tfm
= crypto
.verify_tfm
;
2083 mutex_unlock(&tconn
->conf_update
);
2086 idr_for_each_entry(&tconn
->volumes
, mdev
, i
) {
2092 retcode
= conn_request_state(tconn
, NS(conn
, C_UNCONNECTED
), CS_VERBOSE
);
2094 conn_reconfig_done(tconn
);
2095 drbd_adm_finish(info
, retcode
);
2099 free_crypto(&crypto
);
2102 conn_reconfig_done(tconn
);
2104 drbd_adm_finish(info
, retcode
);
static enum drbd_state_rv
conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
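/* After an online grow, one side has to resync the newly added storage to
 * the peer.  If the roles differ, the Primary is the natural sync source;
 * with equal roles, fall back to the DISCARD_CONCURRENT bit, which the
 * handshake established as a symmetric tie breaker. */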
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
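/* Resize the device.  The user-requested size lives in disk_conf, which is
 * RCU protected: the update below copies the current conf, changes
 * disk_size in the copy, publishes it with rcu_assign_pointer() under
 * conf_update, and frees the old conf only after synchronize_rcu().
 * Readers therefore always see either the complete old or the complete new
 * configuration. */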
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;

 fail_ldev:
	put_ldev(mdev);
	goto fail;
}
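/* Resource options.  struct res_opts is small and is updated by plain
 * assignment, serialized by genl_lock(); only the CPU mask needs parsing,
 * and a change is pushed to the per-connection worker, receiver and
 * asender threads through their reset_cpu_mask flags. */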
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	return set_res_opts_defaults(r);
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	free_cpumask_var(new_cpu_mask);
	drbd_adm_finish(info, retcode);
	return 0;
}
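/* Invalidate the local disk and become sync target.  If no peer is
 * currently connected (SS_NEED_CONNECTION), starting a resync is
 * impossible; the loop below then downgrades the disk to D_INCONSISTENT
 * under the req_lock, racing fairly against a connection that may be
 * established concurrently. */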
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
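/* Bitmap-IO callback: set all bits and write the bitmap out, then suspend
 * the activity log.  Intended to be passed to drbd_bitmap_io() in paths
 * that must not touch the AL while a full sync is pending. */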
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
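/* Nest the configuration context (connection name and, unless
 * VOLUME_UNSPECIFIED, the volume number) into a reply skb.  If the skb
 * runs out of room, the partially built nest is cancelled and -EMSGSIZE
 * returned, which lets a netlink dump retry the same object with a fresh
 * skb. */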
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}
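/* Status of a single minor, answered synchronously: the state info is
 * appended to the reply skb that drbd_adm_prepare() set up, and sent back
 * by drbd_adm_finish(). */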
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
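/* Iterator state carried between successive dump calls, as used by
 * get_one_status() above and primed by drbd_adm_get_status_all() below:
 *   cb->args[0]  the drbd_tconn to (re)start from, cast to long
 *   cb->args[1]  the next volume number within that tconn
 *   cb->args[2]  set (to the tconn) when only one resource is dumped
 */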
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
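/* Report which wait-for-connection timeout currently applies (peer
 * outdated, degraded, or default), so that userspace can pick the matching
 * wfc-timeout setting while waiting for a connection. */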
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
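/* Minimal validation of a connection name coming in from netlink; the name
 * doubles as an identifier that may some day appear in kernel filesystem
 * paths, hence the '/' check below. */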
static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
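/* A minor can be deleted while the rest of the resource stays up, provided
 * the volume is Diskless and Secondary.  It is unhooked from both the
 * per-connection volume idr and the global minors idr before the kref is
 * dropped, so new lookups cannot find it anymore. */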
static enum drbd_ret_code
adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
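/* Delete an (empty) resource: only allowed once no minor is left in it,
 * i.e. conn_lowest_minor() finds none. */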
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
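/* Broadcast a state info block to the DRBD events multicast group, e.g.
 * for a userspace listener such as "drbdsetup events".  -ESRCH from the
 * multicast send only means that nobody is currently listening, so it is
 * not treated as an error. */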
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}