drbd: get_one_status(): Iterate over resource->devices instead of connection->peer_devices
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index c706d50a8b0674aeabd58a548551b8898168b88e..6f11d85792631223264d03e5252afe7b6e00af2d 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -32,6 +32,7 @@
 #include <linux/blkpg.h>
 #include <linux/cpumask.h>
 #include "drbd_int.h"
+#include "drbd_protocol.h"
 #include "drbd_req.h"
 #include "drbd_wrappers.h"
 #include <asm/unaligned.h>
@@ -44,8 +45,8 @@
 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
 
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);
 
 int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
@@ -102,8 +103,9 @@ static struct drbd_config_context {
        /* pointer into reply buffer */
        struct drbd_genlmsghdr *reply_dh;
        /* resolved from attributes, if possible */
-       struct drbd_conf *mdev;
-       struct drbd_tconn *tconn;
+       struct drbd_device *device;
+       struct drbd_resource *resource;
+       struct drbd_connection *connection;
 } adm_ctx;
 
 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
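
Note on the reworked context: adm_ctx now carries the resolved drbd_device, drbd_resource and drbd_connection separately instead of the old mdev/tconn pair. A handler built on drbd_adm_prepare()/drbd_adm_finish() consumes it along these lines (a sketch; drbd_adm_example() is a hypothetical name, the fields, flag and macros are the ones this patch uses):

    /* hypothetical example handler, not part of the patch */
    static int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
    {
            enum drbd_ret_code retcode;

            /* resolves adm_ctx.device/.resource/.connection from the netlink
             * attributes and takes references on resource and connection */
            retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
            if (retcode != NO_ERROR)
                    goto out;

            drbd_info(adm_ctx.device, "volume %u of resource %s\n",
                      adm_ctx.device->vnr, adm_ctx.device->resource->name);
    out:
            drbd_adm_finish(info, retcode); /* drops those references again */
            return 0;
    }
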
@@ -202,62 +204,71 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
                adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
                if ((adm_ctx.my_addr &&
-                    nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+                    nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
                    (adm_ctx.peer_addr &&
-                    nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+                    nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
                        err = -EINVAL;
                        goto fail;
                }
        }
 
        adm_ctx.minor = d_in->minor;
-       adm_ctx.mdev = minor_to_mdev(d_in->minor);
-       adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
+       adm_ctx.device = minor_to_device(d_in->minor);
+       if (adm_ctx.resource_name) {
+               adm_ctx.resource = drbd_find_resource(adm_ctx.resource_name);
+               if (adm_ctx.resource) {
+                       adm_ctx.connection = first_connection(adm_ctx.resource);
+                       kref_get(&adm_ctx.connection->kref);
+               }
+       }
 
-       if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
+       if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        }
-       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+       if (!adm_ctx.resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
                drbd_msg_put_info("unknown resource");
+               if (adm_ctx.resource_name)
+                       return ERR_RES_NOT_KNOWN;
                return ERR_INVALID_REQUEST;
        }
 
        if (flags & DRBD_ADM_NEED_CONNECTION) {
-               if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+               if (adm_ctx.connection && !(flags & DRBD_ADM_NEED_RESOURCE)) {
                        drbd_msg_put_info("no resource name expected");
                        return ERR_INVALID_REQUEST;
                }
-               if (adm_ctx.mdev) {
+               if (adm_ctx.device) {
                        drbd_msg_put_info("no minor number expected");
                        return ERR_INVALID_REQUEST;
                }
                if (adm_ctx.my_addr && adm_ctx.peer_addr)
-                       adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+                       adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
                                                          nla_len(adm_ctx.my_addr),
                                                          nla_data(adm_ctx.peer_addr),
                                                          nla_len(adm_ctx.peer_addr));
-               if (!adm_ctx.tconn) {
+               if (!adm_ctx.connection) {
                        drbd_msg_put_info("unknown connection");
                        return ERR_INVALID_REQUEST;
                }
        }
 
        /* some more paranoia, if the request was over-determined */
-       if (adm_ctx.mdev && adm_ctx.tconn &&
-           adm_ctx.mdev->tconn != adm_ctx.tconn) {
-               pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
-                               adm_ctx.minor, adm_ctx.resource_name,
-                               adm_ctx.mdev->tconn->name);
+       if (adm_ctx.device && adm_ctx.resource &&
+           adm_ctx.device->resource != adm_ctx.resource) {
+               pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
+                               adm_ctx.minor, adm_ctx.resource->name,
+                               adm_ctx.device->resource->name);
                drbd_msg_put_info("minor exists in different resource");
                return ERR_INVALID_REQUEST;
        }
-       if (adm_ctx.mdev &&
+       if (adm_ctx.device &&
            adm_ctx.volume != VOLUME_UNSPECIFIED &&
-           adm_ctx.volume != adm_ctx.mdev->vnr) {
+           adm_ctx.volume != adm_ctx.device->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx.minor, adm_ctx.volume,
-                               adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
+                               adm_ctx.device->vnr,
+                               adm_ctx.device->resource->name);
                drbd_msg_put_info("minor exists as different volume");
                return ERR_INVALID_REQUEST;
        }
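
In the new lookup path, drbd_find_resource() evidently returns the resource with a reference already held (it is dropped again in drbd_adm_finish()), while the reference on the resource's first connection is taken explicitly via kref_get(). first_connection() itself is not shown in this diff; presumably it is a small inline in drbd_int.h along these lines, using the list.h helpers (the member names are assumptions):

    /* sketch, not from this patch: a plausible first_connection() helper;
     * member names are assumptions about drbd_int.h */
    static inline struct drbd_connection *
    first_connection(struct drbd_resource *resource)
    {
            return list_first_entry(&resource->connections,
                                    struct drbd_connection, connections);
    }
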
@@ -272,9 +283,13 @@ fail:
 
 static int drbd_adm_finish(struct genl_info *info, int retcode)
 {
-       if (adm_ctx.tconn) {
-               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
-               adm_ctx.tconn = NULL;
+       if (adm_ctx.connection) {
+               kref_put(&adm_ctx.connection->kref, drbd_destroy_connection);
+               adm_ctx.connection = NULL;
+       }
+       if (adm_ctx.resource) {
+               kref_put(&adm_ctx.resource->kref, drbd_destroy_resource);
+               adm_ctx.resource = NULL;
        }
 
        if (!adm_ctx.reply_skb)
@@ -285,34 +300,34 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
        return 0;
 }
 
-static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+static void setup_khelper_env(struct drbd_connection *connection, char **envp)
 {
        char *afs;
 
        /* FIXME: A future version will not allow this case. */
-       if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+       if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
                return;
 
-       switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+       switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
        case AF_INET6:
                afs = "ipv6";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
-                        &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+                        &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
                break;
        case AF_INET:
                afs = "ipv4";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
-                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+                        &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
                break;
        default:
                afs = "ssocks";
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
-                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+                        &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
        }
        snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
 }
 
-int drbd_khelper(struct drbd_conf *mdev, char *cmd)
+int drbd_khelper(struct drbd_device *device, char *cmd)
 {
        char *envp[] = { "HOME=/",
                        "TERM=linux",
@@ -322,39 +337,39 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
-       struct drbd_tconn *tconn = mdev->tconn;
+       struct drbd_connection *connection = first_peer_device(device)->connection;
        struct sib_info sib;
        int ret;
 
-       if (current == tconn->worker.task)
-               set_bit(CALLBACK_PENDING, &tconn->flags);
+       if (current == connection->worker.task)
+               set_bit(CALLBACK_PENDING, &connection->flags);
 
-       snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
-       setup_khelper_env(tconn, envp);
+       snprintf(mb, 12, "minor-%d", device_to_minor(device));
+       setup_khelper_env(connection, envp);
 
        /* The helper may take some time.
         * write out any unsynced meta data changes now */
-       drbd_md_sync(mdev);
+       drbd_md_sync(device);
 
-       dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
+       drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
-       drbd_bcast_event(mdev, &sib);
+       drbd_bcast_event(device, &sib);
        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
-               dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+               drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        else
-               dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
+               drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        sib.sib_reason = SIB_HELPER_POST;
        sib.helper_exit_code = ret;
-       drbd_bcast_event(mdev, &sib);
+       drbd_bcast_event(device, &sib);
 
-       if (current == tconn->worker.task)
-               clear_bit(CALLBACK_PENDING, &tconn->flags);
+       if (current == connection->worker.task)
+               clear_bit(CALLBACK_PENDING, &connection->flags);
 
        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;
@@ -362,7 +377,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        return ret;
 }
 
-int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+static int conn_khelper(struct drbd_connection *connection, char *cmd)
 {
        char *envp[] = { "HOME=/",
                        "TERM=linux",
@@ -370,23 +385,24 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
                         (char[20]) { }, /* address family */
                         (char[60]) { }, /* address */
                        NULL };
-       char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+       char *resource_name = connection->resource->name;
+       char *argv[] = {usermode_helper, cmd, resource_name, NULL };
        int ret;
 
-       setup_khelper_env(tconn, envp);
-       conn_md_sync(tconn);
+       setup_khelper_env(connection, envp);
+       conn_md_sync(connection);
 
-       conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+       drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
        /* TODO: conn_bcast_event() ?? */
 
        ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
-               conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
-                         usermode_helper, cmd, tconn->name,
+               drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, resource_name,
                          (ret >> 8) & 0xff, ret);
        else
-               conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
-                         usermode_helper, cmd, tconn->name,
+               drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, resource_name,
                          (ret >> 8) & 0xff, ret);
        /* TODO: conn_bcast_event() ?? */
 
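drbd_khelper() and conn_khelper() both log the helper's exit status as (ret >> 8) & 0xff, and conn_try_outdate_peer() below switches on the same value: with UMH_WAIT_PROC, call_usermodehelper() hands back a wait()-style status word, so the user-space exit code lives in bits 15..8. Made explicit as a hypothetical helper:

    /* sketch: decode the wait()-style status returned by call_usermodehelper();
     * helper_exit_code() is a made-up name, not something the patch adds */
    static int helper_exit_code(int status)
    {
            return (status >> 8) & 0xff;    /* matches the fence-peer cases 3..7 below */
    }
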
@@ -396,18 +412,20 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
        return ret;
 }
 
-static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
 {
        enum drbd_fencing_p fp = FP_NOT_AVAIL;
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        int vnr;
 
        rcu_read_lock();
-       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
-               if (get_ldev_if_state(mdev, D_CONSISTENT)) {
-                       fp = max_t(enum drbd_fencing_p, fp,
-                                  rcu_dereference(mdev->ldev->disk_conf)->fencing);
-                       put_ldev(mdev);
+       idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+               struct drbd_device *device = peer_device->device;
+               if (get_ldev_if_state(device, D_CONSISTENT)) {
+                       struct disk_conf *disk_conf =
+                               rcu_dereference(peer_device->device->ldev->disk_conf);
+                       fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
+                       put_ldev(device);
                }
        }
        rcu_read_unlock();
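
The volume iteration changes shape here: instead of walking the connection's idr of drbd_conf objects, highest_fencing_policy() now walks connection->peer_devices and reaches each device through peer_device->device. The same split is what the commit subject refers to: get_one_status() (not shown in this excerpt) moves over to resource->devices, presumably so that status covers all volumes of a resource independent of its connections. The pattern in isolation (a sketch; walk_volumes() is a hypothetical name, the idr and back-pointers are the ones used above):

    /* sketch only: walk all volumes reachable from a connection */
    static void walk_volumes(struct drbd_connection *connection)
    {
            struct drbd_peer_device *peer_device;
            int vnr;

            rcu_read_lock();
            idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
                    struct drbd_device *device = peer_device->device;

                    drbd_info(device, "volume %d of resource %s\n",
                              vnr, device->resource->name);
            }
            rcu_read_unlock();
    }
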
@@ -415,7 +433,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
        return fp;
 }
 
-bool conn_try_outdate_peer(struct drbd_tconn *tconn)
+bool conn_try_outdate_peer(struct drbd_connection *connection)
 {
        unsigned int connect_cnt;
        union drbd_state mask = { };
@@ -424,26 +442,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
        char *ex_to_string;
        int r;
 
-       if (tconn->cstate >= C_WF_REPORT_PARAMS) {
-               conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
+       if (connection->cstate >= C_WF_REPORT_PARAMS) {
+               drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
                return false;
        }
 
-       spin_lock_irq(&tconn->req_lock);
-       connect_cnt = tconn->connect_cnt;
-       spin_unlock_irq(&tconn->req_lock);
+       spin_lock_irq(&connection->req_lock);
+       connect_cnt = connection->connect_cnt;
+       spin_unlock_irq(&connection->req_lock);
 
-       fp = highest_fencing_policy(tconn);
+       fp = highest_fencing_policy(connection);
        switch (fp) {
        case FP_NOT_AVAIL:
-               conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
+               drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
                goto out;
        case FP_DONT_CARE:
                return true;
        default: ;
        }
 
-       r = conn_khelper(tconn, "fence-peer");
+       r = conn_khelper(connection, "fence-peer");
 
        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
@@ -457,7 +475,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
                val.pdsk = D_OUTDATED;
                break;
        case 5: /* peer was down */
-               if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
+               if (conn_highest_disk(connection) == D_UP_TO_DATE) {
                        /* we will(have) create(d) a new UUID anyways... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        mask.pdsk = D_MASK;
@@ -470,70 +488,70 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
-               conn_warn(tconn, "Peer is primary, outdating myself.\n");
+               drbd_warn(connection, "Peer is primary, outdating myself.\n");
                mask.disk = D_MASK;
                val.disk = D_OUTDATED;
                break;
        case 7:
                if (fp != FP_STONITH)
-                       conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
+                       drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                mask.pdsk = D_MASK;
                val.pdsk = D_OUTDATED;
                break;
        default:
                /* The script is broken ... */
-               conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
+               drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return false; /* Eventually leave IO frozen */
        }
 
-       conn_info(tconn, "fence-peer helper returned %d (%s)\n",
+       drbd_info(connection, "fence-peer helper returned %d (%s)\n",
                  (r>>8) & 0xff, ex_to_string);
 
  out:
 
        /* Not using
-          conn_request_state(tconn, mask, val, CS_VERBOSE);
+          conn_request_state(connection, mask, val, CS_VERBOSE);
           here, because we might have been able to re-establish the connection in the
           meantime. */
-       spin_lock_irq(&tconn->req_lock);
-       if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
-               if (tconn->connect_cnt != connect_cnt)
+       spin_lock_irq(&connection->req_lock);
+       if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
+               if (connection->connect_cnt != connect_cnt)
                        /* In case the connection was established and dropped
                           while the fence-peer handler was running, ignore it */
-                       conn_info(tconn, "Ignoring fence-peer exit code\n");
+                       drbd_info(connection, "Ignoring fence-peer exit code\n");
                else
-                       _conn_request_state(tconn, mask, val, CS_VERBOSE);
+                       _conn_request_state(connection, mask, val, CS_VERBOSE);
        }
-       spin_unlock_irq(&tconn->req_lock);
+       spin_unlock_irq(&connection->req_lock);
 
-       return conn_highest_pdsk(tconn) <= D_OUTDATED;
+       return conn_highest_pdsk(connection) <= D_OUTDATED;
 }
 
 static int _try_outdate_peer_async(void *data)
 {
-       struct drbd_tconn *tconn = (struct drbd_tconn *)data;
+       struct drbd_connection *connection = (struct drbd_connection *)data;
 
-       conn_try_outdate_peer(tconn);
+       conn_try_outdate_peer(connection);
 
-       kref_put(&tconn->kref, &conn_destroy);
+       kref_put(&connection->kref, drbd_destroy_connection);
        return 0;
 }
 
-void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
+void conn_try_outdate_peer_async(struct drbd_connection *connection)
 {
        struct task_struct *opa;
 
-       kref_get(&tconn->kref);
-       opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+       kref_get(&connection->kref);
+       opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
-               conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
-               kref_put(&tconn->kref, &conn_destroy);
+               drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
+               kref_put(&connection->kref, drbd_destroy_connection);
        }
 }
 
 enum drbd_state_rv
-drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
+drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
 {
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
@@ -543,15 +561,15 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        union drbd_state mask, val;
 
        if (new_role == R_PRIMARY)
-               request_ping(mdev->tconn); /* Detect a dead peer ASAP */
+               request_ping(first_peer_device(device)->connection); /* Detect a dead peer ASAP */
 
-       mutex_lock(mdev->state_mutex);
+       mutex_lock(device->state_mutex);
 
        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;
 
        while (try++ < max_tries) {
-               rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
+               rv = _drbd_request_state(device, mask, val, CS_WAIT_COMPLETE);
 
                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
@@ -562,8 +580,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                }
 
                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
-                   (mdev->state.disk < D_UP_TO_DATE &&
-                    mdev->state.disk >= D_INCONSISTENT)) {
+                   (device->state.disk < D_UP_TO_DATE &&
+                    device->state.disk >= D_INCONSISTENT)) {
                        mask.disk = D_MASK;
                        val.disk  = D_UP_TO_DATE;
                        forced = 1;
@@ -571,10 +589,10 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                }
 
                if (rv == SS_NO_UP_TO_DATE_DISK &&
-                   mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
-                       D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
+                   device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
+                       D_ASSERT(device, device->state.pdsk == D_UNKNOWN);
 
-                       if (conn_try_outdate_peer(mdev->tconn)) {
+                       if (conn_try_outdate_peer(first_peer_device(device)->connection)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
@@ -584,8 +602,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                if (rv == SS_NOTHING_TO_DO)
                        goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-                       if (!conn_try_outdate_peer(mdev->tconn) && force) {
-                               dev_warn(DEV, "Forced into split brain situation!\n");
+                       if (!conn_try_outdate_peer(first_peer_device(device)->connection) && force) {
+                               drbd_warn(device, "Forced into split brain situation!\n");
                                mask.pdsk = D_MASK;
                                val.pdsk  = D_OUTDATED;
 
@@ -597,7 +615,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                           retry at most once more in this case. */
                        int timeo;
                        rcu_read_lock();
-                       nc = rcu_dereference(mdev->tconn->net_conf);
+                       nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
                        rcu_read_unlock();
                        schedule_timeout_interruptible(timeo);
@@ -606,7 +624,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        continue;
                }
                if (rv < SS_SUCCESS) {
-                       rv = _drbd_request_state(mdev, mask, val,
+                       rv = _drbd_request_state(device, mask, val,
                                                CS_VERBOSE + CS_WAIT_COMPLETE);
                        if (rv < SS_SUCCESS)
                                goto out;
@@ -618,53 +636,53 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                goto out;
 
        if (forced)
-               dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
+               drbd_warn(device, "Forced to consider local data as UpToDate!\n");
 
        /* Wait until nothing is on the fly :) */
-       wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+       wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);
 
        /* FIXME also wait for all pending P_BARRIER_ACK? */
 
        if (new_role == R_SECONDARY) {
-               set_disk_ro(mdev->vdisk, true);
-               if (get_ldev(mdev)) {
-                       mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
-                       put_ldev(mdev);
+               set_disk_ro(device->vdisk, true);
+               if (get_ldev(device)) {
+                       device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+                       put_ldev(device);
                }
        } else {
-               mutex_lock(&mdev->tconn->conf_update);
-               nc = mdev->tconn->net_conf;
+               mutex_lock(&first_peer_device(device)->connection->conf_update);
+               nc = first_peer_device(device)->connection->net_conf;
                if (nc)
                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
-               mutex_unlock(&mdev->tconn->conf_update);
+               mutex_unlock(&first_peer_device(device)->connection->conf_update);
 
-               set_disk_ro(mdev->vdisk, false);
-               if (get_ldev(mdev)) {
-                       if (((mdev->state.conn < C_CONNECTED ||
-                              mdev->state.pdsk <= D_FAILED)
-                             && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
-                               drbd_uuid_new_current(mdev);
+               set_disk_ro(device->vdisk, false);
+               if (get_ldev(device)) {
+                       if (((device->state.conn < C_CONNECTED ||
+                              device->state.pdsk <= D_FAILED)
+                             && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
+                               drbd_uuid_new_current(device);
 
-                       mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
-                       put_ldev(mdev);
+                       device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
+                       put_ldev(device);
                }
        }
 
        /* writeout of activity log covered areas of the bitmap
         * to stable storage done in after state change already */
 
-       if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
+       if (device->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                if (forced)
-                       drbd_send_uuids(mdev);
-               drbd_send_current_state(mdev);
+                       drbd_send_uuids(device);
+               drbd_send_current_state(device);
        }
 
-       drbd_md_sync(mdev);
+       drbd_md_sync(device);
 
-       kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
+       kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
 out:
-       mutex_unlock(mdev->state_mutex);
+       mutex_unlock(device->state_mutex);
        return rv;
 }
 
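From here on, every place that used to dereference mdev->tconn goes through first_peer_device(device)->connection instead; the device itself no longer points at a connection, only at its resource. Spelled out as a hypothetical wrapper:

    /* hypothetical helper, for illustration only -- the patch open-codes this */
    static inline struct drbd_connection *device_connection(struct drbd_device *device)
    {
            /* a device currently has exactly one peer device */
            return first_peer_device(device)->connection;
    }
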
@@ -699,9 +717,9 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
        }
 
        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
-               retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
+               retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
        else
-               retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
+               retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -728,7 +746,7 @@ out:
  *  Activity log size used to be fixed 32kB,
  *  but is about to become configurable.
  */
-static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
+static void drbd_md_set_sector_offsets(struct drbd_device *device,
                                       struct drbd_backing_dev *bdev)
 {
        sector_t md_size_sect = 0;
@@ -804,35 +822,35 @@ char *ppsize(char *buf, unsigned long long size)
  * drbd_adm_suspend_io/drbd_adm_resume_io,
  * which are (sub) state changes triggered by admin (drbdsetup),
  * and can be long lived.
- * This changes an mdev->flag, is triggered by drbd internals,
+ * This changes a device->flag, is triggered by drbd internals,
  * and should be short-lived. */
-void drbd_suspend_io(struct drbd_conf *mdev)
+void drbd_suspend_io(struct drbd_device *device)
 {
-       set_bit(SUSPEND_IO, &mdev->flags);
-       if (drbd_suspended(mdev))
+       set_bit(SUSPEND_IO, &device->flags);
+       if (drbd_suspended(device))
                return;
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
+       wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
 }
 
-void drbd_resume_io(struct drbd_conf *mdev)
+void drbd_resume_io(struct drbd_device *device)
 {
-       clear_bit(SUSPEND_IO, &mdev->flags);
-       wake_up(&mdev->misc_wait);
+       clear_bit(SUSPEND_IO, &device->flags);
+       wake_up(&device->misc_wait);
 }
 
 /**
  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
- * @mdev:      DRBD device.
+ * @device:    DRBD device.
  *
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
 enum determine_dev_size
-drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
+drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
 {
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size_sect, u_size;
-       struct drbd_md *md = &mdev->ldev->md;
+       struct drbd_md *md = &device->ldev->md;
        u32 prev_al_stripe_size_4k;
        u32 prev_al_stripes;
        sector_t size;
@@ -851,19 +869,19 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
-       drbd_suspend_io(mdev);
-       buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
+       drbd_suspend_io(device);
+       buffer = drbd_md_get_buffer(device); /* Lock meta-data IO */
        if (!buffer) {
-               drbd_resume_io(mdev);
+               drbd_resume_io(device);
                return DS_ERROR;
        }
 
        /* no wait necessary anymore, actually we could assert that */
-       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+       wait_event(device->al_wait, lc_try_lock(device->act_log));
 
-       prev_first_sect = drbd_md_first_sector(mdev->ldev);
-       prev_size = mdev->ldev->md.md_size_sect;
-       la_size_sect = mdev->ldev->md.la_size_sect;
+       prev_first_sect = drbd_md_first_sector(device->ldev);
+       prev_size = device->ldev->md.md_size_sect;
+       la_size_sect = device->ldev->md.la_size_sect;
 
        if (rs) {
                /* rs is non NULL if we should change the AL layout only */
@@ -876,18 +894,18 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
                md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
        }
 
-       drbd_md_set_sector_offsets(mdev, mdev->ldev);
+       drbd_md_set_sector_offsets(device, device->ldev);
 
        rcu_read_lock();
-       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
-       size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
+       size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);
 
        if (size < la_size_sect) {
                if (rs && u_size == 0) {
                        /* Remove "rs &&" later. This check should always be active, but
                           right now the receiver expects the permissive behavior */
-                       dev_warn(DEV, "Implicit shrink not allowed. "
+                       drbd_warn(device, "Implicit shrink not allowed. "
                                 "Use --size=%llus for explicit shrink.\n",
                                 (unsigned long long)size);
                        rv = DS_ERROR_SHRINK;
@@ -898,60 +916,60 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
                        goto err_out;
        }
 
-       if (drbd_get_capacity(mdev->this_bdev) != size ||
-           drbd_bm_capacity(mdev) != size) {
+       if (drbd_get_capacity(device->this_bdev) != size ||
+           drbd_bm_capacity(device) != size) {
                int err;
-               err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
+               err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
                if (unlikely(err)) {
                        /* currently there is only one error: ENOMEM! */
-                       size = drbd_bm_capacity(mdev)>>1;
+                       size = drbd_bm_capacity(device)>>1;
                        if (size == 0) {
-                               dev_err(DEV, "OUT OF MEMORY! "
+                               drbd_err(device, "OUT OF MEMORY! "
                                    "Could not allocate bitmap!\n");
                        } else {
-                               dev_err(DEV, "BM resizing failed. "
+                               drbd_err(device, "BM resizing failed. "
                                    "Leaving size unchanged at size = %lu KB\n",
                                    (unsigned long)size);
                        }
                        rv = DS_ERROR;
                }
                /* racy, see comments above. */
-               drbd_set_my_capacity(mdev, size);
-               mdev->ldev->md.la_size_sect = size;
-               dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
+               drbd_set_my_capacity(device, size);
+               device->ldev->md.la_size_sect = size;
+               drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                     (unsigned long long)size>>1);
        }
        if (rv <= DS_ERROR)
                goto err_out;
 
-       la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);
+       la_size_changed = (la_size_sect != device->ldev->md.la_size_sect);
 
-       md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
-               || prev_size       != mdev->ldev->md.md_size_sect;
+       md_moved = prev_first_sect != drbd_md_first_sector(device->ldev)
+               || prev_size       != device->ldev->md.md_size_sect;
 
        if (la_size_changed || md_moved || rs) {
                u32 prev_flags;
 
-               drbd_al_shrink(mdev); /* All extents inactive. */
+               drbd_al_shrink(device); /* All extents inactive. */
 
                prev_flags = md->flags;
                md->flags &= ~MDF_PRIMARY_IND;
-               drbd_md_write(mdev, buffer);
+               drbd_md_write(device, buffer);
 
-               dev_info(DEV, "Writing the whole bitmap, %s\n",
+               drbd_info(device, "Writing the whole bitmap, %s\n",
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
-               drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+               drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
                               "size changed", BM_LOCKED_MASK);
-               drbd_initialize_al(mdev, buffer);
+               drbd_initialize_al(device, buffer);
 
                md->flags = prev_flags;
-               drbd_md_write(mdev, buffer);
+               drbd_md_write(device, buffer);
 
                if (rs)
-                       dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
-                                md->al_stripes, md->al_stripe_size_4k * 4);
+                       drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
+                                 md->al_stripes, md->al_stripe_size_4k * 4);
        }
 
        if (size > la_size_sect)
@@ -966,30 +984,30 @@ drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct res
                        md->al_stripe_size_4k = prev_al_stripe_size_4k;
                        md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;
 
-                       drbd_md_set_sector_offsets(mdev, mdev->ldev);
+                       drbd_md_set_sector_offsets(device, device->ldev);
                }
        }
-       lc_unlock(mdev->act_log);
-       wake_up(&mdev->al_wait);
-       drbd_md_put_buffer(mdev);
-       drbd_resume_io(mdev);
+       lc_unlock(device->act_log);
+       wake_up(&device->al_wait);
+       drbd_md_put_buffer(device);
+       drbd_resume_io(device);
 
        return rv;
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
                  sector_t u_size, int assume_peer_has_space)
 {
-       sector_t p_size = mdev->p_size;   /* partner's disk size. */
+       sector_t p_size = device->p_size;   /* partner's disk size. */
        sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
        sector_t size = 0;
 
        m_size = drbd_get_max_capacity(bdev);
 
-       if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
-               dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+       if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
+               drbd_warn(device, "Resize while not connected was forced by the user!\n");
                p_size = m_size;
        }
 
@@ -1011,11 +1029,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
        }
 
        if (size == 0)
-               dev_err(DEV, "Both nodes diskless!\n");
+               drbd_err(device, "Both nodes diskless!\n");
 
        if (u_size) {
                if (u_size > size)
-                       dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
+                       drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
                            (unsigned long)u_size>>1, (unsigned long)size>>1);
                else
                        size = u_size;
@@ -1026,71 +1044,71 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
 
 /**
  * drbd_check_al_size() - Ensures that the AL is of the right size
- * @mdev:      DRBD device.
+ * @device:    DRBD device.
  *
  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
  * failed, and 0 on success. You should call drbd_md_sync() after you called
  * this function.
  */
-static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
+static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
 {
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;
 
-       if (mdev->act_log &&
-           mdev->act_log->nr_elements == dc->al_extents)
+       if (device->act_log &&
+           device->act_log->nr_elements == dc->al_extents)
                return 0;
 
        in_use = 0;
-       t = mdev->act_log;
+       t = device->act_log;
        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                dc->al_extents, sizeof(struct lc_element), 0);
 
        if (n == NULL) {
-               dev_err(DEV, "Cannot allocate act_log lru!\n");
+               drbd_err(device, "Cannot allocate act_log lru!\n");
                return -ENOMEM;
        }
-       spin_lock_irq(&mdev->al_lock);
+       spin_lock_irq(&device->al_lock);
        if (t) {
                for (i = 0; i < t->nr_elements; i++) {
                        e = lc_element_by_index(t, i);
                        if (e->refcnt)
-                               dev_err(DEV, "refcnt(%d)==%d\n",
+                               drbd_err(device, "refcnt(%d)==%d\n",
                                    e->lc_number, e->refcnt);
                        in_use += e->refcnt;
                }
        }
        if (!in_use)
-               mdev->act_log = n;
-       spin_unlock_irq(&mdev->al_lock);
+               device->act_log = n;
+       spin_unlock_irq(&device->al_lock);
        if (in_use) {
-               dev_err(DEV, "Activity log still in use!\n");
+               drbd_err(device, "Activity log still in use!\n");
                lc_destroy(n);
                return -EBUSY;
        } else {
                if (t)
                        lc_destroy(t);
        }
-       drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
+       drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
        return 0;
 }
 
-static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
+static void drbd_setup_queue_param(struct drbd_device *device, unsigned int max_bio_size)
 {
-       struct request_queue * const q = mdev->rq_queue;
+       struct request_queue * const q = device->rq_queue;
        unsigned int max_hw_sectors = max_bio_size >> 9;
        unsigned int max_segments = 0;
 
-       if (get_ldev_if_state(mdev, D_ATTACHING)) {
-               struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+       if (get_ldev_if_state(device, D_ATTACHING)) {
+               struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
 
                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                rcu_read_lock();
-               max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+               max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
                rcu_read_unlock();
-               put_ldev(mdev);
+               put_ldev(device);
        }
 
        blk_queue_logical_block_size(q, 512);
@@ -1099,46 +1117,46 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
 
-       if (get_ldev_if_state(mdev, D_ATTACHING)) {
-               struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
+       if (get_ldev_if_state(device, D_ATTACHING)) {
+               struct request_queue * const b = device->ldev->backing_bdev->bd_disk->queue;
 
                blk_queue_stack_limits(q, b);
 
                if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
-                       dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
+                       drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                                 q->backing_dev_info.ra_pages,
                                 b->backing_dev_info.ra_pages);
                        q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
                }
-               put_ldev(mdev);
+               put_ldev(device);
        }
 }
 
-void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
+void drbd_reconsider_max_bio_size(struct drbd_device *device)
 {
        unsigned int now, new, local, peer;
 
-       now = queue_max_hw_sectors(mdev->rq_queue) << 9;
-       local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
-       peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
+       now = queue_max_hw_sectors(device->rq_queue) << 9;
+       local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
+       peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
 
-       if (get_ldev_if_state(mdev, D_ATTACHING)) {
-               local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
-               mdev->local_max_bio_size = local;
-               put_ldev(mdev);
+       if (get_ldev_if_state(device, D_ATTACHING)) {
+               local = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
+               device->local_max_bio_size = local;
+               put_ldev(device);
        }
        local = min(local, DRBD_MAX_BIO_SIZE);
 
        /* We may ignore peer limits if the peer is modern enough.
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
-       if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
-               if (mdev->tconn->agreed_pro_version < 94)
-                       peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+       if (device->state.conn >= C_WF_REPORT_PARAMS) {
+               if (first_peer_device(device)->connection->agreed_pro_version < 94)
+                       peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
                        /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
-               else if (mdev->tconn->agreed_pro_version == 94)
+               else if (first_peer_device(device)->connection->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
-               else if (mdev->tconn->agreed_pro_version < 100)
+               else if (first_peer_device(device)->connection->agreed_pro_version < 100)
                        peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
                else
                        peer = DRBD_MAX_BIO_SIZE;
@@ -1146,57 +1164,57 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 
        new = min(local, peer);
 
-       if (mdev->state.role == R_PRIMARY && new < now)
-               dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
+       if (device->state.role == R_PRIMARY && new < now)
+               drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 
        if (new != now)
-               dev_info(DEV, "max BIO size = %u\n", new);
+               drbd_info(device, "max BIO size = %u\n", new);
 
-       drbd_setup_queue_param(mdev, new);
+       drbd_setup_queue_param(device, new);
 }
 
 /* Starts the worker thread */
-static void conn_reconfig_start(struct drbd_tconn *tconn)
+static void conn_reconfig_start(struct drbd_connection *connection)
 {
-       drbd_thread_start(&tconn->worker);
-       conn_flush_workqueue(tconn);
+       drbd_thread_start(&connection->worker);
+       conn_flush_workqueue(connection);
 }
 
 /* if still unconfigured, stops worker again. */
-static void conn_reconfig_done(struct drbd_tconn *tconn)
+static void conn_reconfig_done(struct drbd_connection *connection)
 {
        bool stop_threads;
-       spin_lock_irq(&tconn->req_lock);
-       stop_threads = conn_all_vols_unconf(tconn) &&
-               tconn->cstate == C_STANDALONE;
-       spin_unlock_irq(&tconn->req_lock);
+       spin_lock_irq(&connection->req_lock);
+       stop_threads = conn_all_vols_unconf(connection) &&
+               connection->cstate == C_STANDALONE;
+       spin_unlock_irq(&connection->req_lock);
        if (stop_threads) {
                /* asender is implicitly stopped by receiver
                 * in conn_disconnect() */
-               drbd_thread_stop(&tconn->receiver);
-               drbd_thread_stop(&tconn->worker);
+               drbd_thread_stop(&connection->receiver);
+               drbd_thread_stop(&connection->worker);
        }
 }
 
 /* Make sure IO is suspended before calling this function(). */
-static void drbd_suspend_al(struct drbd_conf *mdev)
+static void drbd_suspend_al(struct drbd_device *device)
 {
        int s = 0;
 
-       if (!lc_try_lock(mdev->act_log)) {
-               dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
+       if (!lc_try_lock(device->act_log)) {
+               drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
                return;
        }
 
-       drbd_al_shrink(mdev);
-       spin_lock_irq(&mdev->tconn->req_lock);
-       if (mdev->state.conn < C_CONNECTED)
-               s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
-       spin_unlock_irq(&mdev->tconn->req_lock);
-       lc_unlock(mdev->act_log);
+       drbd_al_shrink(device);
+       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+       if (device->state.conn < C_CONNECTED)
+               s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
+       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
+       lc_unlock(device->act_log);
 
        if (s)
-               dev_info(DEV, "Suspended AL updates\n");
+               drbd_info(device, "Suspended AL updates\n");
 }
 
 
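The peer limit picked in drbd_reconsider_max_bio_size() above depends only on the agreed protocol version; condensed into one place it reads roughly like this (a sketch, peer_limit() is not a function this patch adds, the constants and fields are the ones used above):

    /* sketch: restates the peer-limit selection of drbd_reconsider_max_bio_size() */
    static unsigned int peer_limit(struct drbd_device *device)
    {
            struct drbd_connection *connection = first_peer_device(device)->connection;

            if (device->state.conn < C_WF_REPORT_PARAMS)
                    return device->peer_max_bio_size;       /* keep last known value */
            if (connection->agreed_pro_version < 94)        /* up to drbd 8.3.7 */
                    return min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
            if (connection->agreed_pro_version == 94)
                    return DRBD_MAX_SIZE_H80_PACKET;
            if (connection->agreed_pro_version < 100)       /* 8.3.8 .. before 8.4.0 */
                    return DRBD_MAX_BIO_SIZE_P95;
            return DRBD_MAX_BIO_SIZE;
    }
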
@@ -1237,7 +1255,7 @@ static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        struct disk_conf *new_disk_conf, *old_disk_conf;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
        int err, fifo_size;
@@ -1248,11 +1266,11 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
+       device = adm_ctx.device;
 
        /* we also need a disk
         * to change the options on */
-       if (!get_ldev(mdev)) {
+       if (!get_ldev(device)) {
                retcode = ERR_NO_DISK;
                goto out;
        }
@@ -1263,8 +1281,8 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       mutex_lock(&mdev->tconn->conf_update);
-       old_disk_conf = mdev->ldev->disk_conf;
+       mutex_lock(&first_peer_device(device)->connection->conf_update);
+       old_disk_conf = device->ldev->disk_conf;
        *new_disk_conf = *old_disk_conf;
        if (should_set_defaults(info))
                set_disk_conf_defaults(new_disk_conf);
@@ -1273,6 +1291,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
+               goto fail_unlock;
        }
 
        if (!expect(new_disk_conf->resync_rate >= 1))
@@ -1280,29 +1299,29 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 
        if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
                new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
-       if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
-               new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);
+       if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
+               new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);
 
        if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
 
        fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-       if (fifo_size != mdev->rs_plan_s->size) {
+       if (fifo_size != device->rs_plan_s->size) {
                new_plan = fifo_alloc(fifo_size);
                if (!new_plan) {
-                       dev_err(DEV, "kmalloc of fifo_buffer failed");
+                       drbd_err(device, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;
                        goto fail_unlock;
                }
        }
 
-       drbd_suspend_io(mdev);
-       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-       drbd_al_shrink(mdev);
-       err = drbd_check_al_size(mdev, new_disk_conf);
-       lc_unlock(mdev->act_log);
-       wake_up(&mdev->al_wait);
-       drbd_resume_io(mdev);
+       drbd_suspend_io(device);
+       wait_event(device->al_wait, lc_try_lock(device->act_log));
+       drbd_al_shrink(device);
+       err = drbd_check_al_size(device, new_disk_conf);
+       lc_unlock(device->act_log);
+       wake_up(&device->al_wait);
+       drbd_resume_io(device);
 
        if (err) {
                retcode = ERR_NOMEM;
@@ -1310,10 +1329,10 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        }
 
        write_lock_irq(&global_state_lock);
-       retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+       retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
        if (retcode == NO_ERROR) {
-               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
-               drbd_resync_after_changed(mdev);
+               rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+               drbd_resync_after_changed(device);
        }
        write_unlock_irq(&global_state_lock);
 
@@ -1321,42 +1340,42 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
                goto fail_unlock;
 
        if (new_plan) {
-               old_plan = mdev->rs_plan_s;
-               rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+               old_plan = device->rs_plan_s;
+               rcu_assign_pointer(device->rs_plan_s, new_plan);
        }
 
-       mutex_unlock(&mdev->tconn->conf_update);
+       mutex_unlock(&first_peer_device(device)->connection->conf_update);
 
        if (new_disk_conf->al_updates)
-               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+               device->ldev->md.flags &= ~MDF_AL_DISABLED;
        else
-               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+               device->ldev->md.flags |= MDF_AL_DISABLED;
 
        if (new_disk_conf->md_flushes)
-               clear_bit(MD_NO_FUA, &mdev->flags);
+               clear_bit(MD_NO_FUA, &device->flags);
        else
-               set_bit(MD_NO_FUA, &mdev->flags);
+               set_bit(MD_NO_FUA, &device->flags);
 
-       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+       drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
 
-       drbd_md_sync(mdev);
+       drbd_md_sync(device);
 
-       if (mdev->state.conn >= C_CONNECTED)
-               drbd_send_sync_param(mdev);
+       if (device->state.conn >= C_CONNECTED)
+               drbd_send_sync_param(device);
 
        synchronize_rcu();
        kfree(old_disk_conf);
        kfree(old_plan);
-       mod_timer(&mdev->request_timer, jiffies + HZ);
+       mod_timer(&device->request_timer, jiffies + HZ);
        goto success;
 
 fail_unlock:
-       mutex_unlock(&mdev->tconn->conf_update);
+       mutex_unlock(&first_peer_device(device)->connection->conf_update);
  fail:
        kfree(new_disk_conf);
        kfree(new_plan);
 success:
-       put_ldev(mdev);
+       put_ldev(device);
  out:
        drbd_adm_finish(info, retcode);
        return 0;
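
Two details in the drbd_adm_disk_opts() hunks above are easy to miss: the mandatory-tag parse error now bails out properly (the added goto fail_unlock), and the disk_conf replacement follows the usual RCU publish-then-free sequence. Reduced to its core (a sketch; swap_disk_conf() is a made-up name, the calls are the ones in the function above):

    /* sketch only: the RCU swap drbd_adm_disk_opts() performs on ldev->disk_conf */
    static void swap_disk_conf(struct drbd_device *device, struct disk_conf *new_disk_conf)
    {
            struct disk_conf *old_disk_conf;
            struct mutex *conf_update = &first_peer_device(device)->connection->conf_update;

            mutex_lock(conf_update);
            old_disk_conf = device->ldev->disk_conf;
            *new_disk_conf = *old_disk_conf;        /* start from the current values */
            /* ... apply and validate the requested changes here ... */
            rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
            mutex_unlock(conf_update);

            synchronize_rcu();                      /* readers may still use the old copy */
            kfree(old_disk_conf);
    }
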
@@ -1364,7 +1383,7 @@ success:
 
 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        int err;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
@@ -1385,11 +1404,11 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto finish;
 
-       mdev = adm_ctx.mdev;
-       conn_reconfig_start(mdev->tconn);
+       device = adm_ctx.device;
+       conn_reconfig_start(first_peer_device(device)->connection);
 
        /* if you want to reconfigure, please tear down first */
-       if (mdev->state.disk > D_DISKLESS) {
+       if (device->state.disk > D_DISKLESS) {
                retcode = ERR_DISK_CONFIGURED;
                goto fail;
        }
@@ -1397,17 +1416,17 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * drbd_ldev_destroy is done already, we may end up here very fast,
         * e.g. if someone calls attach from the on-io-error handler,
         * to realize a "hot spare" feature (not that I'd recommend that) */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+       wait_event(device->misc_wait, !atomic_read(&device->local_cnt));
 
        /* make sure there is no leftover from previous force-detach attempts */
-       clear_bit(FORCE_DETACH, &mdev->flags);
-       clear_bit(WAS_IO_ERROR, &mdev->flags);
-       clear_bit(WAS_READ_ERROR, &mdev->flags);
+       clear_bit(FORCE_DETACH, &device->flags);
+       clear_bit(WAS_IO_ERROR, &device->flags);
+       clear_bit(WAS_READ_ERROR, &device->flags);
 
        /* and no leftover from previously aborted resync or verify, either */
-       mdev->rs_total = 0;
-       mdev->rs_failed = 0;
-       atomic_set(&mdev->rs_pending_cnt, 0);
+       device->rs_total = 0;
+       device->rs_failed = 0;
+       atomic_set(&device->rs_pending_cnt, 0);
 
        /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
@@ -1447,13 +1466,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        }
 
        write_lock_irq(&global_state_lock);
-       retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+       retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
        write_unlock_irq(&global_state_lock);
        if (retcode != NO_ERROR)
                goto fail;
 
        rcu_read_lock();
-       nc = rcu_dereference(mdev->tconn->net_conf);
+       nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
        if (nc) {
                if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
                        rcu_read_unlock();
@@ -1464,9 +1483,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        rcu_read_unlock();
 
        bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
-                                 FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
+                                 FMODE_READ | FMODE_WRITE | FMODE_EXCL, device);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
+               drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_DISK;
                goto fail;
@@ -1484,9 +1503,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                                  (new_disk_conf->meta_dev_idx < 0) ?
-                                 (void *)mdev : (void *)drbd_m_holder);
+                                 (void *)device : (void *)drbd_m_holder);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
+               drbd_err(device, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_MD_DISK;
                goto fail;
@@ -1510,7 +1529,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
        /* Read our meta data super block early.
         * This also sets other on-disk offsets. */
-       retcode = drbd_md_read(mdev, nbc);
+       retcode = drbd_md_read(device, nbc);
        if (retcode != NO_ERROR)
                goto fail;
 
@@ -1520,7 +1539,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                new_disk_conf->al_extents = drbd_al_extents_max(nbc);
 
        if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
-               dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
+               drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
                        (unsigned long long) new_disk_conf->disk_size);
                retcode = ERR_DISK_TOO_SMALL;
@@ -1538,7 +1557,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 
        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
                retcode = ERR_MD_DISK_TOO_SMALL;
-               dev_warn(DEV, "refusing attach: md-device too small, "
+               drbd_warn(device, "refusing attach: md-device too small, "
                     "at least %llu sectors needed for this meta-disk type\n",
                     (unsigned long long) min_md_device_sectors);
                goto fail;
@@ -1547,7 +1566,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        /* Make sure the new disk is big enough
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
-           drbd_get_capacity(mdev->this_bdev)) {
+           drbd_get_capacity(device->this_bdev)) {
                retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
@@ -1555,15 +1574,15 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
 
        if (nbc->known_size > max_possible_sectors) {
-               dev_warn(DEV, "==> truncating very big lower level device "
+               drbd_warn(device, "==> truncating very big lower level device "
                        "to currently maximum possible %llu sectors <==\n",
                        (unsigned long long) max_possible_sectors);
                if (new_disk_conf->meta_dev_idx >= 0)
-                       dev_warn(DEV, "==>> using internal or flexible "
+                       drbd_warn(device, "==>> using internal or flexible "
                                      "meta data may help <<==\n");
        }
 
-       drbd_suspend_io(mdev);
+       drbd_suspend_io(device);
        /* also wait for the last barrier ack. */
        /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
         * We need a way to either ignore barrier acks for barriers sent before a device
@@ -1571,45 +1590,45 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * As barriers are counted per resource,
         * we'd need to suspend io on all devices of a resource.
         */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
+       wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
        /* and for any other previously queued work */
-       drbd_flush_workqueue(mdev);
+       drbd_flush_workqueue(device);
 
-       rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
+       rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
        retcode = rv;  /* FIXME: Type mismatch. */
-       drbd_resume_io(mdev);
+       drbd_resume_io(device);
        if (rv < SS_SUCCESS)
                goto fail;
 
-       if (!get_ldev_if_state(mdev, D_ATTACHING))
+       if (!get_ldev_if_state(device, D_ATTACHING))
                goto force_diskless;
 
-       if (!mdev->bitmap) {
-               if (drbd_bm_init(mdev)) {
+       if (!device->bitmap) {
+               if (drbd_bm_init(device)) {
                        retcode = ERR_NOMEM;
                        goto force_diskless_dec;
                }
        }
 
-       if (mdev->state.conn < C_CONNECTED &&
-           mdev->state.role == R_PRIMARY &&
-           (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
-               dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
-                   (unsigned long long)mdev->ed_uuid);
+       if (device->state.conn < C_CONNECTED &&
+           device->state.role == R_PRIMARY &&
+           (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
+               drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
+                   (unsigned long long)device->ed_uuid);
                retcode = ERR_DATA_NOT_CURRENT;
                goto force_diskless_dec;
        }
 
        /* Since we are diskless, fix the activity log first... */
-       if (drbd_check_al_size(mdev, new_disk_conf)) {
+       if (drbd_check_al_size(device, new_disk_conf)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }
 
        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-           drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
-               dev_warn(DEV, "refusing to truncate a consistent device\n");
+           drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
+               drbd_warn(device, "refusing to truncate a consistent device\n");
                retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;
        }
@@ -1617,40 +1636,41 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
        if (new_disk_conf->md_flushes)
-               clear_bit(MD_NO_FUA, &mdev->flags);
+               clear_bit(MD_NO_FUA, &device->flags);
        else
-               set_bit(MD_NO_FUA, &mdev->flags);
+               set_bit(MD_NO_FUA, &device->flags);
 
        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
-        * now mdev takes over responsibility, and the state engine should
+        * now device takes over responsibility, and the state engine should
         * clean it up somewhere.  */
-       D_ASSERT(mdev->ldev == NULL);
-       mdev->ldev = nbc;
-       mdev->resync = resync_lru;
-       mdev->rs_plan_s = new_plan;
+       D_ASSERT(device, device->ldev == NULL);
+       device->ldev = nbc;
+       device->resync = resync_lru;
+       device->rs_plan_s = new_plan;
        nbc = NULL;
        resync_lru = NULL;
        new_disk_conf = NULL;
        new_plan = NULL;
 
-       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+       drbd_bump_write_ordering(first_peer_device(device)->connection, WO_bdev_flush);
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
-               set_bit(CRASHED_PRIMARY, &mdev->flags);
+       if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
+               set_bit(CRASHED_PRIMARY, &device->flags);
        else
-               clear_bit(CRASHED_PRIMARY, &mdev->flags);
+               clear_bit(CRASHED_PRIMARY, &device->flags);
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
-               set_bit(CRASHED_PRIMARY, &mdev->flags);
+       if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+           !(device->state.role == R_PRIMARY &&
+             first_peer_device(device)->connection->susp_nod))
+               set_bit(CRASHED_PRIMARY, &device->flags);
 
-       mdev->send_cnt = 0;
-       mdev->recv_cnt = 0;
-       mdev->read_cnt = 0;
-       mdev->writ_cnt = 0;
+       device->send_cnt = 0;
+       device->recv_cnt = 0;
+       device->read_cnt = 0;
+       device->writ_cnt = 0;
 
-       drbd_reconsider_max_bio_size(mdev);
+       drbd_reconsider_max_bio_size(device);
 
        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,
@@ -1666,50 +1686,50 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * so we can automatically recover from a crash of a
         * degraded but active "cluster" after a certain timeout.
         */
-       clear_bit(USE_DEGR_WFC_T, &mdev->flags);
-       if (mdev->state.role != R_PRIMARY &&
-            drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
-               set_bit(USE_DEGR_WFC_T, &mdev->flags);
+       clear_bit(USE_DEGR_WFC_T, &device->flags);
+       if (device->state.role != R_PRIMARY &&
+            drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
+           !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
+               set_bit(USE_DEGR_WFC_T, &device->flags);
 
-       dd = drbd_determine_dev_size(mdev, 0, NULL);
+       dd = drbd_determine_dev_size(device, 0, NULL);
        if (dd <= DS_ERROR) {
                retcode = ERR_NOMEM_BITMAP;
                goto force_diskless_dec;
        } else if (dd == DS_GREW)
-               set_bit(RESYNC_AFTER_NEG, &mdev->flags);
+               set_bit(RESYNC_AFTER_NEG, &device->flags);
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
-           (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
-            drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
-               dev_info(DEV, "Assuming that all blocks are out of sync "
+       if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
+           (test_bit(CRASHED_PRIMARY, &device->flags) &&
+            drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
+               drbd_info(device, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
-               if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+               if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
                        "set_n_write from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        } else {
-               if (drbd_bitmap_io(mdev, &drbd_bm_read,
+               if (drbd_bitmap_io(device, &drbd_bm_read,
                        "read from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
        }
 
-       if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
-               drbd_suspend_al(mdev); /* IO is still suspended here... */
+       if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
+               drbd_suspend_al(device); /* IO is still suspended here... */
 
-       spin_lock_irq(&mdev->tconn->req_lock);
-       os = drbd_read_state(mdev);
+       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+       os = drbd_read_state(device);
        ns = os;
        /* If MDF_CONSISTENT is not set go into inconsistent state,
           otherwise investigate MDF_WasUpToDate...
           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
           otherwise into D_CONSISTENT state.
        */
-       if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
-               if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
+       if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
+               if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
                        ns.disk = D_CONSISTENT;
                else
                        ns.disk = D_OUTDATED;
@@ -1717,12 +1737,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                ns.disk = D_INCONSISTENT;
        }
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
+       if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;
 
        rcu_read_lock();
        if (ns.disk == D_CONSISTENT &&
-           (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
+           (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;
 
        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1730,56 +1750,56 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
           this point, because drbd_request_state() modifies these
           flags. */
 
-       if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
-               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+       if (rcu_dereference(device->ldev->disk_conf)->al_updates)
+               device->ldev->md.flags &= ~MDF_AL_DISABLED;
        else
-               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+               device->ldev->md.flags |= MDF_AL_DISABLED;
 
        rcu_read_unlock();
 
        /* In case we are C_CONNECTED postpone any decision on the new disk
           state after the negotiation phase. */
-       if (mdev->state.conn == C_CONNECTED) {
-               mdev->new_state_tmp.i = ns.i;
+       if (device->state.conn == C_CONNECTED) {
+               device->new_state_tmp.i = ns.i;
                ns.i = os.i;
                ns.disk = D_NEGOTIATING;
 
                /* We expect to receive up-to-date UUIDs soon.
                   To avoid a race in receive_state, free p_uuid while
                   holding req_lock. I.e. atomic with the state change */
-               kfree(mdev->p_uuid);
-               mdev->p_uuid = NULL;
+               kfree(device->p_uuid);
+               device->p_uuid = NULL;
        }
 
-       rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
-       spin_unlock_irq(&mdev->tconn->req_lock);
+       rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
+       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
 
        if (rv < SS_SUCCESS)
                goto force_diskless_dec;
 
-       mod_timer(&mdev->request_timer, jiffies + HZ);
+       mod_timer(&device->request_timer, jiffies + HZ);
 
-       if (mdev->state.role == R_PRIMARY)
-               mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
+       if (device->state.role == R_PRIMARY)
+               device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
        else
-               mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
+               device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
 
-       drbd_md_mark_dirty(mdev);
-       drbd_md_sync(mdev);
+       drbd_md_mark_dirty(device);
+       drbd_md_sync(device);
 
-       kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
-       put_ldev(mdev);
-       conn_reconfig_done(mdev->tconn);
+       kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
+       put_ldev(device);
+       conn_reconfig_done(first_peer_device(device)->connection);
        drbd_adm_finish(info, retcode);
        return 0;
 
  force_diskless_dec:
-       put_ldev(mdev);
+       put_ldev(device);
  force_diskless:
-       drbd_force_state(mdev, NS(disk, D_DISKLESS));
-       drbd_md_sync(mdev);
+       drbd_force_state(device, NS(disk, D_DISKLESS));
+       drbd_md_sync(device);
  fail:
-       conn_reconfig_done(mdev->tconn);
+       conn_reconfig_done(first_peer_device(device)->connection);
        if (nbc) {
                if (nbc->backing_bdev)
                        blkdev_put(nbc->backing_bdev,
@@ -1798,26 +1818,26 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        return 0;
 }
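
One detail of the attach path worth noting is the ownership handoff at the "point of no return": once device->ldev, device->resync and device->rs_plan_s have taken over the freshly allocated objects, the local pointers are set to NULL so the common fail: path only releases what is still locally owned. A minimal, self-contained userspace sketch of that idiom (hypothetical attach_device() and struct backing, nothing DRBD-specific) might look like this:

        #include <stdio.h>
        #include <stdlib.h>

        struct backing { int fd; };
        struct device  { struct backing *ldev; };

        /* attach_device(): allocate, validate, then hand ownership to *dev.
         * After the handoff the local pointer is cleared, so the error path
         * frees only objects that are still locally owned. */
        static int attach_device(struct device *dev, int want_fd)
        {
                struct backing *nbc = calloc(1, sizeof(*nbc));
                if (!nbc)
                        return -1;

                nbc->fd = want_fd;
                if (nbc->fd < 0)          /* some validation step fails */
                        goto fail;

                dev->ldev = nbc;          /* point of no return: dev owns it now */
                nbc = NULL;               /* error cleanup below must not free it */

                return 0;

        fail:
                free(nbc);                /* no-op once ownership was handed over */
                return -1;
        }

        int main(void)
        {
                struct device dev = { 0 };

                if (attach_device(&dev, 3) == 0)
                        printf("attached, fd=%d\n", dev.ldev->fd);
                free(dev.ldev);
                return 0;
        }
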
 
-static int adm_detach(struct drbd_conf *mdev, int force)
+static int adm_detach(struct drbd_device *device, int force)
 {
        enum drbd_state_rv retcode;
        int ret;
 
        if (force) {
-               set_bit(FORCE_DETACH, &mdev->flags);
-               drbd_force_state(mdev, NS(disk, D_FAILED));
+               set_bit(FORCE_DETACH, &device->flags);
+               drbd_force_state(device, NS(disk, D_FAILED));
                retcode = SS_SUCCESS;
                goto out;
        }
 
-       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
-       drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
-       retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
-       drbd_md_put_buffer(mdev);
+       drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
+       drbd_md_get_buffer(device); /* make sure there is no in-flight meta-data IO */
+       retcode = drbd_request_state(device, NS(disk, D_FAILED));
+       drbd_md_put_buffer(device);
        /* D_FAILED will transition to DISKLESS. */
-       ret = wait_event_interruptible(mdev->misc_wait,
-                       mdev->state.disk != D_FAILED);
-       drbd_resume_io(mdev);
+       ret = wait_event_interruptible(device->misc_wait,
+                       device->state.disk != D_FAILED);
+       drbd_resume_io(device);
        if ((int)retcode == (int)SS_IS_DISKLESS)
                retcode = SS_NOTHING_TO_DO;
        if (ret)
@@ -1852,24 +1872,25 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
+       retcode = adm_detach(adm_ctx.device, parms.force_detach);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static bool conn_resync_running(struct drbd_tconn *tconn)
+static bool conn_resync_running(struct drbd_connection *connection)
 {
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        bool rv = false;
        int vnr;
 
        rcu_read_lock();
-       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
-               if (mdev->state.conn == C_SYNC_SOURCE ||
-                   mdev->state.conn == C_SYNC_TARGET ||
-                   mdev->state.conn == C_PAUSED_SYNC_S ||
-                   mdev->state.conn == C_PAUSED_SYNC_T) {
+       idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+               struct drbd_device *device = peer_device->device;
+               if (device->state.conn == C_SYNC_SOURCE ||
+                   device->state.conn == C_SYNC_TARGET ||
+                   device->state.conn == C_PAUSED_SYNC_S ||
+                   device->state.conn == C_PAUSED_SYNC_T) {
                        rv = true;
                        break;
                }
@@ -1879,16 +1900,17 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
        return rv;
 }
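
The rewritten loop above illustrates the new iteration pattern: walk connection->peer_devices with idr_for_each_entry() and reach the per-volume state via peer_device->device, where the old code iterated tconn->volumes directly. As a sketch only, assuming the DRBD-internal types from drbd_int.h, a hypothetical helper in the same style (not part of this patch) could count resyncing volumes like this:

        /* Hypothetical helper in the style of conn_resync_running():
         * count volumes of a connection that are currently resyncing. */
        static int conn_num_syncing(struct drbd_connection *connection)
        {
                struct drbd_peer_device *peer_device;
                int vnr, count = 0;

                rcu_read_lock();
                idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
                        struct drbd_device *device = peer_device->device;

                        if (device->state.conn == C_SYNC_SOURCE ||
                            device->state.conn == C_SYNC_TARGET)
                                count++;
                }
                rcu_read_unlock();

                return count;
        }
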
 
-static bool conn_ov_running(struct drbd_tconn *tconn)
+static bool conn_ov_running(struct drbd_connection *connection)
 {
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        bool rv = false;
        int vnr;
 
        rcu_read_lock();
-       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
-               if (mdev->state.conn == C_VERIFY_S ||
-                   mdev->state.conn == C_VERIFY_T) {
+       idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
+               struct drbd_device *device = peer_device->device;
+               if (device->state.conn == C_VERIFY_S ||
+                   device->state.conn == C_VERIFY_T) {
                        rv = true;
                        break;
                }
@@ -1899,12 +1921,12 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
 }
 
 static enum drbd_ret_code
-_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+_check_net_options(struct drbd_connection *connection, struct net_conf *old_conf, struct net_conf *new_conf)
 {
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        int i;
 
-       if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+       if (old_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
                if (new_conf->wire_protocol != old_conf->wire_protocol)
                        return ERR_NEED_APV_100;
 
@@ -1916,22 +1938,23 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
        }
 
        if (!new_conf->two_primaries &&
-           conn_highest_role(tconn) == R_PRIMARY &&
-           conn_highest_peer(tconn) == R_PRIMARY)
+           conn_highest_role(connection) == R_PRIMARY &&
+           conn_highest_peer(connection) == R_PRIMARY)
                return ERR_NEED_ALLOW_TWO_PRI;
 
        if (new_conf->two_primaries &&
            (new_conf->wire_protocol != DRBD_PROT_C))
                return ERR_NOT_PROTO_C;
 
-       idr_for_each_entry(&tconn->volumes, mdev, i) {
-               if (get_ldev(mdev)) {
-                       enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
-                       put_ldev(mdev);
+       idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+               struct drbd_device *device = peer_device->device;
+               if (get_ldev(device)) {
+                       enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
+                       put_ldev(device);
                        if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
                                return ERR_STONITH_AND_PROT_A;
                }
-               if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+               if (device->state.role == R_PRIMARY && new_conf->discard_my_data)
                        return ERR_DISCARD_IMPOSSIBLE;
        }
 
@@ -1942,20 +1965,21 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
 }
 
 static enum drbd_ret_code
-check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+check_net_options(struct drbd_connection *connection, struct net_conf *new_conf)
 {
        static enum drbd_ret_code rv;
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        int i;
 
        rcu_read_lock();
-       rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+       rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_conf);
        rcu_read_unlock();
 
-       /* tconn->volumes protected by genl_lock() here */
-       idr_for_each_entry(&tconn->volumes, mdev, i) {
-               if (!mdev->bitmap) {
-                       if(drbd_bm_init(mdev))
+       /* connection->volumes protected by genl_lock() here */
+       idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+               struct drbd_device *device = peer_device->device;
+               if (!device->bitmap) {
+                       if (drbd_bm_init(device))
                                return ERR_NOMEM;
                }
        }
@@ -2025,7 +2049,7 @@ static void free_crypto(struct crypto *crypto)
 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
-       struct drbd_tconn *tconn;
+       struct drbd_connection *connection;
        struct net_conf *old_conf, *new_conf = NULL;
        int err;
        int ovr; /* online verify running */
@@ -2038,7 +2062,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       tconn = adm_ctx.tconn;
+       connection = adm_ctx.connection;
 
        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
@@ -2046,11 +2070,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       conn_reconfig_start(tconn);
+       conn_reconfig_start(connection);
 
-       mutex_lock(&tconn->data.mutex);
-       mutex_lock(&tconn->conf_update);
-       old_conf = tconn->net_conf;
+       mutex_lock(&connection->data.mutex);
+       mutex_lock(&connection->conf_update);
+       old_conf = connection->net_conf;
 
        if (!old_conf) {
                drbd_msg_put_info("net conf missing, try connect");
@@ -2069,19 +2093,19 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       retcode = check_net_options(tconn, new_conf);
+       retcode = check_net_options(connection, new_conf);
        if (retcode != NO_ERROR)
                goto fail;
 
        /* re-sync running */
-       rsr = conn_resync_running(tconn);
+       rsr = conn_resync_running(connection);
        if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
                retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }
 
        /* online verify running */
-       ovr = conn_ov_running(tconn);
+       ovr = conn_ov_running(connection);
        if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
                retcode = ERR_VERIFY_RUNNING;
                goto fail;
@@ -2091,45 +2115,45 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto fail;
 
-       rcu_assign_pointer(tconn->net_conf, new_conf);
+       rcu_assign_pointer(connection->net_conf, new_conf);
 
        if (!rsr) {
-               crypto_free_hash(tconn->csums_tfm);
-               tconn->csums_tfm = crypto.csums_tfm;
+               crypto_free_hash(connection->csums_tfm);
+               connection->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;
        }
        if (!ovr) {
-               crypto_free_hash(tconn->verify_tfm);
-               tconn->verify_tfm = crypto.verify_tfm;
+               crypto_free_hash(connection->verify_tfm);
+               connection->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;
        }
 
-       crypto_free_hash(tconn->integrity_tfm);
-       tconn->integrity_tfm = crypto.integrity_tfm;
-       if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
-               /* Do this without trying to take tconn->data.mutex again.  */
-               __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
+       crypto_free_hash(connection->integrity_tfm);
+       connection->integrity_tfm = crypto.integrity_tfm;
+       if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
+               /* Do this without trying to take connection->data.mutex again.  */
+               __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
 
-       crypto_free_hash(tconn->cram_hmac_tfm);
-       tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+       crypto_free_hash(connection->cram_hmac_tfm);
+       connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
-       mutex_unlock(&tconn->conf_update);
-       mutex_unlock(&tconn->data.mutex);
+       mutex_unlock(&connection->conf_update);
+       mutex_unlock(&connection->data.mutex);
        synchronize_rcu();
        kfree(old_conf);
 
-       if (tconn->cstate >= C_WF_REPORT_PARAMS)
-               drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+       if (connection->cstate >= C_WF_REPORT_PARAMS)
+               drbd_send_sync_param(minor_to_device(conn_lowest_minor(connection)));
 
        goto done;
 
  fail:
-       mutex_unlock(&tconn->conf_update);
-       mutex_unlock(&tconn->data.mutex);
+       mutex_unlock(&connection->conf_update);
+       mutex_unlock(&connection->data.mutex);
        free_crypto(&crypto);
        kfree(new_conf);
  done:
-       conn_reconfig_done(tconn);
+       conn_reconfig_done(connection);
  out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2137,10 +2161,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 
 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        struct net_conf *old_conf, *new_conf = NULL;
        struct crypto crypto = { };
-       struct drbd_tconn *tconn;
+       struct drbd_resource *resource;
+       struct drbd_connection *connection;
        enum drbd_ret_code retcode;
        int i;
        int err;
@@ -2160,24 +2185,28 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
        /* No need for _rcu here. All reconfiguration is
         * strictly serialized on genl_lock(). We are protected against
         * concurrent reconfiguration/addition/deletion */
-       list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
-               if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
-                   !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
-                       retcode = ERR_LOCAL_ADDR;
-                       goto out;
-               }
+       for_each_resource(resource, &drbd_resources) {
+               for_each_connection(connection, resource) {
+                       if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
+                           !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
+                                   connection->my_addr_len)) {
+                               retcode = ERR_LOCAL_ADDR;
+                               goto out;
+                       }
 
-               if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
-                   !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
-                       retcode = ERR_PEER_ADDR;
-                       goto out;
+                       if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
+                           !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
+                                   connection->peer_addr_len)) {
+                               retcode = ERR_PEER_ADDR;
+                               goto out;
+                       }
                }
        }
 
-       tconn = adm_ctx.tconn;
-       conn_reconfig_start(tconn);
+       connection = adm_ctx.connection;
+       conn_reconfig_start(connection);
 
-       if (tconn->cstate > C_STANDALONE) {
+       if (connection->cstate > C_STANDALONE) {
                retcode = ERR_NET_CONFIGURED;
                goto fail;
        }
@@ -2198,7 +2227,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       retcode = check_net_options(tconn, new_conf);
+       retcode = check_net_options(connection, new_conf);
        if (retcode != NO_ERROR)
                goto fail;
 
@@ -2208,40 +2237,41 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 
        ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
 
-       conn_flush_workqueue(tconn);
+       conn_flush_workqueue(connection);
 
-       mutex_lock(&tconn->conf_update);
-       old_conf = tconn->net_conf;
+       mutex_lock(&connection->conf_update);
+       old_conf = connection->net_conf;
        if (old_conf) {
                retcode = ERR_NET_CONFIGURED;
-               mutex_unlock(&tconn->conf_update);
+               mutex_unlock(&connection->conf_update);
                goto fail;
        }
-       rcu_assign_pointer(tconn->net_conf, new_conf);
+       rcu_assign_pointer(connection->net_conf, new_conf);
 
-       conn_free_crypto(tconn);
-       tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
-       tconn->integrity_tfm = crypto.integrity_tfm;
-       tconn->csums_tfm = crypto.csums_tfm;
-       tconn->verify_tfm = crypto.verify_tfm;
+       conn_free_crypto(connection);
+       connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
+       connection->integrity_tfm = crypto.integrity_tfm;
+       connection->csums_tfm = crypto.csums_tfm;
+       connection->verify_tfm = crypto.verify_tfm;
 
-       tconn->my_addr_len = nla_len(adm_ctx.my_addr);
-       memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
-       tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
-       memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
+       connection->my_addr_len = nla_len(adm_ctx.my_addr);
+       memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
+       connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
+       memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
 
-       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&connection->conf_update);
 
        rcu_read_lock();
-       idr_for_each_entry(&tconn->volumes, mdev, i) {
-               mdev->send_cnt = 0;
-               mdev->recv_cnt = 0;
+       idr_for_each_entry(&connection->peer_devices, peer_device, i) {
+               struct drbd_device *device = peer_device->device;
+               device->send_cnt = 0;
+               device->recv_cnt = 0;
        }
        rcu_read_unlock();
 
-       retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+       retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
 
-       conn_reconfig_done(tconn);
+       conn_reconfig_done(connection);
        drbd_adm_finish(info, retcode);
        return 0;
 
@@ -2249,17 +2279,17 @@ fail:
        free_crypto(&crypto);
        kfree(new_conf);
 
-       conn_reconfig_done(tconn);
+       conn_reconfig_done(connection);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
+static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
 {
        enum drbd_state_rv rv;
 
-       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+       rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
                        force ? CS_HARD : 0);
 
        switch (rv) {
@@ -2269,18 +2299,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
                return SS_SUCCESS;
        case SS_PRIMARY_NOP:
                /* Our state checking code wants to see the peer outdated. */
-               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
+               rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
 
                if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
-                       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);
+                       rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
 
                break;
        case SS_CW_FAILED_BY_PEER:
                /* The peer probably wants to see us outdated. */
-               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+               rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
                                                        disk, D_OUTDATED), 0);
                if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
-                       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+                       rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
                                        CS_HARD);
                }
                break;
@@ -2294,7 +2324,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
                 * The state handling only uses drbd_thread_stop_nowait(),
                 * we want to really wait here until the receiver is no more.
                 */
-               drbd_thread_stop(&adm_ctx.tconn->receiver);
+               drbd_thread_stop(&connection->receiver);
 
                /* Race breaker.  This additional state change request may be
                 * necessary, if this was a forced disconnect during a receiver
@@ -2302,10 +2332,10 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
                 * after drbdd_init() returned.  Typically, we should be
                 * C_STANDALONE already, now, and this becomes a no-op.
                 */
-               rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+               rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
                                CS_VERBOSE | CS_HARD);
                if (rv2 < SS_SUCCESS)
-                       conn_err(tconn,
+                       drbd_err(connection,
                                "unexpected rv2=%d in conn_try_disconnect()\n",
                                rv2);
        }
@@ -2315,7 +2345,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct disconnect_parms parms;
-       struct drbd_tconn *tconn;
+       struct drbd_connection *connection;
        enum drbd_state_rv rv;
        enum drbd_ret_code retcode;
        int err;
@@ -2326,7 +2356,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto fail;
 
-       tconn = adm_ctx.tconn;
+       connection = adm_ctx.connection;
        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
                err = disconnect_parms_from_attrs(&parms, info);
@@ -2337,7 +2367,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       rv = conn_try_disconnect(tconn, parms.force_disconnect);
+       rv = conn_try_disconnect(connection, parms.force_disconnect);
        if (rv < SS_SUCCESS)
                retcode = rv;  /* FIXME: Type mismatch. */
        else
@@ -2347,27 +2377,27 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
        return 0;
 }
 
-void resync_after_online_grow(struct drbd_conf *mdev)
+void resync_after_online_grow(struct drbd_device *device)
 {
        int iass; /* I am sync source */
 
-       dev_info(DEV, "Resync of new storage after online grow\n");
-       if (mdev->state.role != mdev->state.peer)
-               iass = (mdev->state.role == R_PRIMARY);
+       drbd_info(device, "Resync of new storage after online grow\n");
+       if (device->state.role != device->state.peer)
+               iass = (device->state.role == R_PRIMARY);
        else
-               iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
+               iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);
 
        if (iass)
-               drbd_start_resync(mdev, C_SYNC_SOURCE);
+               drbd_start_resync(device, C_SYNC_SOURCE);
        else
-               _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
+               _drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
 }
 
 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 {
        struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
        struct resize_parms rs;
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        bool change_al_layout = false;
@@ -2381,15 +2411,15 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto fail;
 
-       mdev = adm_ctx.mdev;
-       if (!get_ldev(mdev)) {
+       device = adm_ctx.device;
+       if (!get_ldev(device)) {
                retcode = ERR_NO_DISK;
                goto fail;
        }
 
        memset(&rs, 0, sizeof(struct resize_parms));
-       rs.al_stripes = mdev->ldev->md.al_stripes;
-       rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
+       rs.al_stripes = device->ldev->md.al_stripes;
+       rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
        if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
                err = resize_parms_from_attrs(&rs, info);
                if (err) {
@@ -2399,24 +2429,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       if (mdev->state.conn > C_CONNECTED) {
+       if (device->state.conn > C_CONNECTED) {
                retcode = ERR_RESIZE_RESYNC;
                goto fail_ldev;
        }
 
-       if (mdev->state.role == R_SECONDARY &&
-           mdev->state.peer == R_SECONDARY) {
+       if (device->state.role == R_SECONDARY &&
+           device->state.peer == R_SECONDARY) {
                retcode = ERR_NO_PRIMARY;
                goto fail_ldev;
        }
 
-       if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
+       if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
                goto fail_ldev;
        }
 
        rcu_read_lock();
-       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
        rcu_read_unlock();
        if (u_size != (sector_t)rs.resize_size) {
                new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
@@ -2426,8 +2456,8 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       if (mdev->ldev->md.al_stripes != rs.al_stripes ||
-           mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
+       if (device->ldev->md.al_stripes != rs.al_stripes ||
+           device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
                u32 al_size_k = rs.al_stripes * rs.al_stripe_size;
 
                if (al_size_k > (16 * 1024 * 1024)) {
@@ -2440,7 +2470,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                        goto fail_ldev;
                }
 
-               if (mdev->state.conn != C_CONNECTED) {
+               if (device->state.conn != C_CONNECTED) {
                        retcode = ERR_MD_LAYOUT_CONNECTED;
                        goto fail_ldev;
                }
@@ -2448,24 +2478,24 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                change_al_layout = true;
        }
 
-       if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
-               mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
+       if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
+               device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
 
        if (new_disk_conf) {
-               mutex_lock(&mdev->tconn->conf_update);
-               old_disk_conf = mdev->ldev->disk_conf;
+               mutex_lock(&first_peer_device(device)->connection->conf_update);
+               old_disk_conf = device->ldev->disk_conf;
                *new_disk_conf = *old_disk_conf;
                new_disk_conf->disk_size = (sector_t)rs.resize_size;
-               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
-               mutex_unlock(&mdev->tconn->conf_update);
+               rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
+               mutex_unlock(&first_peer_device(device)->connection->conf_update);
                synchronize_rcu();
                kfree(old_disk_conf);
        }
 
        ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
-       dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
-       drbd_md_sync(mdev);
-       put_ldev(mdev);
+       dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
+       drbd_md_sync(device);
+       put_ldev(device);
        if (dd == DS_ERROR) {
                retcode = ERR_NOMEM_BITMAP;
                goto fail;
@@ -2477,12 +2507,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       if (mdev->state.conn == C_CONNECTED) {
+       if (device->state.conn == C_CONNECTED) {
                if (dd == DS_GREW)
-                       set_bit(RESIZE_PENDING, &mdev->flags);
+                       set_bit(RESIZE_PENDING, &device->flags);
 
-               drbd_send_uuids(mdev);
-               drbd_send_sizes(mdev, 1, ddsf);
+               drbd_send_uuids(device);
+               drbd_send_sizes(device, 1, ddsf);
        }
 
  fail:
@@ -2490,14 +2520,13 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
        return 0;
 
  fail_ldev:
-       put_ldev(mdev);
+       put_ldev(device);
        goto fail;
 }
 
 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
-       struct drbd_tconn *tconn;
        struct res_opts res_opts;
        int err;
 
@@ -2506,9 +2535,8 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
                return retcode;
        if (retcode != NO_ERROR)
                goto fail;
-       tconn = adm_ctx.tconn;
 
-       res_opts = tconn->res_opts;
+       res_opts = adm_ctx.resource->res_opts;
        if (should_set_defaults(info))
                set_res_opts_defaults(&res_opts);
 
@@ -2519,7 +2547,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       err = set_resource_options(tconn, &res_opts);
+       err = set_resource_options(adm_ctx.resource, &res_opts);
        if (err) {
                retcode = ERR_INVALID_REQUEST;
                if (err == -ENOMEM)
@@ -2533,7 +2561,7 @@ fail:
 
 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2542,29 +2570,29 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
+       device = adm_ctx.device;
 
        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync.
         * Also wait for it's after_state_ch(). */
-       drbd_suspend_io(mdev);
-       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-       drbd_flush_workqueue(mdev);
+       drbd_suspend_io(device);
+       wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+       drbd_flush_workqueue(device);
 
        /* If we happen to be C_STANDALONE R_SECONDARY, just change to
         * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
         * try to start a resync handshake as sync target for full sync.
         */
-       if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
-               retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
+       if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
+               retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
                if (retcode >= SS_SUCCESS) {
-                       if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
+                       if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
                                "set_n_write from invalidate", BM_LOCKED_MASK))
                                retcode = ERR_IO_MD_DISK;
                }
        } else
-               retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
-       drbd_resume_io(mdev);
+               retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
+       drbd_resume_io(device);
 
 out:
        drbd_adm_finish(info, retcode);
@@ -2582,25 +2610,25 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+       retcode = drbd_request_state(adm_ctx.device, mask, val);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
+static int drbd_bmio_set_susp_al(struct drbd_device *device)
 {
        int rv;
 
-       rv = drbd_bmio_set_n_write(mdev);
-       drbd_suspend_al(mdev);
+       rv = drbd_bmio_set_n_write(device);
+       drbd_suspend_al(device);
        return rv;
 }
 
 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 {
        int retcode; /* drbd_ret_code, drbd_state_rv */
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -2608,32 +2636,32 @@ int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
+       device = adm_ctx.device;
 
        /* If there is still bitmap IO pending, probably because of a previous
         * resync just being finished, wait for it before requesting a new resync.
         * Also wait for it's after_state_ch(). */
-       drbd_suspend_io(mdev);
-       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-       drbd_flush_workqueue(mdev);
+       drbd_suspend_io(device);
+       wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+       drbd_flush_workqueue(device);
 
        /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
         * in the bitmap.  Otherwise, try to start a resync handshake
         * as sync source for full sync.
         */
-       if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
+       if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
                /* The peer will get a resync upon connect anyways. Just make that
                   into a full resync. */
-               retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+               retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
                if (retcode >= SS_SUCCESS) {
-                       if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+                       if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
                                "set_n_write from invalidate_peer",
                                BM_LOCKED_SET_ALLOWED))
                                retcode = ERR_IO_MD_DISK;
                }
        } else
-               retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
-       drbd_resume_io(mdev);
+               retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
+       drbd_resume_io(device);
 
 out:
        drbd_adm_finish(info, retcode);
@@ -2650,7 +2678,7 @@ int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
+       if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
                retcode = ERR_PAUSE_IS_SET;
 out:
        drbd_adm_finish(info, retcode);
@@ -2668,8 +2696,8 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
-               s = adm_ctx.mdev->state;
+       if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
+               s = adm_ctx.device->state;
                if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
                        retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
                                  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
@@ -2690,7 +2718,7 @@ int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
 
 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2699,20 +2727,20 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
-       if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
-               drbd_uuid_new_current(mdev);
-               clear_bit(NEW_CUR_UUID, &mdev->flags);
+       device = adm_ctx.device;
+       if (test_bit(NEW_CUR_UUID, &device->flags)) {
+               drbd_uuid_new_current(device);
+               clear_bit(NEW_CUR_UUID, &device->flags);
        }
-       drbd_suspend_io(mdev);
-       retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
+       drbd_suspend_io(device);
+       retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
        if (retcode == SS_SUCCESS) {
-               if (mdev->state.conn < C_CONNECTED)
-                       tl_clear(mdev->tconn);
-               if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
-                       tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
+               if (device->state.conn < C_CONNECTED)
+                       tl_clear(first_peer_device(device)->connection);
+               if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
+                       tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
        }
-       drbd_resume_io(mdev);
+       drbd_resume_io(device);
 
 out:
        drbd_adm_finish(info, retcode);
@@ -2724,23 +2752,28 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
        return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
 }
 
-int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
+static int nla_put_drbd_cfg_context(struct sk_buff *skb,
+                                   struct drbd_resource *resource,
+                                   struct drbd_connection *connection,
+                                   struct drbd_device *device)
 {
        struct nlattr *nla;
        nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
        if (!nla)
                goto nla_put_failure;
-       if (vnr != VOLUME_UNSPECIFIED &&
-           nla_put_u32(skb, T_ctx_volume, vnr))
-               goto nla_put_failure;
-       if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+       if (device &&
+           nla_put_u32(skb, T_ctx_volume, device->vnr))
                goto nla_put_failure;
-       if (tconn->my_addr_len &&
-           nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
-               goto nla_put_failure;
-       if (tconn->peer_addr_len &&
-           nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+       if (nla_put_string(skb, T_ctx_resource_name, resource->name))
                goto nla_put_failure;
+       if (connection) {
+               if (connection->my_addr_len &&
+                   nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
+                       goto nla_put_failure;
+               if (connection->peer_addr_len &&
+                   nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
+                       goto nla_put_failure;
+       }
        nla_nest_end(skb, nla);
        return 0;
 
@@ -2750,9 +2783,22 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
+/*
+ * Return the connection of @resource if @resource has exactly one connection.
+ */
+static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
+{
+       struct list_head *connections = &resource->connections;
+
+       if (list_empty(connections) || connections->next->next != connections)
+               return NULL;
+       return list_first_entry(&resource->connections, struct drbd_connection, connections);
+}
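The helper above yields a connection only when resource->connections holds exactly one entry; "empty, or next->next does not point back at the head" is the open-coded test for that. For reference, the same check could be written with list_is_singular() from <linux/list.h>. A sketch of that hypothetical alternative (not part of this patch, and assuming the drbd_int.h types already in scope in this file):

/* Hypothetical equivalent of the_only_connection(): list_is_singular()
 * is true iff the list contains exactly one entry. */
static struct drbd_connection *the_only_connection_alt(struct drbd_resource *resource)
{
        if (!list_is_singular(&resource->connections))
                return NULL;    /* zero connections, or more than one */
        return list_first_entry(&resource->connections,
                                struct drbd_connection, connections);
}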
+
+int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
                const struct sib_info *sib)
 {
+       struct drbd_resource *resource = device->resource;
        struct state_info *si = NULL; /* for sizeof(si->member); */
        struct nlattr *nla;
        int got_ldev;
@@ -2772,27 +2818,27 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
         * always in the context of the receiving process */
        exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
 
-       got_ldev = get_ldev(mdev);
+       got_ldev = get_ldev(device);
 
        /* We need to add connection name and volume number information still.
         * Minor number is in drbd_genlmsghdr. */
-       if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+       if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
                goto nla_put_failure;
 
-       if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
+       if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
                goto nla_put_failure;
 
        rcu_read_lock();
        if (got_ldev) {
                struct disk_conf *disk_conf;
 
-               disk_conf = rcu_dereference(mdev->ldev->disk_conf);
+               disk_conf = rcu_dereference(device->ldev->disk_conf);
                err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
        }
        if (!err) {
                struct net_conf *nc;
 
-               nc = rcu_dereference(mdev->tconn->net_conf);
+               nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
                if (nc)
                        err = net_conf_to_skb(skb, nc, exclude_sensitive);
        }
@@ -2804,38 +2850,38 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
        if (!nla)
                goto nla_put_failure;
        if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
-           nla_put_u32(skb, T_current_state, mdev->state.i) ||
-           nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
-           nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
-           nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
-           nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
-           nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
-           nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
-           nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
-           nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
-           nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
-           nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
-           nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+           nla_put_u32(skb, T_current_state, device->state.i) ||
+           nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
+           nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
+           nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
+           nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
+           nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
+           nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
+           nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
+           nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
+           nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
+           nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
+           nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
                goto nla_put_failure;
 
        if (got_ldev) {
                int err;
 
-               spin_lock_irq(&mdev->ldev->md.uuid_lock);
-               err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
-               spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+               spin_lock_irq(&device->ldev->md.uuid_lock);
+               err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
+               spin_unlock_irq(&device->ldev->md.uuid_lock);
 
                if (err)
                        goto nla_put_failure;
 
-               if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
-                   nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
-                   nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+               if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
+                   nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
+                   nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
                        goto nla_put_failure;
-               if (C_SYNC_SOURCE <= mdev->state.conn &&
-                   C_PAUSED_SYNC_T >= mdev->state.conn) {
-                       if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
-                           nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+               if (C_SYNC_SOURCE <= device->state.conn &&
+                   C_PAUSED_SYNC_T >= device->state.conn) {
+                       if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
+                           nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
                                goto nla_put_failure;
                }
        }
@@ -2867,7 +2913,7 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 nla_put_failure:
                err = -EMSGSIZE;
        if (got_ldev)
-               put_ldev(mdev);
+               put_ldev(device);
        return err;
 }
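Both nla_put_drbd_cfg_context() and nla_put_status_info() follow the usual netlink nested-attribute pattern: open a nest, emit members, close the nest, and on overflow cancel the partial nest and return -EMSGSIZE. A generic sketch of that pattern, using placeholder attribute type numbers rather than drbd's real ones:

#include <net/netlink.h>

/* Sketch of the nest/put/end/cancel pattern used above; attribute types
 * 1 and 2 are placeholders, not actual drbd netlink attribute types. */
static int put_example_info(struct sk_buff *skb)
{
        struct nlattr *nla;

        nla = nla_nest_start(skb, 1);           /* open the nested attribute */
        if (!nla)
                goto nla_put_failure;
        if (nla_put_u32(skb, 2, 42))            /* emit one member */
                goto nla_put_failure;
        nla_nest_end(skb, nla);                 /* patch in the nest length */
        return 0;

nla_put_failure:
        if (nla)
                nla_nest_cancel(skb, nla);      /* trim the partial nest */
        return -EMSGSIZE;
}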
 
@@ -2882,7 +2928,7 @@ int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
+       err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
        if (err) {
                nlmsg_free(adm_ctx.reply_skb);
                return err;
@@ -2892,22 +2938,23 @@ out:
        return 0;
 }
 
-int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
+static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        struct drbd_genlmsghdr *dh;
-       struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
-       struct drbd_tconn *tconn = NULL;
-       struct drbd_tconn *tmp;
+       struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
+       struct drbd_resource *resource = NULL;
+       struct drbd_resource *tmp;
        unsigned volume = cb->args[1];
 
        /* Open coded, deferred, iteration:
-        * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
-        *      idr_for_each_entry(&tconn->volumes, mdev, i) {
+        * for_each_resource_safe(resource, tmp, &drbd_resources) {
+        *      connection = "first connection of resource or undefined";
+        *      idr_for_each_entry(&resource->devices, device, i) {
         *        ...
         *      }
         * }
-        * where tconn is cb->args[0];
+        * where resource is cb->args[0];
         * and i is cb->args[1];
         *
         * cb->args[2] indicates if we shall loop over all resources,
@@ -2916,44 +2963,44 @@ int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
         * This may miss entries inserted after this dump started,
         * or entries deleted before they are reached.
         *
-        * We need to make sure the mdev won't disappear while
+        * We need to make sure the device won't disappear while
         * we are looking at it, and revalidate our iterators
         * on each iteration.
         */
 
-       /* synchronize with conn_create()/conn_destroy() */
+       /* synchronize with conn_create()/drbd_destroy_connection() */
        rcu_read_lock();
        /* revalidate iterator position */
-       list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+       for_each_resource_rcu(tmp, &drbd_resources) {
                if (pos == NULL) {
                        /* first iteration */
                        pos = tmp;
-                       tconn = pos;
+                       resource = pos;
                        break;
                }
                if (tmp == pos) {
-                       tconn = pos;
+                       resource = pos;
                        break;
                }
        }
-       if (tconn) {
-next_tconn:
-               mdev = idr_get_next(&tconn->volumes, &volume);
-               if (!mdev) {
-                       /* No more volumes to dump on this tconn.
-                        * Advance tconn iterator. */
-                       pos = list_entry_rcu(tconn->all_tconn.next,
-                                            struct drbd_tconn, all_tconn);
-                       /* Did we dump any volume on this tconn yet? */
+       if (resource) {
+next_resource:
+               device = idr_get_next(&resource->devices, &volume);
+               if (!device) {
+                       /* No more volumes to dump on this resource.
+                        * Advance resource iterator. */
+                       pos = list_entry_rcu(resource->resources.next,
+                                            struct drbd_resource, resources);
+                       /* Did we dump any volume of this resource yet? */
                        if (volume != 0) {
                                /* If we reached the end of the list,
                                 * or only a single resource dump was requested,
                                 * we are done. */
-                               if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+                               if (&pos->resources == &drbd_resources || cb->args[2])
                                        goto out;
                                volume = 0;
-                               tconn = pos;
-                               goto next_tconn;
+                               resource = pos;
+                               goto next_resource;
                        }
                }
 
@@ -2963,43 +3010,49 @@ next_tconn:
                if (!dh)
                        goto out;
 
-               if (!mdev) {
-                       /* This is a tconn without a single volume.
+               if (!device) {
+                       /* This is a connection without a single volume.
                         * Surprisingly enough, it may have a network
                         * configuration. */
-                       struct net_conf *nc;
+                       struct drbd_connection *connection;
+
                        dh->minor = -1U;
                        dh->ret_code = NO_ERROR;
-                       if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
-                               goto cancel;
-                       nc = rcu_dereference(tconn->net_conf);
-                       if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+                       connection = the_only_connection(resource);
+                       if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
                                goto cancel;
+                       if (connection) {
+                               struct net_conf *nc;
+
+                               nc = rcu_dereference(connection->net_conf);
+                               if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+                                       goto cancel;
+                       }
                        goto done;
                }
 
-               D_ASSERT(mdev->vnr == volume);
-               D_ASSERT(mdev->tconn == tconn);
+               D_ASSERT(device, device->vnr == volume);
+               D_ASSERT(device, device->resource == resource);
 
-               dh->minor = mdev_to_minor(mdev);
+               dh->minor = device_to_minor(device);
                dh->ret_code = NO_ERROR;
 
-               if (nla_put_status_info(skb, mdev, NULL)) {
+               if (nla_put_status_info(skb, device, NULL)) {
 cancel:
                        genlmsg_cancel(skb, dh);
                        goto out;
                }
 done:
                genlmsg_end(skb, dh);
-        }
+       }
 
 out:
        rcu_read_unlock();
        /* where to start the next iteration */
-        cb->args[0] = (long)pos;
-        cb->args[1] = (pos == tconn) ? volume + 1 : 0;
+       cb->args[0] = (long)pos;
+       cb->args[1] = (pos == resource) ? volume + 1 : 0;
 
-       /* No more tconns/volumes/minors found results in an empty skb.
+       /* No more resources/volumes/minors found results in an empty skb.
         * Which will terminate the dump. */
         return skb->len;
 }
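get_one_status() follows the standard netlink dump convention: the callback is invoked repeatedly, iterator state is parked in cb->args[] between calls, and returning skb->len continues the dump while an eventually empty skb terminates it. A stripped-down sketch of that convention, with hypothetical example_next()/example_fill() helpers standing in for drbd's resource/volume iterators:

#include <linux/netlink.h>
#include <linux/rcupdate.h>

static void *example_next(unsigned long *index);         /* hypothetical iterator */
static int example_fill(struct sk_buff *skb, void *obj); /* hypothetical; non-zero if out of room */

/* Hypothetical dump callback showing the cb->args[] resumption pattern. */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        unsigned long index = cb->args[0];      /* where the previous call stopped */
        void *obj;

        rcu_read_lock();
        while ((obj = example_next(&index)) != NULL) {
                if (example_fill(skb, obj))     /* message full, stop for now */
                        break;
                index++;
        }
        rcu_read_unlock();

        cb->args[0] = index;    /* resume point for the next invocation */
        return skb->len;        /* an empty skb terminates the dump */
}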
@@ -3019,7 +3072,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
        const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
        struct nlattr *nla;
        const char *resource_name;
-       struct drbd_tconn *tconn;
+       struct drbd_resource *resource;
        int maxtype;
 
        /* Is this a followup call? */
@@ -3048,18 +3101,19 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
        if (!nla)
                return -EINVAL;
        resource_name = nla_data(nla);
-       tconn = conn_get_by_name(resource_name);
-
-       if (!tconn)
+       if (!*resource_name)
+               return -ENODEV;
+       resource = drbd_find_resource(resource_name);
+       if (!resource)
                return -ENODEV;
 
-       kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+       kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */
 
        /* prime iterators, and set "filter" mode mark:
-        * only dump this tconn. */
-       cb->args[0] = (long)tconn;
+        * only dump this resource. */
+       cb->args[0] = (long)resource;
        /* cb->args[1] = 0; passed in this way. */
-       cb->args[2] = (long)tconn;
+       cb->args[2] = (long)resource;
 
 dump:
        return get_one_status(skb, cb);
@@ -3078,8 +3132,8 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        tp.timeout_type =
-               adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
-               test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
+               adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
+               test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
                UT_DEFAULT;
 
        err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
@@ -3094,7 +3148,7 @@ out:
 
 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        enum drbd_ret_code retcode;
        struct start_ov_parms parms;
 
@@ -3104,10 +3158,10 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
+       device = adm_ctx.device;
 
        /* resume from last known position, if possible */
-       parms.ov_start_sector = mdev->ov_start_sector;
+       parms.ov_start_sector = device->ov_start_sector;
        parms.ov_stop_sector = ULLONG_MAX;
        if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
                int err = start_ov_parms_from_attrs(&parms, info);
@@ -3118,15 +3172,15 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
                }
        }
        /* w_make_ov_request expects position to be aligned */
-       mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
-       mdev->ov_stop_sector = parms.ov_stop_sector;
+       device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+       device->ov_stop_sector = parms.ov_stop_sector;
 
        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
-       drbd_suspend_io(mdev);
-       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
-       retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
-       drbd_resume_io(mdev);
+       drbd_suspend_io(device);
+       wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
+       retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
+       drbd_resume_io(device);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3135,7 +3189,7 @@ out:
 
 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
+       struct drbd_device *device;
        enum drbd_ret_code retcode;
        int skip_initial_sync = 0;
        int err;
@@ -3147,7 +3201,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out_nolock;
 
-       mdev = adm_ctx.mdev;
+       device = adm_ctx.device;
        memset(&args, 0, sizeof(args));
        if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
                err = new_c_uuid_parms_from_attrs(&args, info);
@@ -3158,49 +3212,50 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
+       mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */
 
-       if (!get_ldev(mdev)) {
+       if (!get_ldev(device)) {
                retcode = ERR_NO_DISK;
                goto out;
        }
 
        /* this is "skip initial sync", assume to be clean */
-       if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
-           mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
-               dev_info(DEV, "Preparing to skip initial sync\n");
+       if (device->state.conn == C_CONNECTED &&
+           first_peer_device(device)->connection->agreed_pro_version >= 90 &&
+           device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
+               drbd_info(device, "Preparing to skip initial sync\n");
                skip_initial_sync = 1;
-       } else if (mdev->state.conn != C_STANDALONE) {
+       } else if (device->state.conn != C_STANDALONE) {
                retcode = ERR_CONNECTED;
                goto out_dec;
        }
 
-       drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
-       drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
+       drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
+       drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */
 
        if (args.clear_bm) {
-               err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
+               err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
                        "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
                if (err) {
-                       dev_err(DEV, "Writing bitmap failed with %d\n",err);
+                       drbd_err(device, "Writing bitmap failed with %d\n", err);
                        retcode = ERR_IO_MD_DISK;
                }
                if (skip_initial_sync) {
-                       drbd_send_uuids_skip_initial_sync(mdev);
-                       _drbd_uuid_set(mdev, UI_BITMAP, 0);
-                       drbd_print_uuids(mdev, "cleared bitmap UUID");
-                       spin_lock_irq(&mdev->tconn->req_lock);
-                       _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
+                       drbd_send_uuids_skip_initial_sync(device);
+                       _drbd_uuid_set(device, UI_BITMAP, 0);
+                       drbd_print_uuids(device, "cleared bitmap UUID");
+                       spin_lock_irq(&first_peer_device(device)->connection->req_lock);
+                       _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
                                        CS_VERBOSE, NULL);
-                       spin_unlock_irq(&mdev->tconn->req_lock);
+                       spin_unlock_irq(&first_peer_device(device)->connection->req_lock);
                }
        }
 
-       drbd_md_sync(mdev);
+       drbd_md_sync(device);
 out_dec:
-       put_ldev(mdev);
+       put_ldev(device);
 out:
-       mutex_unlock(mdev->state_mutex);
+       mutex_unlock(device->state_mutex);
 out_nolock:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3246,7 +3301,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       if (adm_ctx.tconn) {
+       if (adm_ctx.connection) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
                        retcode = ERR_INVALID_REQUEST;
                        drbd_msg_put_info("resource exists");
@@ -3262,7 +3317,7 @@ out:
        return 0;
 }
 
-int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
 {
        struct drbd_genlmsghdr *dh = info->userhdr;
        enum drbd_ret_code retcode;
@@ -3285,41 +3340,36 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
        }
 
        /* drbd_adm_prepare made sure already
-        * that mdev->tconn and mdev->vnr match the request. */
-       if (adm_ctx.mdev) {
+        * that first_peer_device(device)->connection and device->vnr match the request. */
+       if (adm_ctx.device) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
                        retcode = ERR_MINOR_EXISTS;
                /* else: still NO_ERROR */
                goto out;
        }
 
-       retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
+       retcode = drbd_create_device(adm_ctx.connection, dh->minor, adm_ctx.volume);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
+static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
 {
-       if (mdev->state.disk == D_DISKLESS &&
-           /* no need to be mdev->state.conn == C_STANDALONE &&
+       if (device->state.disk == D_DISKLESS &&
+           /* no need to be device->state.conn == C_STANDALONE &&
             * we may want to delete a minor from a live replication group.
             */
-           mdev->state.role == R_SECONDARY) {
-               _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+           device->state.role == R_SECONDARY) {
+               _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
                                    CS_VERBOSE + CS_WAIT_COMPLETE);
-               idr_remove(&mdev->tconn->volumes, mdev->vnr);
-               idr_remove(&minors, mdev_to_minor(mdev));
-               destroy_workqueue(mdev->submit.wq);
-               del_gendisk(mdev->vdisk);
-               synchronize_rcu();
-               kref_put(&mdev->kref, &drbd_minor_destroy);
+               drbd_delete_device(device);
                return NO_ERROR;
        } else
                return ERR_MINOR_CONFIGURED;
 }
 
-int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
 
@@ -3329,7 +3379,7 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = adm_delete_minor(adm_ctx.mdev);
+       retcode = adm_del_minor(adm_ctx.device);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3338,54 +3388,49 @@ out:
 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 {
        int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
-       struct drbd_conf *mdev;
+       struct drbd_peer_device *peer_device;
        unsigned i;
 
-       retcode = drbd_adm_prepare(skb, info, 0);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
-       if (!adm_ctx.tconn) {
-               retcode = ERR_RES_NOT_KNOWN;
-               goto out;
-       }
-
        /* demote */
-       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
-               retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+       idr_for_each_entry(&adm_ctx.connection->peer_devices, peer_device, i) {
+               retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
                if (retcode < SS_SUCCESS) {
                        drbd_msg_put_info("failed to demote");
                        goto out;
                }
        }
 
-       retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+       retcode = conn_try_disconnect(adm_ctx.connection, 0);
        if (retcode < SS_SUCCESS) {
                drbd_msg_put_info("failed to disconnect");
                goto out;
        }
 
        /* detach */
-       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
-               retcode = adm_detach(mdev, 0);
+       idr_for_each_entry(&adm_ctx.connection->peer_devices, peer_device, i) {
+               retcode = adm_detach(peer_device->device, 0);
                if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
                        drbd_msg_put_info("failed to detach");
                        goto out;
                }
        }
 
-       /* If we reach this, all volumes (of this tconn) are Secondary,
+       /* If we reach this, all volumes (of this connection) are Secondary,
         * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
         * actually stopped, state handling only does drbd_thread_stop_nowait(). */
-       drbd_thread_stop(&adm_ctx.tconn->worker);
+       drbd_thread_stop(&adm_ctx.connection->worker);
 
        /* Now, nothing can fail anymore */
 
        /* delete volumes */
-       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
-               retcode = adm_delete_minor(mdev);
+       idr_for_each_entry(&adm_ctx.connection->peer_devices, peer_device, i) {
+               retcode = adm_del_minor(peer_device->device);
                if (retcode != NO_ERROR) {
                        /* "can not happen" */
                        drbd_msg_put_info("failed to delete volume");
@@ -3394,10 +3439,12 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
        }
 
        /* delete connection */
-       if (conn_lowest_minor(adm_ctx.tconn) < 0) {
-               list_del_rcu(&adm_ctx.tconn->all_tconn);
+       if (conn_lowest_minor(adm_ctx.connection) < 0) {
+               struct drbd_resource *resource = adm_ctx.connection->resource;
+
+               list_del_rcu(&resource->resources);
                synchronize_rcu();
-               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+               drbd_free_resource(resource);
 
                retcode = NO_ERROR;
        } else {
@@ -3413,6 +3460,8 @@ out:
 
 int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
 {
+       struct drbd_resource *resource;
+       struct drbd_connection *connection;
        enum drbd_ret_code retcode;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
@@ -3421,24 +3470,30 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       if (conn_lowest_minor(adm_ctx.tconn) < 0) {
-               list_del_rcu(&adm_ctx.tconn->all_tconn);
-               synchronize_rcu();
-               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
-
-               retcode = NO_ERROR;
-       } else {
+       resource = adm_ctx.resource;
+       for_each_connection(connection, resource) {
+               if (connection->cstate > C_STANDALONE) {
+                       retcode = ERR_NET_CONFIGURED;
+                       goto out;
+               }
+       }
+       if (!idr_is_empty(&resource->devices)) {
                retcode = ERR_RES_IN_USE;
+               goto out;
        }
 
-       if (retcode == NO_ERROR)
-               drbd_thread_stop(&adm_ctx.tconn->worker);
+       list_del_rcu(&resource->resources);
+       for_each_connection(connection, resource)
+               drbd_thread_stop(&connection->worker);
+       synchronize_rcu();
+       drbd_free_resource(resource);
+       retcode = NO_ERROR;
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
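drbd_adm_down() and the rewritten drbd_adm_del_resource() both retire a resource with the same unpublish-then-free sequence: list_del_rcu() unlinks it, synchronize_rcu() waits out readers still walking the list under rcu_read_lock() (for example get_one_status()), and only then is the object released. A self-contained sketch of that sequence, with a hypothetical struct example standing in for struct drbd_resource:

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example {                        /* hypothetical stand-in for struct drbd_resource */
        struct list_head list;
};

/* Unpublish-then-free: RCU readers either see the entry before
 * list_del_rcu() or not at all; synchronize_rcu() waits for all such
 * readers to finish before the memory is freed and can be reused. */
static void example_remove(struct example *e)
{
        list_del_rcu(&e->list);
        synchronize_rcu();
        kfree(e);
}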
 
-void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
+void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
 {
        static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
        struct sk_buff *msg;
@@ -3447,8 +3502,8 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        int err = -ENOMEM;
 
        if (sib->sib_reason == SIB_SYNC_PROGRESS) {
-               if (time_after(jiffies, mdev->rs_last_bcast + HZ))
-                       mdev->rs_last_bcast = jiffies;
+               if (time_after(jiffies, device->rs_last_bcast + HZ))
+                       device->rs_last_bcast = jiffies;
                else
                        return;
        }
@@ -3462,10 +3517,10 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
        if (!d_out) /* cannot happen, but anyways. */
                goto nla_put_failure;
-       d_out->minor = mdev_to_minor(mdev);
+       d_out->minor = device_to_minor(device);
        d_out->ret_code = NO_ERROR;
 
-       if (nla_put_status_info(msg, mdev, sib))
+       if (nla_put_status_info(msg, device, sib))
                goto nla_put_failure;
        genlmsg_end(msg, d_out);
        err = drbd_genl_multicast_events(msg, 0);
@@ -3478,7 +3533,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
 nla_put_failure:
        nlmsg_free(msg);
 failed:
-       dev_err(DEV, "Error %d while broadcasting event. "
+       drbd_err(device, "Error %d while broadcasting event. "
                        "Event seq:%u sib_reason:%u\n",
                        err, seq, sib->sib_reason);
 }
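The SIB_SYNC_PROGRESS branch in drbd_bcast_event() throttles sync-progress broadcasts to roughly one per second using the jiffies/time_after() idiom. A minimal sketch of that idiom in isolation, with a hypothetical should_emit() helper:

#include <linux/jiffies.h>
#include <linux/types.h>

/* Hypothetical rate limiter: allow at most one event per HZ jiffies
 * (about one second); time_after() copes with jiffies wrap-around. */
static bool should_emit(unsigned long *last_bcast)
{
        if (time_after(jiffies, *last_bcast + HZ)) {
                *last_bcast = jiffies;
                return true;
        }
        return false;
}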