drbd: Converted the transfer log from mdev to tconn
author Philipp Reisner <philipp.reisner@linbit.com>
Mon, 21 Feb 2011 13:29:27 +0000 (14:29 +0100)
committer Philipp Reisner <philipp.reisner@linbit.com>
Fri, 14 Oct 2011 14:47:58 +0000 (16:47 +0200)
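
The transfer log (the chain of struct drbd_tl_epoch objects and their
request lists) moves from the device (struct drbd_conf) to the
connection (struct drbd_tconn): tl_init()/tl_cleanup() are called from
drbd_new_tconn() instead of minor-device setup, and tl_release(),
tl_clear(), tl_restart() and _tl_add_barrier() now take a tconn. Code
that still needs a device recovers it from the epoch's work item
(b->w.mdev) or iterates all volumes of the connection. Device-scoped
D_ASSERT()s become open-coded conn_err() checks, and the protocol-error
escalation in tl_release() goes through conn_request_state() instead of
drbd_force_state().
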
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
drivers/block/drbd/drbd_int.h
drivers/block/drbd/drbd_main.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/drbd/drbd_req.c
drivers/block/drbd/drbd_req.h
drivers/block/drbd/drbd_state.c

index 103b61748c2df5ef6394e70a085afd3b715a95ed..48367e53a7a5f4cbd43ed685d02144973b33859b 100644 (file)
@@ -1173,10 +1173,10 @@ extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
 #define drbd_calc_cpu_mask(A) ({})
 #endif
 extern void drbd_free_resources(struct drbd_conf *mdev);
-extern void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
+extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
                       unsigned int set_size);
-extern void tl_clear(struct drbd_conf *mdev);
-extern void _tl_add_barrier(struct drbd_conf *, struct drbd_tl_epoch *);
+extern void tl_clear(struct drbd_tconn *);
+extern void _tl_add_barrier(struct drbd_tconn *, struct drbd_tl_epoch *);
 extern void drbd_free_sock(struct drbd_tconn *tconn);
 extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
                     void *buf, size_t size, unsigned msg_flags);
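
With these prototypes the transfer-log entry points operate on a
connection rather than on a single device. A minimal sketch of the
calling convention this creates at per-device call sites (the wrapper
name is hypothetical; the mdev->tconn back-pointer is used this way
throughout the patch):

    /* Hypothetical wrapper for illustration only: per-device callers
     * reach the per-connection transfer log through mdev->tconn. */
    static inline void tl_clear_for_device(struct drbd_conf *mdev)
    {
            tl_clear(mdev->tconn); /* clears the whole connection's log */
    }
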
index f43752fb5b52cd4fe9e3f59c2a3b33135d4ae27e..cbec5ff2cc745cf73e99d269a5c08efbc8dfd06c 100644 (file)
@@ -180,7 +180,7 @@ int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
  * Each &struct drbd_tl_epoch has a circular double linked list of requests
  * attached.
  */
-static int tl_init(struct drbd_conf *mdev)
+static int tl_init(struct drbd_tconn *tconn)
 {
        struct drbd_tl_epoch *b;
 
@@ -195,21 +195,23 @@ static int tl_init(struct drbd_conf *mdev)
        b->n_writes = 0;
        b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
 
-       mdev->tconn->oldest_tle = b;
-       mdev->tconn->newest_tle = b;
-       INIT_LIST_HEAD(&mdev->tconn->out_of_sequence_requests);
+       tconn->oldest_tle = b;
+       tconn->newest_tle = b;
+       INIT_LIST_HEAD(&tconn->out_of_sequence_requests);
 
        return 1;
 }
 
-static void tl_cleanup(struct drbd_conf *mdev)
+static void tl_cleanup(struct drbd_tconn *tconn)
 {
-       D_ASSERT(mdev->tconn->oldest_tle == mdev->tconn->newest_tle);
-       D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
-       kfree(mdev->tconn->oldest_tle);
-       mdev->tconn->oldest_tle = NULL;
-       kfree(mdev->tconn->unused_spare_tle);
-       mdev->tconn->unused_spare_tle = NULL;
+       if (tconn->oldest_tle != tconn->newest_tle)
+               conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
+       if (!list_empty(&tconn->out_of_sequence_requests))
+               conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
+       kfree(tconn->oldest_tle);
+       tconn->oldest_tle = NULL;
+       kfree(tconn->unused_spare_tle);
+       tconn->unused_spare_tle = NULL;
 }
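
D_ASSERT() is bound to a device (it logs via dev_err(DEV, ...)), which
tl_cleanup() no longer has at hand, so the checks are open-coded with
conn_err(). The same pattern as a hypothetical connection-scoped macro
(not part of the patch):

    /* Hypothetical connection-scoped counterpart of D_ASSERT(): */
    #define CONN_ASSERT(tconn, expr) do {                           \
            if (!(expr))                                            \
                    conn_err(tconn, "ASSERT FAILED: %s\n", #expr);  \
    } while (0)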
 
 /**
@@ -219,7 +221,7 @@ static void tl_cleanup(struct drbd_conf *mdev)
  *
  * The caller must hold the req_lock.
  */
-void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
+void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
 {
        struct drbd_tl_epoch *newest_before;
 
@@ -229,13 +231,13 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
        new->next = NULL;
        new->n_writes = 0;
 
-       newest_before = mdev->tconn->newest_tle;
+       newest_before = tconn->newest_tle;
        /* never send a barrier number == 0, because that is special-cased
         * when using TCQ for our write ordering code */
        new->br_number = (newest_before->br_number+1) ?: 1;
-       if (mdev->tconn->newest_tle != new) {
-               mdev->tconn->newest_tle->next = new;
-               mdev->tconn->newest_tle = new;
+       if (tconn->newest_tle != new) {
+               tconn->newest_tle->next = new;
+               tconn->newest_tle = new;
        }
 }
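
The (x + 1) ?: 1 expression above uses GNU C's conditional with the
middle operand omitted: v ?: w evaluates to v if v is non-zero, else w.
A stand-alone illustration (the helper name is hypothetical):

    /* Barrier number 0 is special-cased by the write-ordering code,
     * so the increment skips it when the 32-bit counter wraps: */
    static unsigned int next_barrier_nr(unsigned int cur)
    {
            return (cur + 1) ?: 1; /* 0xffffffffU + 1 == 0, maps to 1 */
    }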
 
@@ -249,31 +251,32 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
  * &struct drbd_tl_epoch objects this function will cause a termination
  * of the connection.
  */
-void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
-                      unsigned int set_size)
+void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+               unsigned int set_size)
 {
+       struct drbd_conf *mdev;
        struct drbd_tl_epoch *b, *nob; /* next old barrier */
        struct list_head *le, *tle;
        struct drbd_request *r;
 
-       spin_lock_irq(&mdev->tconn->req_lock);
+       spin_lock_irq(&tconn->req_lock);
 
-       b = mdev->tconn->oldest_tle;
+       b = tconn->oldest_tle;
 
        /* first some paranoia code */
        if (b == NULL) {
-               dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
-                       barrier_nr);
+               conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+                        barrier_nr);
                goto bail;
        }
        if (b->br_number != barrier_nr) {
-               dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
-                       barrier_nr, b->br_number);
+               conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+                        barrier_nr, b->br_number);
                goto bail;
        }
        if (b->n_writes != set_size) {
-               dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
-                       barrier_nr, set_size, b->n_writes);
+               conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+                        barrier_nr, set_size, b->n_writes);
                goto bail;
        }
 
@@ -296,28 +299,29 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
           _req_mod(, BARRIER_ACKED) above.
           */
        list_del_init(&b->requests);
+       mdev = b->w.mdev;
 
        nob = b->next;
        if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-               _tl_add_barrier(mdev, b);
+               _tl_add_barrier(tconn, b);
                if (nob)
-                       mdev->tconn->oldest_tle = nob;
+                       tconn->oldest_tle = nob;
                /* if nob == NULL b was the only barrier, and becomes the new
-                  barrier. Therefore mdev->tconn->oldest_tle points already to b */
+                  barrier. Therefore tconn->oldest_tle points already to b */
        } else {
                D_ASSERT(nob != NULL);
-               mdev->tconn->oldest_tle = nob;
+               tconn->oldest_tle = nob;
                kfree(b);
        }
 
-       spin_unlock_irq(&mdev->tconn->req_lock);
+       spin_unlock_irq(&tconn->req_lock);
        dec_ap_pending(mdev);
 
        return;
 
 bail:
-       spin_unlock_irq(&mdev->tconn->req_lock);
-       drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
+       spin_unlock_irq(&tconn->req_lock);
+       conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
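
Note how tl_release() recovers a device where one is still required:
dec_ap_pending() and the CREATE_BARRIER flag are per-device, so the
epoch's work item supplies it. The pattern in isolation (a sketch; the
function name is hypothetical, the fields are those used above):

    /* Sketch: per-device accounting reached from a per-connection
     * object -- each epoch's work item records the queueing device. */
    static void epoch_done(struct drbd_tl_epoch *b)
    {
            struct drbd_conf *mdev = b->w.mdev; /* device behind epoch */

            dec_ap_pending(mdev); /* per-device pending barrier-acks */
    }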
 
 
@@ -329,15 +333,15 @@ bail:
  * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
  * RESTART_FROZEN_DISK_IO.
  */
-void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
+void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 {
        struct drbd_tl_epoch *b, *tmp, **pn;
        struct list_head *le, *tle, carry_reads;
        struct drbd_request *req;
        int rv, n_writes, n_reads;
 
-       b = mdev->tconn->oldest_tle;
-       pn = &mdev->tconn->oldest_tle;
+       b = tconn->oldest_tle;
+       pn = &tconn->oldest_tle;
        while (b) {
                n_writes = 0;
                n_reads = 0;
@@ -356,11 +360,11 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                                b->n_writes = n_writes;
                                if (b->w.cb == NULL) {
                                        b->w.cb = w_send_barrier;
-                                       inc_ap_pending(mdev);
-                                       set_bit(CREATE_BARRIER, &mdev->flags);
+                                       inc_ap_pending(b->w.mdev);
+                                       set_bit(CREATE_BARRIER, &b->w.mdev->flags);
                                }
 
-                               drbd_queue_work(&mdev->tconn->data.work, &b->w);
+                               drbd_queue_work(&tconn->data.work, &b->w);
                        }
                        pn = &b->next;
                } else {
@@ -374,11 +378,12 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
                         * the newest barrier may not have been queued yet,
                         * in which case w.cb is still NULL. */
                        if (b->w.cb != NULL)
-                               dec_ap_pending(mdev);
+                               dec_ap_pending(b->w.mdev);
 
-                       if (b == mdev->tconn->newest_tle) {
+                       if (b == tconn->newest_tle) {
                                /* recycle, but reinit! */
-                               D_ASSERT(tmp == NULL);
+                               if (tmp != NULL)
+                                       conn_err(tconn, "ASSERT FAILED tmp == NULL");
                                INIT_LIST_HEAD(&b->requests);
                                list_splice(&carry_reads, &b->requests);
                                INIT_LIST_HEAD(&b->w.list);
@@ -406,20 +411,23 @@ void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
  * by the requests on the transfer gets marked as out of sync. Called from the
  * receiver thread and the worker thread.
  */
-void tl_clear(struct drbd_conf *mdev)
+void tl_clear(struct drbd_tconn *tconn)
 {
+       struct drbd_conf *mdev;
        struct list_head *le, *tle;
        struct drbd_request *r;
+       int minor;
 
-       spin_lock_irq(&mdev->tconn->req_lock);
+       spin_lock_irq(&tconn->req_lock);
 
-       _tl_restart(mdev, CONNECTION_LOST_WHILE_PENDING);
+       _tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
 
        /* we expect this list to be empty. */
-       D_ASSERT(list_empty(&mdev->tconn->out_of_sequence_requests));
+       if (!list_empty(&tconn->out_of_sequence_requests))
+               conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");
 
        /* but just in case, clean it up anyways! */
-       list_for_each_safe(le, tle, &mdev->tconn->out_of_sequence_requests) {
+       list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
                r = list_entry(le, struct drbd_request, tl_requests);
                /* It would be nice to complete outside of spinlock.
                 * But this is easier for now. */
@@ -427,16 +435,17 @@ void tl_clear(struct drbd_conf *mdev)
        }
 
        /* ensure bit indicating barrier is required is clear */
-       clear_bit(CREATE_BARRIER, &mdev->flags);
+       idr_for_each_entry(&tconn->volumes, mdev, minor)
+               clear_bit(CREATE_BARRIER, &mdev->flags);
 
-       spin_unlock_irq(&mdev->tconn->req_lock);
+       spin_unlock_irq(&tconn->req_lock);
 }
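
A connection can now carry several volumes, so the per-device
CREATE_BARRIER bit is cleared by walking the tconn->volumes IDR. The
iteration pattern in isolation (hypothetical helper; assumes the caller
holds tconn->req_lock, as tl_clear() does):

    /* Hypothetical helper: clear one flag bit on every volume (minor
     * device) attached to a connection. */
    static void conn_clear_flag(struct drbd_tconn *tconn, int flag)
    {
            struct drbd_conf *mdev;
            int minor;

            idr_for_each_entry(&tconn->volumes, mdev, minor)
                    clear_bit(flag, &mdev->flags);
    }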
 
-void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
+void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 {
-       spin_lock_irq(&mdev->tconn->req_lock);
-       _tl_restart(mdev, what);
-       spin_unlock_irq(&mdev->tconn->req_lock);
+       spin_lock_irq(&tconn->req_lock);
+       _tl_restart(tconn, what);
+       spin_unlock_irq(&tconn->req_lock);
 }
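
tl_restart()/_tl_restart() follow DRBD's usual naming rule: the
leading-underscore variant expects the caller to already hold
tconn->req_lock, while the plain variant takes and releases it. With
names from this patch:

    spin_lock_irq(&tconn->req_lock);  /* req_lock guards the log */
    _tl_restart(tconn, RESEND);       /* locked variant: lock is held */
    spin_unlock_irq(&tconn->req_lock);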
 
 static int drbd_thread_setup(void *arg)
@@ -2199,6 +2208,9 @@ struct drbd_tconn *drbd_new_tconn(char *name)
        if (!tconn->name)
                goto fail;
 
+       if (!tl_init(tconn))
+               goto fail;
+
        tconn->cstate = C_STANDALONE;
        mutex_init(&tconn->cstate_mutex);
        spin_lock_init(&tconn->req_lock);
@@ -2224,6 +2236,7 @@ struct drbd_tconn *drbd_new_tconn(char *name)
        return tconn;
 
 fail:
+       tl_cleanup(tconn);
        kfree(tconn->name);
        kfree(tconn);
 
@@ -2316,9 +2329,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 
        if (drbd_bm_init(mdev))
                goto out_no_bitmap;
-       /* no need to lock access, we are still initializing this minor device. */
-       if (!tl_init(mdev))
-               goto out_no_tl;
        mdev->read_requests = RB_ROOT;
        mdev->write_requests = RB_ROOT;
 
@@ -2334,8 +2344,6 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 /* out_whatever_else:
        kfree(mdev->current_epoch); */
 out_no_epoch:
-       tl_cleanup(mdev);
-out_no_tl:
        drbd_bm_cleanup(mdev);
 out_no_bitmap:
        __free_page(mdev->md_io_page);
@@ -2357,7 +2365,6 @@ out_no_tconn:
 void drbd_free_mdev(struct drbd_conf *mdev)
 {
        kfree(mdev->current_epoch);
-       tl_cleanup(mdev);
        if (mdev->bitmap) /* should no longer be there. */
                drbd_bm_cleanup(mdev);
        __free_page(mdev->md_io_page);
index 33159e47e6e06e3b0ae49ed79bcdeebfacb690a8..b141f891f643605d92243772f26d7558e33ad41d 100644 (file)
@@ -1996,9 +1996,9 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
        reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
        if (reply->ret_code == SS_SUCCESS) {
                if (mdev->state.conn < C_CONNECTED)
-                       tl_clear(mdev);
+                       tl_clear(mdev->tconn);
                if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
-                       tl_restart(mdev, FAIL_FROZEN_DISK_IO);
+                       tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
        }
        drbd_resume_io(mdev);
 
index 66080e204086b7d020a290e8ed4b8132883d7f5f..fcdc2c1cc503ca79bf095edb8f1cdef8e1a40d15 100644 (file)
@@ -3466,7 +3466,7 @@ static int receive_state(struct drbd_conf *mdev, enum drbd_packet cmd,
                   for temporal network outages! */
                spin_unlock_irq(&mdev->tconn->req_lock);
                dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
-               tl_clear(mdev);
+               tl_clear(mdev->tconn);
                drbd_uuid_new_current(mdev);
                clear_bit(NEW_CUR_UUID, &mdev->flags);
                drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
@@ -4025,7 +4025,7 @@ static int drbd_disconnected(int vnr, void *p, void *data)
        mdev->p_uuid = NULL;
 
        if (!is_susp(mdev->state))
-               tl_clear(mdev);
+               tl_clear(mdev->tconn);
 
        drbd_md_sync(mdev);
 
@@ -4585,7 +4585,7 @@ static int got_BarrierAck(struct drbd_conf *mdev, enum drbd_packet cmd)
 {
        struct p_barrier_ack *p = &mdev->tconn->meta.rbuf.barrier_ack;
 
-       tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));
+       tl_release(mdev->tconn, p->barrier, be32_to_cpu(p->set_size));
 
        if (mdev->state.conn == C_AHEAD &&
            atomic_read(&mdev->ap_in_flight) == 0 &&
index cfa5fba5303ce578dd8fc24eff6370837f4fea84..fa799e372babbde6ca949a8563c40a258348a695 100644 (file)
@@ -885,7 +885,7 @@ allocate_barrier:
         * barrier packet, this request is queued within the same spinlock. */
        if ((remote || send_oos) && mdev->tconn->unused_spare_tle &&
            test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
-               _tl_add_barrier(mdev, mdev->tconn->unused_spare_tle);
+               _tl_add_barrier(mdev->tconn, mdev->tconn->unused_spare_tle);
                mdev->tconn->unused_spare_tle = NULL;
        } else {
                D_ASSERT(!(remote && rw == WRITE &&
index 0b3cd412d52d37bf873c98cbb3f581777702b961..8c8c2588c4b907f3d8c35e3c60947b5be9947fa2 100644 (file)
@@ -254,7 +254,8 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 extern void complete_master_bio(struct drbd_conf *mdev,
                struct bio_and_error *m);
 extern void request_timer_fn(unsigned long data);
-extern void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
+extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
+extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what);
 
 /* use this if you don't want to deal with calling complete_master_bio()
  * outside the spinlock, e.g. when walking some list on cleanup. */
index 338e1f5c7cd0b597cc9f70e74efdc5079299d01a..ffee90d6d374d8ed6498820143e78e68099c475c 100644 (file)
@@ -37,7 +37,6 @@ struct after_state_chg_work {
        struct completion *done;
 };
 
-extern void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what);
 static int w_after_state_ch(struct drbd_work *w, int unused);
 static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags);
@@ -1009,7 +1008,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
        if (ns.susp_fen) {
                /* case1: The outdate peer handler is successful: */
                if (os.pdsk > D_OUTDATED  && ns.pdsk <= D_OUTDATED) {
-                       tl_clear(mdev);
+                       tl_clear(mdev->tconn);
                        if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
                                drbd_uuid_new_current(mdev);
                                clear_bit(NEW_CUR_UUID, &mdev->flags);
@@ -1028,7 +1027,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
        if (what != NOTHING) {
                spin_lock_irq(&mdev->tconn->req_lock);
-               _tl_restart(mdev, what);
+               _tl_restart(mdev->tconn, what);
                nsm.i &= mdev->state.i;
                _drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
                spin_unlock_irq(&mdev->tconn->req_lock);