drbd: don't count sendpage()d pages only referenced by tcp as in use
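Background on the subject line: when DRBD transmits a data page with sendpage(), the TCP stack takes its own reference on that page and holds it until the data has actually left the socket. If such pages keep counting against pp_in_use, the page pool looks exhausted even though DRBD itself is finished with them; this series therefore introduces a second counter, pp_in_use_by_net (initialized in drbd_init_set_defaults() below). The following is only a sketch of that idea, assuming a hypothetical pool-release helper; it is not the code from this patch.

/* Sketch only: account pages handed to the network separately, and give a
 * page back to the private pool only once TCP has dropped its reference. */
static void drbd_pp_put_page(struct drbd_conf *mdev, struct page *page, int is_net)
{
        atomic_t *in_use = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

        atomic_dec(in_use);             /* no longer "in use" from DRBD's point of view */

        if (page_count(page) == 1)
                drbd_pp_pool_add(mdev, page);   /* hypothetical helper: recycle into the pool */
        else
                put_page(page);                 /* tcp still holds a ref; it frees the page later */
}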
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 440b1d5dcfe2be8091312c20cb53739cb12f449c..981cfd178b0923a43c6068ffdb391f8abc92e944 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -963,6 +963,12 @@ static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
        }
 }
 
+static void drbd_resume_al(struct drbd_conf *mdev)
+{
+       if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
+               dev_info(DEV, "Resumed AL updates\n");
+}
+
 /**
  * __drbd_set_state() - Set a new DRBD state
  * @mdev:      DRBD device.
@@ -1046,12 +1052,6 @@ int __drbd_set_state(struct drbd_conf *mdev,
        wake_up(&mdev->misc_wait);
        wake_up(&mdev->state_wait);
 
-       /*   post-state-change actions   */
-       if (os.conn >= C_SYNC_SOURCE   && ns.conn <= C_CONNECTED) {
-               set_bit(STOP_SYNC_TIMER, &mdev->flags);
-               mod_timer(&mdev->resync_timer, jiffies);
-       }
-
        /* aborted verify run. log the last position */
        if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
            ns.conn < C_CONNECTED) {
@@ -1064,41 +1064,42 @@ int __drbd_set_state(struct drbd_conf *mdev,
        if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
            (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
                dev_info(DEV, "Syncer continues.\n");
-               mdev->rs_paused += (long)jiffies-(long)mdev->rs_mark_time;
-               if (ns.conn == C_SYNC_TARGET) {
-                       if (!test_and_clear_bit(STOP_SYNC_TIMER, &mdev->flags))
-                               mod_timer(&mdev->resync_timer, jiffies);
-                       /* This if (!test_bit) is only needed for the case
-                          that a device that has ceased to used its timer,
-                          i.e. it is already in drbd_resync_finished() gets
-                          paused and resumed. */
-               }
+               mdev->rs_paused += (long)jiffies
+                                 -(long)mdev->rs_mark_time[mdev->rs_last_mark];
+               if (ns.conn == C_SYNC_TARGET)
+                       mod_timer(&mdev->resync_timer, jiffies);
        }
 
        if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
            (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
                dev_info(DEV, "Resync suspended\n");
-               mdev->rs_mark_time = jiffies;
-               if (ns.conn == C_PAUSED_SYNC_T)
-                       set_bit(STOP_SYNC_TIMER, &mdev->flags);
+               mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
        }
 
        if (os.conn == C_CONNECTED &&
            (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
+               unsigned long now = jiffies;
+               int i;
+
                mdev->ov_position = 0;
-               mdev->rs_total =
-               mdev->rs_mark_left = drbd_bm_bits(mdev);
+               mdev->rs_total = drbd_bm_bits(mdev);
                if (mdev->agreed_pro_version >= 90)
                        set_ov_position(mdev, ns.conn);
                else
                        mdev->ov_start_sector = 0;
                mdev->ov_left = mdev->rs_total
                              - BM_SECT_TO_BIT(mdev->ov_position);
-               mdev->rs_start     =
-               mdev->rs_mark_time = jiffies;
+               mdev->rs_start = now;
+               mdev->rs_last_events = 0;
+               mdev->rs_last_sect_ev = 0;
                mdev->ov_last_oos_size = 0;
                mdev->ov_last_oos_start = 0;
 
+               for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+                       mdev->rs_mark_left[i] = mdev->rs_total;
+                       mdev->rs_mark_time[i] = now;
+               }
+
                if (ns.conn == C_VERIFY_S) {
                        dev_info(DEV, "Starting Online Verify from sector %llu\n",
                                        (unsigned long long)mdev->ov_position);
@@ -1151,6 +1152,10 @@ int __drbd_set_state(struct drbd_conf *mdev,
            ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
                drbd_thread_restart_nowait(&mdev->receiver);
 
+       /* Resume AL writing if we get a connection */
+       if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
+               drbd_resume_al(mdev);
+
        ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
        if (ascw) {
                ascw->os = os;
@@ -1209,6 +1214,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                           union drbd_state ns, enum chg_state_flags flags)
 {
        enum drbd_fencing_p fp;
+       enum drbd_req_event what = nothing;
 
        if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1234,21 +1240,14 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 
        if (os.susp && ns.susp && mdev->sync_conf.on_no_data == OND_SUSPEND_IO) {
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
-                       if (ns.conn == C_CONNECTED) {
-                               spin_lock_irq(&mdev->req_lock);
-                               _tl_restart(mdev, resend);
-                               _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
-                               spin_unlock_irq(&mdev->req_lock);
-                       } else /* ns.conn > C_CONNECTED */
+                       if (ns.conn == C_CONNECTED)
+                               what = resend;
+                       else /* ns.conn > C_CONNECTED */
                                dev_err(DEV, "Unexpected Resynd going on!\n");
                }
 
-               if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING) {
-                       spin_lock_irq(&mdev->req_lock);
-                       _tl_restart(mdev, restart_frozen_disk_io);
-                       _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
-                       spin_unlock_irq(&mdev->req_lock);
-               }
+               if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
+                       what = restart_frozen_disk_io;
        }
 
        if (fp == FP_STONITH && ns.susp) {
@@ -1267,12 +1266,17 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
                /* case2: The connection was established again: */
                if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
                        clear_bit(NEW_CUR_UUID, &mdev->flags);
-                       spin_lock_irq(&mdev->req_lock);
-                       _tl_restart(mdev, resend);
-                       _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
-                       spin_unlock_irq(&mdev->req_lock);
+                       what = resend;
                }
        }
+
+       if (what != nothing) {
+               spin_lock_irq(&mdev->req_lock);
+               _tl_restart(mdev, what);
+               _drbd_set_state(_NS(mdev, susp, 0), CS_VERBOSE, NULL);
+               spin_unlock_irq(&mdev->req_lock);
+       }
+
        /* Do not change the order of the if above and the two below... */
        if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {      /* attach on the peer */
                drbd_send_uuids(mdev);
@@ -1409,6 +1413,10 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
            (os.user_isp && !ns.user_isp))
                resume_next_sg(mdev);
 
+       /* free tl_hash if we got thawed and are C_STANDALONE */
+       if (ns.conn == C_STANDALONE && ns.susp == 0 && mdev->tl_hash)
+               drbd_free_tl_hash(mdev);
+
        /* Upon network connection, we need to start the receiver */
        if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
                drbd_thread_start(&mdev->receiver);
@@ -1635,7 +1643,7 @@ void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
 
 /* the appropriate socket mutex must be held already */
 int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
-                         enum drbd_packets cmd, struct p_header *h,
+                         enum drbd_packets cmd, struct p_header80 *h,
                          size_t size, unsigned msg_flags)
 {
        int sent, ok;
@@ -1645,7 +1653,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
 
        h->magic   = BE_DRBD_MAGIC;
        h->command = cpu_to_be16(cmd);
-       h->length  = cpu_to_be16(size-sizeof(struct p_header));
+       h->length  = cpu_to_be16(size-sizeof(struct p_header80));
 
        sent = drbd_send(mdev, sock, h, size, msg_flags);
 
@@ -1660,7 +1668,7 @@ int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
  * when we hold the appropriate socket mutex.
  */
 int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
-                 enum drbd_packets cmd, struct p_header *h, size_t size)
+                 enum drbd_packets cmd, struct p_header80 *h, size_t size)
 {
        int ok = 0;
        struct socket *sock;
@@ -1688,7 +1696,7 @@ int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
 int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
                   size_t size)
 {
-       struct p_header h;
+       struct p_header80 h;
        int ok;
 
        h.magic   = BE_DRBD_MAGIC;
@@ -1710,7 +1718,7 @@ int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
 
 int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
 {
-       struct p_rs_param_89 *p;
+       struct p_rs_param_95 *p;
        struct socket *sock;
        int size, rv;
        const int apv = mdev->agreed_pro_version;
@@ -1718,7 +1726,8 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
        size = apv <= 87 ? sizeof(struct p_rs_param)
                : apv == 88 ? sizeof(struct p_rs_param)
                        + strlen(mdev->sync_conf.verify_alg) + 1
-               : /* 89 */    sizeof(struct p_rs_param_89);
+               : apv <= 94 ? sizeof(struct p_rs_param_89)
+               : /* apv >= 95 */ sizeof(struct p_rs_param_95);
 
        /* used from admin command context and receiver/worker context.
         * to avoid kmalloc, grab the socket right here,
@@ -1729,12 +1738,16 @@ int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
        if (likely(sock != NULL)) {
                enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
 
-               p = &mdev->data.sbuf.rs_param_89;
+               p = &mdev->data.sbuf.rs_param_95;
 
                /* initialize verify_alg and csums_alg */
                memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
 
                p->rate = cpu_to_be32(sc->rate);
+               p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
+               p->c_delay_target = cpu_to_be32(sc->c_delay_target);
+               p->c_fill_target = cpu_to_be32(sc->c_fill_target);
+               p->c_max_rate = cpu_to_be32(sc->c_max_rate);
 
                if (apv >= 88)
                        strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
@@ -1790,7 +1803,7 @@ int drbd_send_protocol(struct drbd_conf *mdev)
                strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);
 
        rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
-                          (struct p_header *)p, size);
+                          (struct p_header80 *)p, size);
        kfree(p);
        return rv;
 }
@@ -1816,7 +1829,7 @@ int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
        put_ldev(mdev);
 
        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
-                            (struct p_header *)&p, sizeof(p));
+                            (struct p_header80 *)&p, sizeof(p));
 }
 
 int drbd_send_uuids(struct drbd_conf *mdev)
@@ -1837,7 +1850,7 @@ int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
        p.uuid = cpu_to_be64(val);
 
        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
-                            (struct p_header *)&p, sizeof(p));
+                            (struct p_header80 *)&p, sizeof(p));
 }
 
 int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
@@ -1867,7 +1880,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
        p.dds_flags = cpu_to_be16(flags);
 
        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
-                          (struct p_header *)&p, sizeof(p));
+                          (struct p_header80 *)&p, sizeof(p));
        return ok;
 }
 
@@ -1892,7 +1905,7 @@ int drbd_send_state(struct drbd_conf *mdev)
 
        if (likely(sock != NULL)) {
                ok = _drbd_send_cmd(mdev, sock, P_STATE,
-                                   (struct p_header *)&p, sizeof(p), 0);
+                                   (struct p_header80 *)&p, sizeof(p), 0);
        }
 
        mutex_unlock(&mdev->data.mutex);
@@ -1910,7 +1923,7 @@ int drbd_send_state_req(struct drbd_conf *mdev,
        p.val     = cpu_to_be32(val.i);
 
        return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
-                            (struct p_header *)&p, sizeof(p));
+                            (struct p_header80 *)&p, sizeof(p));
 }
 
 int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
@@ -1920,7 +1933,7 @@ int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
        p.retcode    = cpu_to_be32(retcode);
 
        return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
-                            (struct p_header *)&p, sizeof(p));
+                            (struct p_header80 *)&p, sizeof(p));
 }
 
 int fill_bitmap_rle_bits(struct drbd_conf *mdev,
@@ -2019,7 +2032,7 @@ int fill_bitmap_rle_bits(struct drbd_conf *mdev,
 
 enum { OK, FAILED, DONE }
 send_bitmap_rle_or_plain(struct drbd_conf *mdev,
-       struct p_header *h, struct bm_xfer_ctx *c)
+       struct p_header80 *h, struct bm_xfer_ctx *c)
 {
        struct p_compressed_bm *p = (void*)h;
        unsigned long num_words;
@@ -2049,12 +2062,12 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
                if (len)
                        drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
                ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
-                                  h, sizeof(struct p_header) + len, 0);
+                                  h, sizeof(struct p_header80) + len, 0);
                c->word_offset += num_words;
                c->bit_offset = c->word_offset * BITS_PER_LONG;
 
                c->packets[1]++;
-               c->bytes[1] += sizeof(struct p_header) + len;
+               c->bytes[1] += sizeof(struct p_header80) + len;
 
                if (c->bit_offset > c->bm_bits)
                        c->bit_offset = c->bm_bits;
@@ -2070,14 +2083,14 @@ send_bitmap_rle_or_plain(struct drbd_conf *mdev,
 int _drbd_send_bitmap(struct drbd_conf *mdev)
 {
        struct bm_xfer_ctx c;
-       struct p_header *p;
+       struct p_header80 *p;
        int ret;
 
        ERR_IF(!mdev->bitmap) return FALSE;
 
        /* maybe we should use some per thread scratch page,
         * and allocate that during initial device creation? */
-       p = (struct p_header *) __get_free_page(GFP_NOIO);
+       p = (struct p_header80 *) __get_free_page(GFP_NOIO);
        if (!p) {
                dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
                return FALSE;
@@ -2135,7 +2148,7 @@ int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
        if (mdev->state.conn < C_CONNECTED)
                return FALSE;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
-                       (struct p_header *)&p, sizeof(p));
+                       (struct p_header80 *)&p, sizeof(p));
        return ok;
 }
 
@@ -2163,7 +2176,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
        if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
                return FALSE;
        ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
-                               (struct p_header *)&p, sizeof(p));
+                               (struct p_header80 *)&p, sizeof(p));
        return ok;
 }
 
@@ -2171,8 +2184,8 @@ int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
                     struct p_data *dp)
 {
        const int header_size = sizeof(struct p_data)
-                             - sizeof(struct p_header);
-       int data_size  = ((struct p_header *)dp)->length - header_size;
+                             - sizeof(struct p_header80);
+       int data_size  = ((struct p_header80 *)dp)->length - header_size;
 
        return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
                              dp->block_id);
@@ -2221,7 +2234,7 @@ int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
        p.blksize  = cpu_to_be32(size);
 
        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
-                               (struct p_header *)&p, sizeof(p));
+                               (struct p_header80 *)&p, sizeof(p));
        return ok;
 }
 
@@ -2239,7 +2252,7 @@ int drbd_send_drequest_csum(struct drbd_conf *mdev,
 
        p.head.magic   = BE_DRBD_MAGIC;
        p.head.command = cpu_to_be16(cmd);
-       p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header) + digest_size);
+       p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);
 
        mutex_lock(&mdev->data.mutex);
 
@@ -2261,7 +2274,7 @@ int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
        p.blksize  = cpu_to_be32(size);
 
        ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
-                          (struct p_header *)&p, sizeof(p));
+                          (struct p_header80 *)&p, sizeof(p));
        return ok;
 }
 
@@ -2413,6 +2426,18 @@ static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
        return 1;
 }
 
+static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
+{
+       if (mdev->agreed_pro_version >= 95)
+               return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
+                       (bi_rw & REQ_UNPLUG ? DP_UNPLUG : 0) |
+                       (bi_rw & REQ_FUA ? DP_FUA : 0) |
+                       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
+                       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
+       else
+               return bi_rw & (REQ_SYNC | REQ_UNPLUG) ? DP_RW_SYNC : 0;
+}
+
 /* Used to send write requests
  * R_PRIMARY -> Peer   (P_DATA)
  */
@@ -2430,30 +2455,25 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
 
-       p.head.magic   = BE_DRBD_MAGIC;
-       p.head.command = cpu_to_be16(P_DATA);
-       p.head.length  =
-               cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + req->size);
+       if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
+               p.head.h80.magic   = BE_DRBD_MAGIC;
+               p.head.h80.command = cpu_to_be16(P_DATA);
+               p.head.h80.length  =
+                       cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
+       } else {
+               p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
+               p.head.h95.command = cpu_to_be16(P_DATA);
+               p.head.h95.length  =
+                       cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
+       }
 
        p.sector   = cpu_to_be64(req->sector);
        p.block_id = (unsigned long)req;
        p.seq_num  = cpu_to_be32(req->seq_num =
                                 atomic_add_return(1, &mdev->packet_seq));
-       dp_flags = 0;
 
-       /* NOTE: no need to check if barriers supported here as we would
-        *       not pass the test in make_request_common in that case
-        */
-       if (req->master_bio->bi_rw & REQ_HARDBARRIER) {
-               dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n");
-               /* dp_flags |= DP_HARDBARRIER; */
-       }
-       if (req->master_bio->bi_rw & REQ_SYNC)
-               dp_flags |= DP_RW_SYNC;
-       /* for now handle SYNCIO and UNPLUG
-        * as if they still were one and the same flag */
-       if (req->master_bio->bi_rw & REQ_UNPLUG)
-               dp_flags |= DP_RW_SYNC;
+       dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
+
        if (mdev->state.conn >= C_SYNC_SOURCE &&
            mdev->state.conn <= C_PAUSED_SYNC_T)
                dp_flags |= DP_MAY_SET_IN_SYNC;
@@ -2494,10 +2514,17 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
        dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
                crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;
 
-       p.head.magic   = BE_DRBD_MAGIC;
-       p.head.command = cpu_to_be16(cmd);
-       p.head.length  =
-               cpu_to_be16(sizeof(p) - sizeof(struct p_header) + dgs + e->size);
+       if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
+               p.head.h80.magic   = BE_DRBD_MAGIC;
+               p.head.h80.command = cpu_to_be16(cmd);
+               p.head.h80.length  =
+                       cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
+       } else {
+               p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
+               p.head.h95.command = cpu_to_be16(cmd);
+               p.head.h95.length  =
+                       cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
+       }
 
        p.sector   = cpu_to_be64(e->sector);
        p.block_id = e->block_id;
@@ -2510,8 +2537,7 @@ int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
        if (!drbd_get_data_sock(mdev))
                return 0;
 
-       ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p,
-                                       sizeof(p), dgs ? MSG_MORE : 0);
+       ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
        if (ok && dgs) {
                dgb = mdev->int_dig_out;
                drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
@@ -2686,7 +2712,13 @@ static void drbd_set_defaults(struct drbd_conf *mdev)
                /* .verify_alg = */     {}, 0,
                /* .cpu_mask = */       {}, 0,
                /* .csums_alg = */      {}, 0,
-               /* .use_rle = */        0
+               /* .use_rle = */        0,
+               /* .on_no_data = */     DRBD_ON_NO_DATA_DEF,
+               /* .c_plan_ahead = */   DRBD_C_PLAN_AHEAD_DEF,
+               /* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
+               /* .c_fill_target = */  DRBD_C_FILL_TARGET_DEF,
+               /* .c_max_rate = */     DRBD_C_MAX_RATE_DEF,
+               /* .c_min_rate = */     DRBD_C_MIN_RATE_DEF
        };
 
        /* Have to use that way, because the layout differs between
@@ -2721,6 +2753,9 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
        atomic_set(&mdev->net_cnt, 0);
        atomic_set(&mdev->packet_seq, 0);
        atomic_set(&mdev->pp_in_use, 0);
+       atomic_set(&mdev->pp_in_use_by_net, 0);
+       atomic_set(&mdev->rs_sect_in, 0);
+       atomic_set(&mdev->rs_sect_ev, 0);
 
        mutex_init(&mdev->md_io_mutex);
        mutex_init(&mdev->data.mutex);
@@ -2763,6 +2798,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
        init_waitqueue_head(&mdev->misc_wait);
        init_waitqueue_head(&mdev->state_wait);
+       init_waitqueue_head(&mdev->net_cnt_wait);
        init_waitqueue_head(&mdev->ee_wait);
        init_waitqueue_head(&mdev->al_wait);
        init_waitqueue_head(&mdev->seq_wait);
@@ -2778,6 +2814,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 
 void drbd_mdev_cleanup(struct drbd_conf *mdev)
 {
+       int i;
        if (mdev->receiver.t_state != None)
                dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
                                mdev->receiver.t_state);
@@ -2794,9 +2831,13 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
        mdev->p_size       =
        mdev->rs_start     =
        mdev->rs_total     =
-       mdev->rs_failed    =
-       mdev->rs_mark_left =
-       mdev->rs_mark_time = 0;
+       mdev->rs_failed    = 0;
+       mdev->rs_last_events = 0;
+       mdev->rs_last_sect_ev = 0;
+       for (i = 0; i < DRBD_SYNC_MARKS; i++) {
+               mdev->rs_mark_left[i] = 0;
+               mdev->rs_mark_time[i] = 0;
+       }
        D_ASSERT(mdev->net_conf == NULL);
 
        drbd_set_my_capacity(mdev, 0);
@@ -2807,6 +2848,7 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
        }
 
        drbd_free_resources(mdev);
+       clear_bit(AL_SUSPENDED, &mdev->flags);
 
        /*
         * currently we drbd_init_ee only on module load, so
@@ -3361,9 +3403,10 @@ void drbd_md_sync(struct drbd_conf *mdev)
        sector_t sector;
        int i;
 
+       del_timer(&mdev->md_sync_timer);
+       /* timer may be rearmed by drbd_md_mark_dirty() now. */
        if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
                return;
-       del_timer(&mdev->md_sync_timer);
 
        /* We use here D_FAILED and not D_ATTACHING because we try to write
         * metadata even if we detach due to a disk failure! */
@@ -3391,12 +3434,9 @@ void drbd_md_sync(struct drbd_conf *mdev)
        D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
        sector = mdev->ldev->md.md_offset;
 
-       if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
-               clear_bit(MD_DIRTY, &mdev->flags);
-       } else {
+       if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
                /* this was a try anyways ... */
                dev_err(DEV, "meta data update failed!\n");
-
                drbd_chk_io_error(mdev, 1, TRUE);
        }
 
@@ -3491,12 +3531,22 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
  * the meta-data super block. This function sets MD_DIRTY, and starts a
  * timer that ensures that within five seconds you have to call drbd_md_sync().
  */
+#ifdef DRBD_DEBUG_MD_SYNC
+void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
+{
+       if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
+               mod_timer(&mdev->md_sync_timer, jiffies + HZ);
+               mdev->last_md_mark_dirty.line = line;
+               mdev->last_md_mark_dirty.func = func;
+       }
+}
+#else
 void drbd_md_mark_dirty(struct drbd_conf *mdev)
 {
-       set_bit(MD_DIRTY, &mdev->flags);
-       mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
+       if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
+               mod_timer(&mdev->md_sync_timer, jiffies + HZ);
 }
-
+#endif
 
 static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
 {
@@ -3608,6 +3658,7 @@ int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
 {
        int rv = -EIO;
 
+       drbd_resume_al(mdev);
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_bm_clear_all(mdev);
                rv = drbd_bm_write(mdev);
@@ -3736,8 +3787,11 @@ static void md_sync_timer_fn(unsigned long data)
 static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
        dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
+#ifdef DRBD_DEBUG_MD_SYNC
+       dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
+               mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
+#endif
        drbd_md_sync(mdev);
-
        return 1;
 }
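A note on the wire-header change that runs through most of the hunks above: senders now pick the classic header (h80, 16-bit length, BE_DRBD_MAGIC) for payloads up to DRBD_MAX_SIZE_H80_PACKET and a new header (h95, 32-bit length, BE_DRBD_MAGIC_BIG) for anything larger, which is why drbd_send_dblock() and drbd_send_block() branch on the request size. The layout below is a sketch inferred from the cpu_to_be16()/cpu_to_be32() conversions in those hunks, not copied from drbd.h; the exact field ordering is an assumption.

struct p_header80 {
        u32       magic;        /* BE_DRBD_MAGIC */
        u16       command;
        u16       length;       /* limits a packet to DRBD_MAX_SIZE_H80_PACKET */
        u8        payload[0];
};

struct p_header95 {
        u16       magic;        /* BE_DRBD_MAGIC_BIG */
        u16       command;
        u32       length;       /* room for requests larger than a 16-bit length allows */
        u8        payload[0];
};

union p_header {
        struct p_header80 h80;
        struct p_header95 h95;
};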
 