block: Add submit_bio_wait(), remove from md
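
Note: the block-layer side of this change (the new generic submit_bio_wait() in fs/bio.c) is not part of this file's diff. As a hedged sketch only, a generic helper could look much like the md-local copy removed below, with the completion carried in a small on-stack struct so the function can return an error code rather than testing BIO_UPTODATE itself; the struct and function names here are illustrative, not necessarily the ones the commit uses (depends on linux/bio.h and linux/completion.h):

	struct submit_bio_ret {
		struct completion event;
		int error;
	};

	static void submit_bio_wait_endio(struct bio *bio, int error)
	{
		struct submit_bio_ret *ret = bio->bi_private;

		ret->error = error;		/* record completion status */
		complete(&ret->event);
	}

	/*
	 * Sketch: submit a bio and wait for it to complete.  Returns 0 on
	 * success or a negative errno.  REQ_SYNC is set so the request is
	 * treated as synchronous by the I/O scheduler.
	 */
	int submit_bio_wait(int rw, struct bio *bio)
	{
		struct submit_bio_ret ret;

		rw |= REQ_SYNC;
		init_completion(&ret.event);
		bio->bi_private = &ret;
		bio->bi_end_io = submit_bio_wait_endio;
		submit_bio(rw, bio);
		wait_for_completion(&ret.event);

		return ret.error;
	}

With such a helper exported by the block layer, the raid1.c-private bi_complete()/submit_bio_wait() pair removed below becomes redundant.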
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index d5bddfc4010e4f77ea6d28a3f8fde5ff304bcc6e..800748d585ca5bfa21d962d5f367fe2b4d3cc445 100644
@@ -267,7 +267,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_sector,
                         (unsigned long long) bio->bi_sector +
-                        (bio->bi_size >> 9) - 1);
+                        bio_sectors(bio) - 1);
 
                call_bio_endio(r1_bio);
        }
@@ -458,7 +458,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                         " %llu-%llu\n",
                                         (unsigned long long) mbio->bi_sector,
                                         (unsigned long long) mbio->bi_sector +
-                                        (mbio->bi_size >> 9) - 1);
+                                        bio_sectors(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
@@ -967,6 +967,7 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
                bio_list_merge(&conf->pending_bio_list, &plug->pending);
                conf->pending_count += plug->pending_cnt;
                spin_unlock_irq(&conf->device_lock);
+               wake_up(&conf->wait_barrier);
                md_wakeup_thread(mddev->thread);
                kfree(plug);
                return;
@@ -1000,6 +1001,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA));
        const unsigned long do_discard = (bio->bi_rw
                                          & (REQ_DISCARD | REQ_SECURE));
+       const unsigned long do_same = (bio->bi_rw & REQ_WRITE_SAME);
        struct md_rdev *blocked_rdev;
        struct blk_plug_cb *cb;
        struct raid1_plug_cb *plug = NULL;
@@ -1016,7 +1018,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        md_write_start(mddev, bio); /* wait on superblock update early */
 
        if (bio_data_dir(bio) == WRITE &&
-           bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+           bio_end_sector(bio) > mddev->suspend_lo &&
            bio->bi_sector < mddev->suspend_hi) {
                /* As the suspend_* range is controlled by
                 * userspace, we want an interruptible
@@ -1027,7 +1029,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        flush_signals(current);
                        prepare_to_wait(&conf->wait_barrier,
                                        &w, TASK_INTERRUPTIBLE);
-                       if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+                       if (bio_end_sector(bio) <= mddev->suspend_lo ||
                            bio->bi_sector >= mddev->suspend_hi)
                                break;
                        schedule();
@@ -1047,7 +1049,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
        r1_bio->master_bio = bio;
-       r1_bio->sectors = bio->bi_size >> 9;
+       r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;
@@ -1125,7 +1127,7 @@ read_again:
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
                        r1_bio->master_bio = bio;
-                       r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+                       r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
                        r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1301,7 +1303,8 @@ read_again:
                                   conf->mirrors[i].rdev->data_offset);
                mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
                mbio->bi_end_io = raid1_end_write_request;
-               mbio->bi_rw = WRITE | do_flush_fua | do_sync | do_discard;
+               mbio->bi_rw =
+                       WRITE | do_flush_fua | do_sync | do_discard | do_same;
                mbio->bi_private = r1_bio;
 
                atomic_inc(&r1_bio->remaining);
@@ -1326,14 +1329,14 @@ read_again:
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
         */
-       if (sectors_handled < (bio->bi_size >> 9)) {
+       if (sectors_handled < bio_sectors(bio)) {
                r1_bio_write_done(r1_bio);
                /* We need another r1_bio.  It has already been counted
                 * in bio->bi_phys_segments
                 */
                r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
                r1_bio->master_bio = bio;
-               r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
                r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1944,7 +1947,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                wbio->bi_rw = WRITE;
                wbio->bi_end_io = end_sync_write;
                atomic_inc(&r1_bio->remaining);
-               md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
                generic_make_request(wbio);
        }
@@ -2056,25 +2059,6 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
        }
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-       complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-       struct completion event;
-       rw |= REQ_SYNC;
-
-       init_completion(&event);
-       bio->bi_private = &event;
-       bio->bi_end_io = bi_complete;
-       submit_bio(rw, bio);
-       wait_for_completion(&event);
-
-       return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r1bio *r1_bio, int i)
 {
        struct mddev *mddev = r1_bio->mddev;
@@ -2281,8 +2265,7 @@ read_more:
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
                        r1_bio->master_bio = mbio;
-                       r1_bio->sectors = (mbio->bi_size >> 9)
-                                         - sectors_handled;
+                       r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
@@ -2818,6 +2801,9 @@ static int run(struct mddev *mddev)
        if (IS_ERR(conf))
                return PTR_ERR(conf);
 
+       if (mddev->queue)
+               blk_queue_max_write_same_sectors(mddev->queue,
+                                                mddev->chunk_sectors);
        rdev_for_each(rdev, mddev) {
                if (!mddev->gendisk)
                        continue;
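
Most of the remaining hunks are mechanical conversions of open-coded `bio->bi_size >> 9` arithmetic to the bio accessors. For reference, at the time of this series the helpers in include/linux/bio.h amounted to roughly the following (paraphrased sketch, not copied from this diff):

	/* size of a bio in 512-byte sectors */
	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	/* first sector past the end of a bio */
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))

so, for example, `bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo` becomes `bio_end_sector(bio) > mddev->suspend_lo` with no behavioural change.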