[BLOCK] Get rid of request_queue_t typedef
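For context: until this series, include/linux/blkdev.h carried a compatibility typedef for the request queue, and block drivers used the two spellings interchangeably. The raid10.c hunks below are the mechanical part of the conversion; roughly:

    /* the typedef being removed (previously in include/linux/blkdev.h) */
    typedef struct request_queue request_queue_t;

    /* after the cleanup, raid10.c spells the type out directly, e.g.: */
    static void raid10_unplug(struct request_queue *q);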
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index a9401c017e3588f076411d83046c84b27495eab8..f730a144baf12339fceba31c04e0f54819a7000d 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -429,7 +429,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
                if (dev < 0)
                        dev += conf->raid_disks;
        } else {
-               while (sector > conf->stride) {
+               while (sector >= conf->stride) {
                        sector -= conf->stride;
                        if (dev < conf->near_copies)
                                dev += conf->raid_disks - conf->near_copies;
@@ -453,7 +453,7 @@ static sector_t raid10_find_virt(conf_t *conf, sector_t sector, int dev)
  *      If near_copies == raid_disk, there are no striping issues,
  *      but in that case, the function isn't called at all.
  */
-static int raid10_mergeable_bvec(request_queue_t *q, struct bio *bio,
+static int raid10_mergeable_bvec(struct request_queue *q, struct bio *bio,
                                struct bio_vec *bio_vec)
 {
        mddev_t *mddev = q->queuedata;
@@ -595,7 +595,7 @@ static void unplug_slaves(mddev_t *mddev)
        for (i=0; i<mddev->raid_disks; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                       request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
+                       struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
 
                        atomic_inc(&rdev->nr_pending);
                        rcu_read_unlock();
@@ -610,7 +610,7 @@ static void unplug_slaves(mddev_t *mddev)
        rcu_read_unlock();
 }
 
-static void raid10_unplug(request_queue_t *q)
+static void raid10_unplug(struct request_queue *q)
 {
        mddev_t *mddev = q->queuedata;
 
@@ -618,7 +618,7 @@ static void raid10_unplug(request_queue_t *q)
        md_wakeup_thread(mddev->thread);
 }
 
-static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
+static int raid10_issue_flush(struct request_queue *q, struct gendisk *disk,
                             sector_t *error_sector)
 {
        mddev_t *mddev = q->queuedata;
@@ -630,7 +630,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct block_device *bdev = rdev->bdev;
-                       request_queue_t *r_queue = bdev_get_queue(bdev);
+                       struct request_queue *r_queue = bdev_get_queue(bdev);
 
                        if (!r_queue->issue_flush_fn)
                                ret = -EOPNOTSUPP;
@@ -658,7 +658,7 @@ static int raid10_congested(void *data, int bits)
        for (i = 0; i < mddev->raid_disks && ret == 0; i++) {
                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
-                       request_queue_t *q = bdev_get_queue(rdev->bdev);
+                       struct request_queue *q = bdev_get_queue(rdev->bdev);
 
                        ret |= bdi_congested(&q->backing_dev_info, bits);
                }
@@ -772,7 +772,7 @@ static void unfreeze_array(conf_t *conf)
        spin_unlock_irq(&conf->resync_lock);
 }
 
-static int make_request(request_queue_t *q, struct bio * bio)
+static int make_request(struct request_queue *q, struct bio * bio)
 {
        mddev_t *mddev = q->queuedata;
        conf_t *conf = mddev_to_conf(mddev);
@@ -1510,8 +1510,7 @@ static void raid10d(mddev_t *mddev)
                        blk_remove_plug(mddev->queue);
                        spin_unlock_irqrestore(&conf->device_lock, flags);
                        /* flush any pending bitmap writes to disk before proceeding w/ I/O */
-                       if (bitmap_unplug(mddev->bitmap) != 0)
-                               printk("%s: bitmap file write failed!\n", mdname(mddev));
+                       bitmap_unplug(mddev->bitmap);
 
                        while (bio) { /* submit pending writes */
                                struct bio *next = bio->bi_next;
@@ -1801,6 +1800,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                                                for (k=0; k<conf->copies; k++)
                                                        if (r10_bio->devs[k].devnum == i)
                                                                break;
+                                               BUG_ON(k == conf->copies);
                                                bio = r10_bio->devs[1].bio;
                                                bio->bi_next = biolist;
                                                biolist = bio;
@@ -1866,6 +1866,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
                        int d = r10_bio->devs[i].devnum;
                        bio = r10_bio->devs[i].bio;
                        bio->bi_end_io = NULL;
+                       clear_bit(BIO_UPTODATE, &bio->bi_flags);
                        if (conf->mirrors[d].rdev == NULL ||
                            test_bit(Faulty, &conf->mirrors[d].rdev->flags))
                                continue;
@@ -2021,19 +2022,35 @@ static int run(mddev_t *mddev)
        if (!conf->tmppage)
                goto out_free_conf;
 
+       conf->mddev = mddev;
+       conf->raid_disks = mddev->raid_disks;
        conf->near_copies = nc;
        conf->far_copies = fc;
        conf->copies = nc*fc;
        conf->far_offset = fo;
        conf->chunk_mask = (sector_t)(mddev->chunk_size>>9)-1;
        conf->chunk_shift = ffz(~mddev->chunk_size) - 9;
+       size = mddev->size >> (conf->chunk_shift-1);
+       sector_div(size, fc);
+       size = size * conf->raid_disks;
+       sector_div(size, nc);
+       /* 'size' is now the number of chunks in the array */
+       /* calculate "used chunks per device" in 'stride' */
+       stride = size * conf->copies;
+
+       /* We need to round up when dividing by raid_disks to
+        * get the stride size.
+        */
+       stride += conf->raid_disks - 1;
+       sector_div(stride, conf->raid_disks);
+       mddev->size = stride  << (conf->chunk_shift-1);
+
        if (fo)
-               conf->stride = 1 << conf->chunk_shift;
-       else {
-               stride = mddev->size >> (conf->chunk_shift-1);
+               stride = 1;
+       else
                sector_div(stride, fc);
-               conf->stride = stride << conf->chunk_shift;
-       }
+       conf->stride = stride << conf->chunk_shift;
+
        conf->r10bio_pool = mempool_create(NR_RAID10_BIOS, r10bio_pool_alloc,
                                                r10bio_pool_free, conf);
        if (!conf->r10bio_pool) {
@@ -2063,8 +2080,6 @@ static int run(mddev_t *mddev)
 
                disk->head_position = 0;
        }
-       conf->raid_disks = mddev->raid_disks;
-       conf->mddev = mddev;
        spin_lock_init(&conf->device_lock);
        INIT_LIST_HEAD(&conf->retry_list);
 
@@ -2106,16 +2121,8 @@ static int run(mddev_t *mddev)
        /*
         * Ok, everything is just fine now
         */
-       if (conf->far_offset) {
-               size = mddev->size >> (conf->chunk_shift-1);
-               size *= conf->raid_disks;
-               size <<= conf->chunk_shift;
-               sector_div(size, conf->far_copies);
-       } else
-               size = conf->stride * conf->raid_disks;
-       sector_div(size, conf->near_copies);
-       mddev->array_size = size/2;
-       mddev->resync_max_sectors = size;
+       mddev->array_size = size << (conf->chunk_shift-1);
+       mddev->resync_max_sectors = size << conf->chunk_shift;
 
        mddev->queue->unplug_fn = raid10_unplug;
        mddev->queue->issue_flush_fn = raid10_issue_flush;
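
The run() hunk above also reworks how the array size and the per-device stride are derived from near_copies/far_copies. Below is a minimal standalone sketch of that integer arithmetic, assuming a made-up geometry (4 disks, 64KiB chunks, near_copies=2, far_copies=1) and using plain 64-bit division where the kernel uses sector_div(); it mirrors only the math, not the md data structures:

    #include <stdio.h>

    int main(void)
    {
            /* Made-up geometry: 4 disks, 64KiB chunks, near_copies=2, far_copies=1 */
            unsigned long long mddev_size = 1000448;   /* per-device size in 1K blocks */
            int chunk_shift = 16 - 9;                  /* ffz(~(64*1024)) - 9 == 7     */
            int raid_disks = 4, nc = 2, fc = 1;
            int copies = nc * fc;
            unsigned long long size, stride;

            size = mddev_size >> (chunk_shift - 1);    /* chunks available per device  */
            size /= fc;                                /* stands in for sector_div()   */
            size *= raid_disks;
            size /= nc;                                /* 'size': data chunks in array */

            stride = size * copies;                    /* total chunks to be stored    */
            stride += raid_disks - 1;                  /* round up when dividing ...   */
            stride /= raid_disks;                      /* ... used chunks per device   */

            printf("array_size      = %llu KiB\n", size << (chunk_shift - 1));
            printf("used per device = %llu KiB\n", stride << (chunk_shift - 1));
            return 0;
    }

The `stride += raid_disks - 1` before the division implements the "round up" noted in the patch's comment: stride becomes the ceiling of size*copies/raid_disks, i.e. the number of chunks actually consumed on each member device.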