dm: split discard requests on target boundaries
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f3cc5d99fe8dcd6064a6c6880de3b3a90b12b6ef..ac384b2a6a3396238e9f0810cbeee52d08cd7fa7 100644
@@ -125,6 +125,10 @@ struct mapped_device {
        unsigned long flags;
 
        struct request_queue *queue;
+       unsigned type;
+       /* Protect queue and type against concurrent access. */
+       struct mutex type_lock;
+
        struct gendisk *disk;
        char name[16];
 
@@ -1026,17 +1030,27 @@ static void end_clone_request(struct request *clone, int error)
        dm_complete_request(clone, error);
 }
 
-static sector_t max_io_len(struct mapped_device *md,
-                          sector_t sector, struct dm_target *ti)
+/*
+ * Return maximum size of I/O possible at the supplied sector up to the current
+ * target boundary.
+ */
+static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
 {
-       sector_t offset = sector - ti->begin;
-       sector_t len = ti->len - offset;
+       sector_t target_offset = dm_target_offset(ti, sector);
+
+       return ti->len - target_offset;
+}
+
+static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+{
+       sector_t len = max_io_len_target_boundary(sector, ti);
 
        /*
         * Does the target need to split even further ?
         */
        if (ti->split_io) {
                sector_t boundary;
+               sector_t offset = dm_target_offset(ti, sector);
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
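
The arithmetic in max_io_len() is easier to check with concrete numbers. Below is a small standalone sketch (userspace C, not DM code) that models just the fields involved; dm_target_offset() is assumed to be sector - ti->begin, and split_io is assumed to be a power of two, which the mask arithmetic already relies on.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct fake_target {            /* stand-in for the relevant dm_target fields */
            sector_t begin;         /* first device sector mapped by this target */
            sector_t len;           /* number of sectors mapped by this target */
            sector_t split_io;      /* extra split granularity, 0 = none */
    };

    /* models max_io_len_target_boundary(): room left inside the current target */
    static sector_t boundary_len(sector_t sector, const struct fake_target *ti)
    {
            return ti->len - (sector - ti->begin);
    }

    /* models max_io_len(): additionally clamp to the next split_io boundary */
    static sector_t io_len(sector_t sector, const struct fake_target *ti)
    {
            sector_t len = boundary_len(sector, ti);

            if (ti->split_io) {
                    sector_t offset = sector - ti->begin;
                    sector_t boundary = ((offset + ti->split_io) &
                                         ~(ti->split_io - 1)) - offset;

                    if (len > boundary)
                            len = boundary;
            }
            return len;
    }

    int main(void)
    {
            struct fake_target ti = { .begin = 1000, .len = 500, .split_io = 64 };

            /* offset 100: next 64-sector boundary is at offset 128 -> 28 sectors */
            printf("%llu\n", (unsigned long long)io_len(1100, &ti));
            /* offset 490: only 10 sectors remain in the target -> 10 sectors */
            printf("%llu\n", (unsigned long long)io_len(1490, &ti));
            return 0;
    }
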
@@ -1178,36 +1192,96 @@ static struct dm_target_io *alloc_tio(struct clone_info *ci,
        return tio;
 }
 
-static void __flush_target(struct clone_info *ci, struct dm_target *ti,
-                         unsigned flush_nr)
+static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
+                                  unsigned request_nr, sector_t len)
 {
        struct dm_target_io *tio = alloc_tio(ci, ti);
        struct bio *clone;
 
-       tio->info.flush_request = flush_nr;
+       tio->info.target_request_nr = request_nr;
 
-       clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
+       /*
+        * Discard requests require the bio's inline iovecs be initialized.
+        * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
+        * and discard, so no need for concern about wasted bvec allocations.
+        */
+       clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
        __bio_clone(clone, ci->bio);
        clone->bi_destructor = dm_bio_destructor;
+       if (len) {
+               clone->bi_sector = ci->sector;
+               clone->bi_size = to_bytes(len);
+       }
 
        __map_bio(ti, clone, tio);
 }
 
+static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
+                                   unsigned num_requests, sector_t len)
+{
+       unsigned request_nr;
+
+       for (request_nr = 0; request_nr < num_requests; request_nr++)
+               __issue_target_request(ci, ti, request_nr, len);
+}
+
 static int __clone_and_map_empty_barrier(struct clone_info *ci)
 {
-       unsigned target_nr = 0, flush_nr;
+       unsigned target_nr = 0;
        struct dm_target *ti;
 
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
-               for (flush_nr = 0; flush_nr < ti->num_flush_requests;
-                    flush_nr++)
-                       __flush_target(ci, ti, flush_nr);
+               __issue_target_requests(ci, ti, ti->num_flush_requests, 0);
 
        ci->sector_count = 0;
 
        return 0;
 }
 
+/*
+ * Perform all io with a single clone.
+ */
+static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
+{
+       struct bio *clone, *bio = ci->bio;
+       struct dm_target_io *tio;
+
+       tio = alloc_tio(ci, ti);
+       clone = clone_bio(bio, ci->sector, ci->idx,
+                         bio->bi_vcnt - ci->idx, ci->sector_count,
+                         ci->md->bs);
+       __map_bio(ti, clone, tio);
+       ci->sector_count = 0;
+}
+
+static int __clone_and_map_discard(struct clone_info *ci)
+{
+       struct dm_target *ti;
+       sector_t len;
+
+       do {
+               ti = dm_table_find_target(ci->map, ci->sector);
+               if (!dm_target_is_valid(ti))
+                       return -EIO;
+
+               /*
+                * Even though the device advertised discard support,
+                * reconfiguration might have changed that since the
+                * check was performed.
+                */
+               if (!ti->num_discard_requests)
+                       return -EOPNOTSUPP;
+
+               len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+
+               __issue_target_requests(ci, ti, ti->num_discard_requests, len);
+
+               ci->sector += len;
+       } while (ci->sector_count -= len);
+
+       return 0;
+}
+
 static int __clone_and_map(struct clone_info *ci)
 {
        struct bio *clone, *bio = ci->bio;
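
The new __clone_and_map_discard() loop is where the split actually happens: each iteration finds the target under ci->sector, clamps the remaining discard to that target's end with max_io_len_target_boundary(), and hands the sub-range to __issue_target_requests() (once per ti->num_discard_requests). The standalone sketch below models only that walk; the table is reduced to an array of contiguous extents and "issuing" a request just prints the range, so everything except the loop shape is made up for illustration.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct extent {                         /* stand-in for one table entry */
            sector_t begin;
            sector_t len;
            unsigned num_discard_requests;
    };

    #define MIN(a, b)  ((a) < (b) ? (a) : (b))

    /* naive dm_table_find_target(): linear search over contiguous extents */
    static const struct extent *find_target(const struct extent *tbl, unsigned n,
                                            sector_t sector)
    {
            for (unsigned i = 0; i < n; i++)
                    if (sector >= tbl[i].begin &&
                        sector < tbl[i].begin + tbl[i].len)
                            return &tbl[i];
            return NULL;
    }

    /* mirrors the shape of __clone_and_map_discard() */
    static int split_discard(const struct extent *tbl, unsigned n,
                             sector_t sector, sector_t count)
    {
            const struct extent *ti;
            sector_t len;

            do {
                    ti = find_target(tbl, n, sector);
                    if (!ti)
                            return -1;      /* -EIO in the real code */
                    if (!ti->num_discard_requests)
                            return -2;      /* -EOPNOTSUPP */

                    /* clamp to the space left in the current target */
                    len = MIN(count, ti->begin + ti->len - sector);
                    printf("discard %llu+%llu -> target starting at sector %llu\n",
                           (unsigned long long)sector, (unsigned long long)len,
                           (unsigned long long)ti->begin);

                    sector += len;
            } while (count -= len);

            return 0;
    }

    int main(void)
    {
            /* two contiguous 1024-sector targets, both advertising discard */
            const struct extent table[] = {
                    { .begin = 0,    .len = 1024, .num_discard_requests = 1 },
                    { .begin = 1024, .len = 1024, .num_discard_requests = 1 },
            };

            /* a 512-sector discard at 768 is split at the 1024 boundary:
             * 768+256 goes to the first target, 1024+256 to the second */
            return split_discard(table, 2, 768, 512);
    }
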
@@ -1218,27 +1292,21 @@ static int __clone_and_map(struct clone_info *ci)
        if (unlikely(bio_empty_barrier(bio)))
                return __clone_and_map_empty_barrier(ci);
 
+       if (unlikely(bio->bi_rw & REQ_DISCARD))
+               return __clone_and_map_discard(ci);
+
        ti = dm_table_find_target(ci->map, ci->sector);
        if (!dm_target_is_valid(ti))
                return -EIO;
 
-       max = max_io_len(ci->md, ci->sector, ti);
-
-       /*
-        * Allocate a target io object.
-        */
-       tio = alloc_tio(ci, ti);
+       max = max_io_len(ci->sector, ti);
 
        if (ci->sector_count <= max) {
                /*
                 * Optimise for the simple case where we can do all of
                 * the remaining io with a single clone.
                 */
-               clone = clone_bio(bio, ci->sector, ci->idx,
-                                 bio->bi_vcnt - ci->idx, ci->sector_count,
-                                 ci->md->bs);
-               __map_bio(ti, clone, tio);
-               ci->sector_count = 0;
+               __clone_and_map_simple(ci, ti);
 
        } else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
                /*
@@ -1259,6 +1327,7 @@ static int __clone_and_map(struct clone_info *ci)
                        len += bv_len;
                }
 
+               tio = alloc_tio(ci, ti);
                clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
                                  ci->md->bs);
                __map_bio(ti, clone, tio);
@@ -1281,13 +1350,12 @@ static int __clone_and_map(struct clone_info *ci)
                                if (!dm_target_is_valid(ti))
                                        return -EIO;
 
-                               max = max_io_len(ci->md, ci->sector, ti);
-
-                               tio = alloc_tio(ci, ti);
+                               max = max_io_len(ci->sector, ti);
                        }
 
                        len = min(remaining, max);
 
+                       tio = alloc_tio(ci, ti);
                        clone = split_bvec(bio, ci->sector, ci->idx,
                                           bv->bv_offset + offset, len,
                                           ci->md->bs);
@@ -1369,7 +1437,7 @@ static int dm_merge_bvec(struct request_queue *q,
        /*
         * Find maximum amount of I/O that won't need splitting
         */
-       max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
+       max_sectors = min(max_io_len(bvm->bi_sector, ti),
                          (sector_t) BIO_MAX_SECTORS);
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
        if (max_size < 0)
@@ -1852,6 +1920,28 @@ static const struct block_device_operations dm_blk_dops;
 static void dm_wq_work(struct work_struct *work);
 static void dm_rq_barrier_work(struct work_struct *work);
 
+static void dm_init_md_queue(struct mapped_device *md)
+{
+       /*
+        * Request-based dm devices cannot be stacked on top of bio-based dm
+        * devices.  The type of this dm device has not been decided yet.
+        * The type is decided at the first table loading time.
+        * To prevent problematic device stacking, clear the queue flag
+        * for request stacking support until then.
+        *
+        * This queue is new, so no concurrency on the queue_flags.
+        */
+       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
+
+       md->queue->queuedata = md;
+       md->queue->backing_dev_info.congested_fn = dm_any_congested;
+       md->queue->backing_dev_info.congested_data = md;
+       blk_queue_make_request(md->queue, dm_request);
+       blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
+       md->queue->unplug_fn = dm_unplug_all;
+       blk_queue_merge_bvec(md->queue, dm_merge_bvec);
+}
+
 /*
  * Allocate and initialise a blank device with a given minor.
  */
@@ -1877,8 +1967,10 @@ static struct mapped_device *alloc_dev(int minor)
        if (r < 0)
                goto bad_minor;
 
+       md->type = DM_TYPE_NONE;
        init_rwsem(&md->io_lock);
        mutex_init(&md->suspend_lock);
+       mutex_init(&md->type_lock);
        spin_lock_init(&md->deferred_lock);
        spin_lock_init(&md->barrier_error_lock);
        rwlock_init(&md->map_lock);
@@ -1889,33 +1981,11 @@ static struct mapped_device *alloc_dev(int minor)
        INIT_LIST_HEAD(&md->uevent_list);
        spin_lock_init(&md->uevent_lock);
 
-       md->queue = blk_init_queue(dm_request_fn, NULL);
+       md->queue = blk_alloc_queue(GFP_KERNEL);
        if (!md->queue)
                goto bad_queue;
 
-       /*
-        * Request-based dm devices cannot be stacked on top of bio-based dm
-        * devices.  The type of this dm device has not been decided yet,
-        * although we initialized the queue using blk_init_queue().
-        * The type is decided at the first table loading time.
-        * To prevent problematic device stacking, clear the queue flag
-        * for request stacking support until then.
-        *
-        * This queue is new, so no concurrency on the queue_flags.
-        */
-       queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
-       md->saved_make_request_fn = md->queue->make_request_fn;
-       md->queue->queuedata = md;
-       md->queue->backing_dev_info.congested_fn = dm_any_congested;
-       md->queue->backing_dev_info.congested_data = md;
-       blk_queue_make_request(md->queue, dm_request);
-       blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
-       md->queue->unplug_fn = dm_unplug_all;
-       blk_queue_merge_bvec(md->queue, dm_merge_bvec);
-       blk_queue_softirq_done(md->queue, dm_softirq_done);
-       blk_queue_prep_rq(md->queue, dm_prep_fn);
-       blk_queue_lld_busy(md->queue, dm_lld_busy);
-       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+       dm_init_md_queue(md);
 
        md->disk = alloc_disk(1);
        if (!md->disk)
@@ -2130,6 +2200,72 @@ int dm_create(int minor, struct mapped_device **result)
        return 0;
 }
 
+/*
+ * Functions to manage md->type.
+ * All are required to hold md->type_lock.
+ */
+void dm_lock_md_type(struct mapped_device *md)
+{
+       mutex_lock(&md->type_lock);
+}
+
+void dm_unlock_md_type(struct mapped_device *md)
+{
+       mutex_unlock(&md->type_lock);
+}
+
+void dm_set_md_type(struct mapped_device *md, unsigned type)
+{
+       md->type = type;
+}
+
+unsigned dm_get_md_type(struct mapped_device *md)
+{
+       return md->type;
+}
+
+/*
+ * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
+ */
+static int dm_init_request_based_queue(struct mapped_device *md)
+{
+       struct request_queue *q = NULL;
+
+       if (md->queue->elevator)
+               return 1;
+
+       /* Fully initialize the queue */
+       q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
+       if (!q)
+               return 0;
+
+       md->queue = q;
+       md->saved_make_request_fn = md->queue->make_request_fn;
+       dm_init_md_queue(md);
+       blk_queue_softirq_done(md->queue, dm_softirq_done);
+       blk_queue_prep_rq(md->queue, dm_prep_fn);
+       blk_queue_lld_busy(md->queue, dm_lld_busy);
+       blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
+
+       elv_register_queue(md->queue);
+
+       return 1;
+}
+
+/*
+ * Setup the DM device's queue based on md's type
+ */
+int dm_setup_md_queue(struct mapped_device *md)
+{
+       if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
+           !dm_init_request_based_queue(md)) {
+               DMWARN("Cannot initialize queue for request-based mapped device");
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static struct mapped_device *dm_find_md(dev_t dev)
 {
        struct mapped_device *md;
@@ -2352,11 +2488,11 @@ static void dm_queue_flush(struct mapped_device *md)
        queue_work(md->wq, &md->work);
 }
 
-static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
+static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
 
-       tio->info.flush_request = flush_nr;
+       tio->info.target_request_nr = request_nr;
 }
 
 /* Issue barrier requests to targets and wait for their completion. */
@@ -2374,7 +2510,7 @@ static int dm_rq_barrier(struct mapped_device *md)
                ti = dm_table_get_target(map, i);
                for (j = 0; j < ti->num_flush_requests; j++) {
                        clone = clone_rq(md->flush_request, md, GFP_NOIO);
-                       dm_rq_set_flush_nr(clone, j);
+                       dm_rq_set_target_request_nr(clone, j);
                        atomic_inc(&md->pending[rq_data_dir(clone)]);
                        map_request(ti, clone, md);
                }
@@ -2440,13 +2576,6 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
                goto out;
        }
 
-       /* cannot change the device type, once a table is bound */
-       if (md->map &&
-           (dm_table_get_type(md->map) != dm_table_get_type(table))) {
-               DMWARN("can't change the device type after a table is bound");
-               goto out;
-       }
-
        map = __bind(md, table, &limits);
 
 out:
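
The hunk above drops the "can't change the device type" check from dm_swap_table(); with md->type and type_lock in place, that decision is meant to be made earlier, when the first table is loaded. The companion ioctl-path change is not part of this file, so the caller below is only a hypothetical sketch of how the new helpers fit together; everything except the dm_* helpers introduced above is made up for illustration.

    static int bind_first_table(struct mapped_device *md, struct dm_table *t)
    {
            int r = 0;

            dm_lock_md_type(md);

            if (dm_get_md_type(md) == DM_TYPE_NONE)
                    /* the first table decides bio-based vs request-based */
                    dm_set_md_type(md, dm_table_get_type(t));
            else if (dm_get_md_type(md) != dm_table_get_type(t))
                    r = -EINVAL;    /* the type may not change once decided */

            if (!r)
                    /* no-op for bio-based md; sets up elevator/request_fn otherwise */
                    r = dm_setup_md_queue(md);

            dm_unlock_md_type(md);

            return r;
    }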