/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);
/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
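/*
 * Illustrative example (not part of the original source): with a cookie value
 * of 42, dm_kobject_uevent() below emits the environment string "DM_COOKIE=42"
 * alongside the CHANGE or REMOVE uevent.
 */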
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};
/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
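/*
 * Illustrative note: because the clone bio is embedded at the end of this
 * struct, the wrapper can be recovered from the bio with container_of(), as
 * dm_rq_bio_constructor() does further below:
 *
 *	struct dm_rq_clone_bio_info *info =
 *		container_of(bio, struct dm_rq_clone_bio_info, clone);
 */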
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
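/*
 * Illustrative use (hypothetical caller, not from this file): a request-based
 * target holding a request dispatched by dm core can recover the per-request
 * map_info with:
 *
 *	union map_info *info = dm_get_rq_mapinfo(rq);
 *	if (info)
 *		info->ptr = my_private_data;	// my_private_data is made up
 */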
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/* event handling */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;
};
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

/*
 * Unused now, and needs to be deleted. But since io_pool is overloaded and it's
 * still used for _io_cache, I'm leaving this for a later cleanup.
 */
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	atomic_dec(&md->open_count);
	dm_put(md);

	spin_unlock(&_minor_lock);

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));
}
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD START:
 *   A more elegant soln is in the works that uses the queue
 *   merge fn, unfortunately there are a couple of changes to
 *   the block layer that I want to make for this.  So in the
 *   interests of getting something for people to use I give
 *   you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(md, tio);
	dec_pending(io, error);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (clone->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}
/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
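/*
 * Illustrative usage (hypothetical target constructor, not part of this file):
 * a target that wants I/O split at its chunk boundary might do:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size_sectors);
 *	if (r)
 *		return r;	// ti->error was already set above
 *
 * chunk_size_sectors is a made-up name for the target's own limit.
 */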
static void __map_bio(struct dm_target *ti, struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	int idx;
};
/*
 * Creates a little bio that just does part of a bvec.
 */
static void split_bvec(struct dm_target_io *tio, struct bio *bio,
		       sector_t sector, unsigned short idx, unsigned int offset,
		       unsigned int len, struct bio_set *bs)
{
	struct bio *clone = &tio->clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}
}
/*
 * Creates a bio that consists of range of complete bvecs.
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned short idx,
		      unsigned short bv_count, unsigned int len,
		      struct bio_set *bs)
{
	struct bio *clone = &tio->clone;

	__bio_clone(clone, bio);
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti, int nr_iovecs)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));
	tio->target_request_nr = 0;

	return tio;
}
static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
				   unsigned request_nr, sector_t len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs);
	struct bio *clone = &tio->clone;

	tio->target_request_nr = request_nr;

	/*
	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	 */
	__bio_clone(clone, ci->bio);
	if (len) {
		clone->bi_sector = ci->sector;
		clone->bi_size = to_bytes(len);
	}

	__map_bio(ti, tio);
}
static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
				    unsigned num_requests, sector_t len)
{
	unsigned request_nr;

	for (request_nr = 0; request_nr < num_requests; request_nr++)
		__issue_target_request(ci, ti, request_nr, len);
}

static int __clone_and_map_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__issue_target_requests(ci, ti, ti->num_flush_requests, 0);

	return 0;
}
/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti, bio->bi_max_vecs);
	clone_bio(tio, bio, ci->sector, ci->idx, bio->bi_vcnt - ci->idx,
		  ci->sector_count, ci->md->bs);
	__map_bio(ti, tio);
	ci->sector_count = 0;
}
typedef unsigned (*get_num_requests_fn)(struct dm_target *ti);

static unsigned get_num_discard_requests(struct dm_target *ti)
{
	return ti->num_discard_requests;
}

static unsigned get_num_write_same_requests(struct dm_target *ti)
{
	return ti->num_write_same_requests;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_requests;
}
static int __clone_and_map_changing_extent_only(struct clone_info *ci,
						get_num_requests_fn get_num_requests,
						is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	sector_t len;
	unsigned num_requests;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_requests = get_num_requests ? get_num_requests(ti) : 0;
		if (!num_requests)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min(ci->sector_count, max_io_len(ci->sector, ti));

		__issue_target_requests(ci, ti, num_requests, len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __clone_and_map_discard(struct clone_info *ci)
{
	return __clone_and_map_changing_extent_only(ci, get_num_discard_requests,
						    is_split_required_for_discard);
}

static int __clone_and_map_write_same(struct clone_info *ci)
{
	return __clone_and_map_changing_extent_only(ci, get_num_write_same_requests, NULL);
}
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __clone_and_map_discard(ci);
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __clone_and_map_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->sector, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_simple(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		tio = alloc_tio(ci, ti, bio->bi_max_vecs);
		clone_bio(tio, bio, ci->sector, ci->idx, i - ci->idx, len,
			  ci->md->bs);
		__map_bio(ti, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->sector, ti);
			}

			len = min(remaining, max);

			tio = alloc_tio(ci, ti, 1);
			split_bvec(tio, bio, ci->sector, ci->idx,
				   bv->bv_offset + offset, len, ci->md->bs);

			__map_bio(ti, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		bio_io_error(bio);
		return;
	}

	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __clone_and_map_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __clone_and_map(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries.  So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static void _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return;
}
static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static void dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		blk_queue_bio(q, bio);
	else
		_dm_request(q, bio);
}

void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;

	return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->buffer = rq->buffer;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}
static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}
static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
	struct request *clone;

	blk_start_request(orig);
	clone = orig->special;
	atomic_inc(&md->pending[rq_data_dir(clone)]);

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	return clone;
}
/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;
	sector_t pos;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto delay_and_out;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			/*
			 * Must perform setup, that dm_done() requires,
			 * before calling dm_kill_unmapped_request
			 */
			DMERR_LIMIT("request attempted access beyond the end of device");
			clone = dm_start_request(md, rq);
			dm_kill_unmapped_request(clone, -EIO);
			continue;
		}

		if (ti->type->busy && ti->type->busy(ti))
			goto delay_and_out;

		clone = dm_start_request(md, rq);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))
			goto requeued;

		BUG_ON(!irqs_disabled());
		spin_lock(q->queue_lock);
	}

	goto out;

requeued:
	BUG_ON(!irqs_disabled());
	spin_lock(q->queue_lock);

delay_and_out:
	blk_delay_queue(q, HZ / 10);
out:
	dm_table_put(map);
}
int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_table_put(map);

	return r;
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
		if (map) {
			/*
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);

			dm_table_put(map);
		}
	}

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}
static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	md->type = DM_TYPE_NONE;
	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush",
				 WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && (md->tio_pool || dm_table_get_type(t) == DM_TYPE_BIO_BASED) && md->bs) {
		/*
		 * The md already has necessary mempools. Reload just the
		 * bioset because front_pad may have changed because
		 * a different table was loaded.
		 */
		bioset_free(md->bs);
		md->bs = p->bs;
		p->bs = NULL;
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, now no need any mempools in the table */
	dm_table_free_md_mempools(t);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}
/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	unsigned long flags;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	write_lock_irqsave(&md->map_lock, flags);
	old_map = md->map;
	md->map = t;
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	write_unlock_irqrestore(&md->map_lock, flags);

	return old_map;
}
/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;
	unsigned long flags;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	md->map = NULL;
	write_unlock_irqrestore(&md->map_lock, flags);

	return map;
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}
/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_put(map);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_read(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		up_read(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, c);

		down_read(&md->io_lock);
	}

	up_read(&md->io_lock);
}
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table(md);
		if (live_map)
			limits = md->queue->limits;
		dm_table_put(live_map);
	}

	r = dm_calculate_queue_limits(table, &limits);
	if (r) {
		map = ERR_PTR(r);
		goto out;
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set BMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);

	snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
		 DM_COOKIE_ENV_VAR_NAME, cookie);
	return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
				  action, envp);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}
uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
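/*
 * Illustrative polling pattern (hypothetical caller, not part of this file):
 * snapshot the event counter first, then sleep until it changes.
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *	// ... check whatever condition is of interest ...
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;	// interrupted by a signal
 */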
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
	unsigned int pool_size = (type == DM_TYPE_BIO_BASED) ? 16 : MIN_IOS;

	if (!pools)
		return NULL;

	per_bio_data_size = roundup(per_bio_data_size, __alignof__(struct dm_target_io));

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = NULL;
	if (type == DM_TYPE_REQUEST_BASED) {
		pools->tio_pool = mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
		if (!pools->tio_pool)
			goto free_io_pool_and_out;
	}

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		bioset_create(pool_size,
			      per_bio_data_size + offsetof(struct dm_target_io, clone)) :
		bioset_create(pool_size,
			      offsetof(struct dm_rq_clone_bio_info, clone));
	if (!pools->bs)
		goto free_tio_pool_and_out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto free_bioset_and_out;

	return pools;

free_bioset_and_out:
	bioset_free(pools->bs);

free_tio_pool_and_out:
	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");