/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>
#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
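
/*
 * Illustrative sketch (not part of the original file): how a bio-based
 * target could recover its per-bio map_info through the accessor above.
 * __example_target_context() is a made-up helper, not a real dm interface.
 */
static inline void *__example_target_context(struct bio *bio)
{
	union map_info *info = dm_get_mapinfo(bio);

	return info ? info->ptr : NULL;
}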
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;
	atomic_t holders;
	atomic_t open_count;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereferencing.
	 */
	struct dm_table *map;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;
};
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256

static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_tio_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	atomic_dec(&md->open_count);
	dm_put(md);

	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
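
/*
 * Illustrative sketch (not part of the original file): the usage pattern
 * the comment above prescribes for the SRCU-protected accessors.
 * __example_live_table_size() is a made-up helper.
 */
static inline sector_t __example_live_table_size(struct mapped_device *md)
{
	int srcu_idx;
	sector_t size = 0;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);

	if (map)
		size = dm_table_get_size(map);
	dm_put_live_table(md, srcu_idx);	/* always pair with the get */

	return size;
}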
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
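
/*
 * Illustrative sketch (not part of the original file): feeding a made-up
 * geometry to dm_set_geometry().  The figures are arbitrary; the only
 * check enforced above is start <= cylinders * heads * sectors.
 */
static inline int __example_set_geometry(struct mapped_device *md)
{
	struct hd_geometry geo = {
		.heads = 255,
		.sectors = 63,
		.cylinders = 1024,
		.start = 0,
	};

	return dm_set_geometry(md, &geo);
}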
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	free_tio(md, tio);
	dec_pending(io, error);
}
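
/*
 * Illustrative sketch (not part of the original file): the return-value
 * contract clone_endio() expects from a target's end_io hook.
 * __example_end_io() is a made-up target hook, shown only to document the
 * DM_ENDIO_* conventions handled above.
 */
static inline int __example_end_io(struct dm_target *ti, struct bio *bio,
				   int error)
{
	if (error == -EWOULDBLOCK)
		return DM_ENDIO_REQUEUE;	/* push the I/O back for retry */

	return error;				/* < 0: fail io, 0: complete */
}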
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}
/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
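
/*
 * Illustrative sketch (not part of the original file): the boundary
 * arithmetic used by max_io_len() above, factored out.  A power-of-two
 * max_io_len can be reduced with a mask; anything else needs sector_div().
 * __example_offset_within_boundary() is a hypothetical helper.
 */
static inline sector_t __example_offset_within_boundary(sector_t offset,
							unsigned boundary)
{
	if (unlikely(boundary & (boundary - 1)))	/* not a power of two */
		return sector_div(offset, boundary);

	return offset & (boundary - 1);
}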
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
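
/*
 * Illustrative sketch (not part of the original file): a target constructor
 * would typically cap its I/O size with the helper above.
 * __EXAMPLE_CHUNK_SECTORS is a made-up constant.
 */
#define __EXAMPLE_CHUNK_SECTORS 128

static inline int __example_ctr_set_limits(struct dm_target *ti)
{
	/* fails with -EINVAL and sets ti->error if len exceeds UINT_MAX */
	return dm_set_target_max_io_len(ti, __EXAMPLE_CHUNK_SECTORS);
}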
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
{
	bio->bi_sector = sector;
	bio->bi_size = to_bytes(len);
}

static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
{
	bio->bi_idx = idx;
	bio->bi_vcnt = idx + bv_count;
	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
}
static void clone_bio_integrity(struct bio *bio, struct bio *clone,
				unsigned short idx, unsigned len, unsigned offset,
				unsigned trim)
{
	if (!bio_integrity(bio))
		return;

	bio_integrity_clone(clone, bio, GFP_NOIO);

	if (trim)
		bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
			    sector_t sector, unsigned short idx,
			    unsigned offset, unsigned len)
{
	struct bio *clone = &tio->clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	*clone->bi_io_vec = *bv;

	bio_setup_sector(clone, sector, len);

	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	clone_bio_integrity(bio, clone, idx, len, offset, 1);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned short idx,
		      unsigned short bv_count, unsigned len)
{
	struct bio *clone = &tio->clone;
	unsigned trim = 0;

	__bio_clone(clone, bio);
	bio_setup_sector(clone, sector, len);
	bio_setup_bv(clone, idx, bv_count);

	if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
		trim = 1;
	clone_bio_integrity(bio, clone, idx, len, 0, trim);
}
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti, int nr_iovecs,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));
	tio->target_bio_nr = target_bio_nr;

	return tio;
}
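
/*
 * Illustrative sketch (not part of the original file): alloc_tio() works
 * because the md's bioset was created with front_pad big enough to embed a
 * dm_target_io ahead of each bio (see dm_alloc_md_mempools() below), so
 * container_of() can walk back from any clone to its enclosing tio.
 */
static inline struct dm_target_io *__example_tio_from_clone(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}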
static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
				       unsigned target_bio_nr, sector_t len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
	struct bio *clone = &tio->clone;

	/*
	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	 */
	__bio_clone(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, len);

	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, sector_t len)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, 0);

	return 0;
}
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				     sector_t sector, int nr_iovecs,
				     unsigned short idx, unsigned short bv_count,
				     unsigned offset, unsigned len,
				     unsigned split_bvec)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;

	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr);
		if (split_bvec)
			clone_split_bio(tio, bio, sector, idx, offset, len);
		else
			clone_bio(tio, bio, sector, idx, bv_count, len);
		__map_bio(tio);
	}
}
typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_bios;
}

static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	sector_t len;
	unsigned num_bios;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min(ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min(ci->sector_count, max_io_len(ci->sector, ti));

		__send_duplicate_bios(ci, ti, num_bios, len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
}

static int __send_write_same(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
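
/*
 * Illustrative sketch (not part of the original file): a hypothetical new
 * "foo" operation would plug into __send_changing_extent_only() exactly
 * like discard and write-same do above.  __example_get_num_foo_bios() is
 * made up and reuses num_discard_bios only as a stand-in per-target count.
 */
static unsigned __example_get_num_foo_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;	/* stand-in for ti->num_foo_bios */
}

static inline int __example_send_foo(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, __example_get_num_foo_bios,
					   NULL);
}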
/*
 * Find maximum number of sectors / bvecs we can process with a single bio.
 */
static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
{
	struct bio *bio = ci->bio;
	sector_t bv_len, total_len = 0;

	for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
		bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);

		if (bv_len > max)
			break;

		max -= bv_len;
		total_len += bv_len;
	}

	return total_len;
}

static int __split_bvec_across_targets(struct clone_info *ci,
				       struct dm_target *ti, sector_t max)
{
	struct bio *bio = ci->bio;
	struct bio_vec *bv = bio->bi_io_vec + ci->idx;
	sector_t remaining = to_sector(bv->bv_len);
	unsigned offset = 0;
	sector_t len;

	do {
		if (offset) {
			ti = dm_table_find_target(ci->map, ci->sector);
			if (!dm_target_is_valid(ti))
				return -EIO;

			max = max_io_len(ci->sector, ti);
		}

		len = min(remaining, max);

		__clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
					 bv->bv_offset + offset, len, 1);

		ci->sector += len;
		ci->sector_count -= len;
		offset += to_bytes(len);
	} while (remaining -= len);

	ci->idx++;

	return 0;
}
/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	sector_t len, max;
	int idx;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->sector, ti);

	/*
	 * Optimise for the simple case where we can do all of
	 * the remaining io with a single clone.
	 */
	if (ci->sector_count <= max) {
		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
					 ci->idx, bio->bi_vcnt - ci->idx, 0,
					 ci->sector_count, 0);
		ci->sector_count = 0;
		return 0;
	}

	/*
	 * There are some bvecs that don't span targets.
	 * Do as many of these as possible.
	 */
	if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		len = __len_within_target(ci, max, &idx);

		__clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
					 ci->idx, idx - ci->idx, 0, len, 0);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = idx;

		return 0;
	}

	/*
	 * Handle a bvec that must be split between two or more targets.
	 */
	return __split_bvec_across_targets(ci, ti, max);
}
/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);

	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
}
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries.  So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out:
	dm_put_live_table_fast(md);
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static void _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
	return;
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static void dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		blk_queue_bio(q, bio);
	else
		_dm_request(q, bio);
}
void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;

	return 0;
}
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->buffer = rq->buffer;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}
static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
	struct request *clone;

	blk_start_request(orig);
	clone = orig->special;
	atomic_inc(&md->pending[rq_data_dir(clone)]);

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	return clone;
}

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	struct dm_target *ti;
	struct request *rq, *clone;
	sector_t pos;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto delay_and_out;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		ti = dm_table_find_target(map, pos);
		if (!dm_target_is_valid(ti)) {
			/*
			 * Must perform setup, that dm_done() requires,
			 * before calling dm_kill_unmapped_request
			 */
			DMERR_LIMIT("request attempted access beyond the end of device");
			clone = dm_start_request(md, rq);
			dm_kill_unmapped_request(clone, -EIO);
			continue;
		}

		if (ti->type->busy && ti->type->busy(ti))
			goto delay_and_out;

		clone = dm_start_request(md, rq);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))
			goto requeued;

		BUG_ON(!irqs_disabled());
		spin_lock(q->queue_lock);
	}

	goto out;

requeued:
	BUG_ON(!irqs_disabled());
	spin_lock(q->queue_lock);

delay_and_out:
	blk_delay_queue(q, HZ / 10);
out:
	dm_put_live_table(md, srcu_idx);
}
int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_put_live_table_fast(md);

	return r;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table_fast(md);
		if (map) {
			/*
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);
		}
		dm_put_live_table_fast(md);
	}

	return r;
}
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}
static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
	put_disk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	cleanup_srcu_struct(&md->io_barrier);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);

	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	cleanup_srcu_struct(&md->io_barrier);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	dm_stats_cleanup(&md->stats);
	module_put(THIS_MODULE);
	kfree(md);
}
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
			/*
			 * There's no need to reload with request-based dm
			 * because the size of front_pad doesn't change.
			 * Note for future: If you are to reload bioset,
			 * prep-ed requests in the queue may refer
			 * to bio from the old bioset, so you must walk
			 * through the queue to unprep.
			 */
		}
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, now no need any mempools in the table */
	dm_table_free_md_mempools(t);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	old_map = md->map;
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	dm_sync_table(md);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	rcu_assign_pointer(md->map, NULL);
	dm_sync_table(md);

	return map;
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}
/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md, &srcu_idx);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}

static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = md->map;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out_unlock;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out_unlock; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = md->map;
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
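
/*
 * Illustrative sketch (not part of the original file): the canonical
 * suspend / table-swap / resume sequence built from the primitives above,
 * roughly as the ioctl layer drives it.  Error handling is elided.
 */
static inline void __example_swap_table(struct mapped_device *md,
					struct dm_table *t)
{
	struct dm_table *old;

	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	old = dm_swap_table(md, t);
	if (!IS_ERR(old) && old)
		dm_table_destroy(old);
	dm_resume(md);
}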
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 *
 * Internal suspend holds md->suspend_lock, which prevents interaction with
 * userspace-driven suspend.
 */

void dm_internal_suspend(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

void dm_internal_resume(struct mapped_device *md)
{
	if (dm_suspended_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
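
/*
 * Illustrative sketch (not part of the original file): how a waiter uses
 * the two helpers above - snapshot the event counter, then sleep until it
 * moves.  Returns -ERESTARTSYS if interrupted by a signal.
 */
static inline int __example_wait_for_next_event(struct mapped_device *md)
{
	uint32_t seen = dm_get_event_nr(md);

	return dm_wait_event(md, seen);
}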
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	unsigned int pool_size;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	if (type == DM_TYPE_BIO_BASED) {
		cachep = _io_cache;
		pool_size = 16;
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	} else if (type == DM_TYPE_REQUEST_BASED) {
		cachep = _rq_tio_cache;
		pool_size = MIN_IOS;
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
	} else
		goto out;

	pools->io_pool = mempool_create_slab_pool(MIN_IOS, cachep);
	if (!pools->io_pool)
		goto out;

	pools->bs = bioset_create(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");