/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;
/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, *clone;
	struct kthread_work work;
	int error;
	union map_info info;
	struct dm_stats_aux stats_aux;
	unsigned long duration_jiffies;
	unsigned n_sectors;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */
struct dm_table {
	int undefined__;
};
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereferencing.
	 */
	struct dm_table __rcu *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	struct request_queue *queue;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *rq_pool;

	struct bio_set *bs;

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	struct dm_stats stats;

	struct kthread_worker kworker;
	struct task_struct *kworker_task;

	/* for request-based merge heuristic in dm_request_fn() */
	unsigned seq_rq_merge_deadline_usecs;
	int last_rq_rw;
	sector_t last_rq_pos;
	ktime_t last_rq_start_time;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool use_blk_mq;
};
#ifdef CONFIG_DM_MQ_DEFAULT
static bool use_blk_mq = true;
#else
static bool use_blk_mq = false;
#endif

#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;

bool dm_use_blk_mq(struct mapped_device *md)
{
	return md->use_blk_mq;
}

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *rq_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
static unsigned __dm_get_module_param(unsigned *module_param,
				      unsigned def, unsigned max)
{
	unsigned param = ACCESS_ONCE(*module_param);
	unsigned modified_param = 0;

	if (!param)
		modified_param = def;
	else if (param > max)
		modified_param = max;

	if (modified_param) {
		(void)cmpxchg(module_param, param, modified_param);
		param = modified_param;
	}

	return param;
}
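/*
 * Note: __dm_get_module_param() clamps a user-writable module parameter on
 * each read: 0 selects the default and anything above max is capped.  The
 * cmpxchg() also writes the clamped value back, so subsequent sysfs reads
 * report the value actually in use.
 */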
unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_module_param(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}

static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	_rq_cache = kmem_cache_create("dm_old_clone_request", sizeof(struct request),
				      __alignof__(struct request), 0, NULL);
	if (!_rq_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_cache:
	kmem_cache_destroy(_rq_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}
static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
	dm_statistics_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
	dm_statistics_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);
out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = disk->private_data;
	if (WARN_ON(!md))
		goto out;

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	dm_put(md);
out:
	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}
sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}
static int dm_grab_bdev_for_ioctl(struct mapped_device *md,
				  struct block_device **bdev,
				  fmode_t *mode)
{
	struct dm_target *tgt;
	struct dm_table *map;
	int srcu_idx, r;

retry:
	r = -ENOTTY;
	map = dm_get_live_table(md, &srcu_idx);
	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);
	if (!tgt->type->prepare_ioctl)
		goto out;

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	r = tgt->type->prepare_ioctl(tgt, bdev, mode);
	if (r < 0)
		goto out;

	dm_put_live_table(md, srcu_idx);
	return r;

out:
	dm_put_live_table(md, srcu_idx);
	if (r == -ENOTCONN && !fatal_signal_pending(current)) {
		msleep(10);
		goto retry;
	}
	return r;
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	if (r > 0) {
		/*
		 * Target determined this ioctl is being issued against
		 * a logical partition of the parent bdev; so extra
		 * validation is needed.
		 */
		r = scsi_verify_blk_ioctl(NULL, cmd);
		if (r)
			goto out;
	}

	r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
out:
	return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_old_rq_tio(struct mapped_device *md,
						gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_old_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static struct request *alloc_old_clone_request(struct mapped_device *md,
					       gfp_t gfp_mask)
{
	return mempool_alloc(md->rq_pool, gfp_mask);
}

static void free_old_clone_request(struct mapped_device *md, struct request *rq)
{
	mempool_free(rq, md->rq_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}
static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending;
	int rw = bio_data_dir(bio);

	generic_end_io_acct(rw, &dm_disk(md)->part0, io->start_time);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}
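/*
 * The md->pending[] counters updated above are what the suspend path waits
 * on: once in-flight reads and writes both reach zero, anyone sleeping on
 * md->wait is woken.  part0.in_flight mirrors the same counts for the
 * generic diskstats accounting.
 */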
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}
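/*
 * The live table is protected by SRCU rather than a lock so the I/O fast
 * path never blocks: readers pin the current map with dm_get_live_table(),
 * and table swaps call dm_sync_table() to wait for all such readers before
 * the old table can be freed.
 */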
/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;

	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode)
{
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}
int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
		list_del(&td->list);
		kfree(td);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
		kfree(td);
	}
}
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;
		end_io_acct(io);
		free_io(md, io);

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio->bi_error = io_error;
			bio_endio(bio);
		}
	}
}
static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio)
{
	int error = bio->bi_error;
	int r = error;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	free_tio(md, tio);
	dec_pending(io, error);
}
1038 static void end_clone_bio(struct bio
*clone
)
1040 struct dm_rq_clone_bio_info
*info
=
1041 container_of(clone
, struct dm_rq_clone_bio_info
, clone
);
1042 struct dm_rq_target_io
*tio
= info
->tio
;
1043 struct bio
*bio
= info
->orig
;
1044 unsigned int nr_bytes
= info
->orig
->bi_iter
.bi_size
;
1045 int error
= clone
->bi_error
;
1051 * An error has already been detected on the request.
1052 * Once error occurred, just let clone->end_io() handle
1058 * Don't notice the error to the upper layer yet.
1059 * The error handling decision is made by the target driver,
1060 * when the request is completed.
1067 * I/O for the bio successfully completed.
1068 * Notice the data completion to the upper layer.
1072 * bios are processed from the head of the list.
1073 * So the completing bio should always be rq->bio.
1074 * If it's not, something wrong is happening.
1076 if (tio
->orig
->bio
!= bio
)
1077 DMERR("bio completion is going in the middle of the request");
1080 * Update the original request.
1081 * Do not use blk_end_request() here, because it may complete
1082 * the original request before the clone, and break the ordering.
1084 blk_update_request(tio
->orig
, 0, nr_bytes
);
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special);
}
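/*
 * For blk-mq the per-request dm_rq_target_io lives in the request's
 * driver-private pdu area; for the legacy .request_fn path it is attached
 * via rq->special in dm_old_prep_fn().  tio_from_request() hides that
 * difference from the rest of the code.
 */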
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
				    tio->n_sectors, true, tio->duration_jiffies,
				    &tio->stats_aux);
	}
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (!md->queue->mq_ops && run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}
static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;

	blk_rq_unprep_clone(clone);

	if (md->type == DM_TYPE_MQ_REQUEST_BASED)
		/* stacked on blk-mq queue(s) */
		tio->ti->type->release_clone_rq(clone);
	else if (!md->queue->mq_ops)
		/* request_fn queue stacked on request_fn queue(s) */
		free_old_clone_request(md, clone);

	if (!md->queue->mq_ops)
		free_old_rq_tio(tio);
}
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		blk_end_request_all(rq, error);
	else
		blk_mq_end_request(rq, error);
	rq_completed(md, rw, true);
}
static void dm_unprep_request(struct request *rq)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!rq->q->mq_ops) {
		rq->special = NULL;
		rq->cmd_flags &= ~REQ_DONTPREP;
	}

	if (clone)
		free_rq_clone(clone);
	else if (!tio->md->queue->mq_ops)
		free_old_rq_tio(tio);
}
/*
 * Requeue the original request of a clone.
 */
static void dm_old_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	blk_run_queue_async(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_mq_requeue_request(struct request *rq)
{
	struct request_queue *q = rq->q;
	unsigned long flags;

	blk_mq_requeue_request(rq);
	spin_lock_irqsave(q->queue_lock, flags);
	if (!blk_queue_stopped(q))
		blk_mq_kick_requeue_list(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_requeue_original_request(struct mapped_device *md,
					struct request *rq)
{
	int rw = rq_data_dir(rq);

	dm_unprep_request(rq);

	rq_end_stats(md, rq);
	if (!rq->q->mq_ops)
		dm_old_requeue_request(rq);
	else
		dm_mq_requeue_request(rq);

	rq_completed(md, rw, false);
}

static void dm_old_stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q)) {
		spin_unlock_irqrestore(q->queue_lock, flags);
		return;
	}

	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_stop_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_stop_queue(q);
	else
		blk_mq_stop_hw_queues(q);
}

static void dm_old_start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_start_queue(struct request_queue *q)
{
	if (!q->mq_ops)
		dm_old_start_queue(q);
	else {
		blk_mq_start_stopped_hw_queues(q, true);
		blk_mq_kick_requeue_list(q);
	}
}
static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio->md, tio->orig);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
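/*
 * dm_done() is the single place where a target's rq_end_io return value is
 * interpreted: <= 0 completes the original request, DM_ENDIO_INCOMPLETE
 * leaves completion to the target, and DM_ENDIO_REQUEUE pushes the original
 * request back onto the queue for a retry.
 */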
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;
	int rw;

	if (!clone) {
		rq_end_stats(tio->md, rq);
		rw = rq_data_dir(rq);
		if (!rq->q->mq_ops) {
			blk_end_request_all(rq, tio->error);
			rq_completed(tio->md, rw, false);
			free_old_rq_tio(tio);
		} else {
			blk_mq_end_request(rq, tio->error);
			rq_completed(tio->md, rw, false);
		}
		return;
	}

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, int error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	if (!rq->q->mq_ops)
		blk_complete_request(rq);
	else
		blk_mq_complete_request(rq, error);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() or clone_and_map_rq() functions fail.
 */
static void dm_kill_unmapped_request(struct request *rq, int error)
{
	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(rq, error);
}

/*
 * Called with the clone's queue lock held (in the case of .request_fn)
 */
static void end_clone_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (!clone->q->mq_ops) {
		/*
		 * For just cleaning up the information of the queue in which
		 * the clone was dispatched.
		 * The clone is *NOT* freed actually here because it is alloced
		 * from dm own mempool (REQ_ALLOCED isn't set).
		 */
		__blk_put_request(clone->q, clone);
	}

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the clone's queue lock.  Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this clone's queue
	 */
	dm_complete_request(tio->orig, error);
}
/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}
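/*
 * Example of the boundary math above: with max_io_len = 128 (a power of two)
 * and an offset of 200 sectors into the target, offset & 127 = 72, so at
 * most 128 - 72 = 56 sectors can be issued before the next boundary.
 * Non-power-of-two limits take the slower sector_div() path instead.
 */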
int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *                      <-- n_sectors -->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
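/*
 * Typical use (illustrative only, not a real target): a target's ->map
 * method that can only handle part of a bio might do
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n = min(bio_sectors(bio), some_limit(ti));
 *
 *		dm_accept_partial_bio(bio, n);
 *		bio->bi_bdev = ...;
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * The core then resubmits the remainder (region 3 above) as a new clone,
 * because the shortened length is written back through tio->len_ptr.
 */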
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	} else if (r != DM_MAPIO_SUBMITTED) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}
/*
 * Creates a bio that consists of range of complete bvecs.
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio))
		bio_integrity_clone(clone, bio, GFP_NOIO);

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone, 0, len);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->io = ci->io;
	tio->ti = ti;
	tio->target_bio_nr = target_bio_nr;

	return tio;
}
*ci
,
1578 struct dm_target
*ti
,
1579 unsigned target_bio_nr
, unsigned *len
)
1581 struct dm_target_io
*tio
= alloc_tio(ci
, ti
, target_bio_nr
);
1582 struct bio
*clone
= &tio
->clone
;
1586 __bio_clone_fast(clone
, ci
->bio
);
1588 bio_setup_sector(clone
, ci
->sector
, *len
);
1593 static void __send_duplicate_bios(struct clone_info
*ci
, struct dm_target
*ti
,
1594 unsigned num_bios
, unsigned *len
)
1596 unsigned target_bio_nr
;
1598 for (target_bio_nr
= 0; target_bio_nr
< num_bios
; target_bio_nr
++)
1599 __clone_and_map_simple_bio(ci
, ti
, target_bio_nr
, len
);
1602 static int __send_empty_flush(struct clone_info
*ci
)
1604 unsigned target_nr
= 0;
1605 struct dm_target
*ti
;
1607 BUG_ON(bio_has_data(ci
->bio
));
1608 while ((ti
= dm_table_get_target(ci
->map
, target_nr
++)))
1609 __send_duplicate_bios(ci
, ti
, ti
->num_flush_bios
, NULL
);
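/*
 * An empty REQ_FLUSH bio carries no data, so instead of being split it is
 * duplicated: every target receives ti->num_flush_bios clones.  The clones'
 * sector/size are irrelevant (len is NULL above); only the flush semantics
 * matter.
 */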
static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				     sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;

	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, target_bio_nr);
		tio->len_ptr = len;
		clone_bio(tio, bio, sector, *len);
		__map_bio(tio);
	}
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_bios;
}
static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	unsigned len;
	unsigned num_bios;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));

		__send_duplicate_bios(ci, ti, num_bios, &len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
}

static int __send_write_same(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}
/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	__clone_and_map_data_bio(ci, ti, ci->sector, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}
/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
}
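/*
 * Reference counting scheme used above: ci.io->io_count starts at 1 and each
 * cloned bio takes an extra reference in __map_bio(); the final dec_pending()
 * drops the initial reference, so the original bio completes only after every
 * clone has finished.
 */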
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static blk_qc_t dm_make_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	generic_start_io_acct(rw, bio_sectors(bio), &dm_disk(md)->part0);

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return BLK_QC_T_NONE;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
	return BLK_QC_T_NONE;
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}
static void dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	int r;

	if (blk_queue_io_stat(clone->q))
		clone->cmd_flags |= REQ_IO_STAT;

	clone->start_time = jiffies;
	r = blk_insert_cloned_request(clone->q, clone);
	if (r)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
static struct request *clone_old_rq(struct request *rq, struct mapped_device *md,
				    struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	/*
	 * Create clone for use with .request_fn request_queue
	 */
	struct request *clone;

	clone = alloc_old_clone_request(md, gfp_mask);
	if (!clone)
		return NULL;

	blk_rq_init(NULL, clone);
	if (setup_clone(clone, rq, tio, gfp_mask)) {
		/* -ENOMEM */
		free_old_clone_request(md, clone);
		return NULL;
	}

	return clone;
}

static void map_tio_request(struct kthread_work *work);

static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));
	if (md->kworker_task)
		init_kthread_work(&tio->work, map_tio_request);
}
static struct dm_rq_target_io *dm_old_prep_tio(struct request *rq,
					       struct mapped_device *md,
					       gfp_t gfp_mask)
{
	struct dm_rq_target_io *tio;
	int srcu_idx;
	struct dm_table *table;

	tio = alloc_old_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	init_tio(tio, rq, md);

	table = dm_get_live_table(md, &srcu_idx);
	/*
	 * Must clone a request if this .request_fn DM device
	 * is stacked on .request_fn device(s).
	 */
	if (!dm_table_mq_request_based(table)) {
		if (!clone_old_rq(rq, md, tio, gfp_mask)) {
			dm_put_live_table(md, srcu_idx);
			free_old_rq_tio(tio);
			return NULL;
		}
	}
	dm_put_live_table(md, srcu_idx);

	return tio;
}

/*
 * Called with the queue lock held.
 */
static int dm_old_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct dm_rq_target_io *tio;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	tio = dm_old_prep_tio(rq, md, GFP_ATOMIC);
	if (!tio)
		return BLKPREP_DEFER;

	rq->special = tio;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}
/*
 * Returns:
 * 0                : the request has been processed
 * DM_MAPIO_REQUEUE : the original request needs to be requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio, struct request *rq,
		       struct mapped_device *md)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct request *clone = NULL;

	if (tio->clone) {
		clone = tio->clone;
		r = ti->type->map_rq(ti, clone, &tio->info);
	} else {
		r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
		if (r < 0) {
			/* The target wants to complete the I/O */
			dm_kill_unmapped_request(rq, r);
			return r;
		}
		if (r != DM_MAPIO_REMAPPED)
			return r;
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}
	}

	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		dm_dispatch_clone_request(clone, rq);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(md, tio->orig);
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, r);
		return r;
	}

	return 0;
}
static void map_tio_request(struct kthread_work *work)
{
	struct dm_rq_target_io *tio = container_of(work, struct dm_rq_target_io, work);
	struct request *rq = tio->orig;
	struct mapped_device *md = tio->md;

	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
		dm_requeue_original_request(md, rq);
}

static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	if (!orig->q->mq_ops)
		blk_start_request(orig);
	else
		blk_mq_start_request(orig);
	atomic_inc(&md->pending[rq_data_dir(orig)]);

	if (md->seq_rq_merge_deadline_usecs) {
		md->last_rq_pos = rq_end_sector(orig);
		md->last_rq_rw = rq_data_dir(orig);
		md->last_rq_start_time = ktime_get();
	}

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);
		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, orig->cmd_flags, blk_rq_pos(orig),
				    tio->n_sectors, false, 0, &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
#define MAX_SEQ_RQ_MERGE_DEADLINE_USECS 100000

ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", md->seq_rq_merge_deadline_usecs);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	unsigned deadline;

	if (!dm_request_based(md) || md->use_blk_mq)
		return count;

	if (kstrtouint(buf, 10, &deadline))
		return -EINVAL;

	if (deadline > MAX_SEQ_RQ_MERGE_DEADLINE_USECS)
		deadline = MAX_SEQ_RQ_MERGE_DEADLINE_USECS;

	md->seq_rq_merge_deadline_usecs = deadline;

	return count;
}
static bool dm_request_peeked_before_merge_deadline(struct mapped_device *md)
{
	ktime_t kt_deadline;

	if (!md->seq_rq_merge_deadline_usecs)
		return false;

	kt_deadline = ns_to_ktime((u64)md->seq_rq_merge_deadline_usecs * NSEC_PER_USEC);
	kt_deadline = ktime_add_safe(md->last_rq_start_time, kt_deadline);

	return !ktime_after(ktime_get(), kt_deadline);
}
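/*
 * The merge deadline is a small window (default 0, i.e. disabled) during
 * which dm_request_fn() will leave a freshly peeked sequential request on
 * the queue, giving the block layer a chance to merge more bios into it
 * before it is dispatched to the target.
 */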
/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_target *ti = md->immutable_target;
	struct request *rq;
	struct dm_rq_target_io *tio;
	sector_t pos = 0;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, pos);
		dm_put_live_table(md, srcu_idx);
	}

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			return;

		/* always use block 0 to find the target for flushes for now */
		pos = 0;
		if (!(rq->cmd_flags & REQ_FLUSH))
			pos = blk_rq_pos(rq);

		if ((dm_request_peeked_before_merge_deadline(md) &&
		     md_in_flight(md) && rq->bio && rq->bio->bi_vcnt == 1 &&
		     md->last_rq_pos == pos && md->last_rq_rw == rq_data_dir(rq)) ||
		    (ti->type->busy && ti->type->busy(ti))) {
			blk_delay_queue(q, HZ / 100);
			return;
		}

		dm_start_request(md, rq);

		tio = tio_from_request(rq);
		/* Establish tio->ti before queuing work (map_tio_request) */
		tio->ti = ti;
		queue_kthread_work(&md->kworker, &tio->work);
		BUG_ON(!irqs_disabled());
	}
}
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		if (dm_request_based(md)) {
			/*
			 * With request-based DM we only need to check the
			 * top-level queue for congestion.
			 */
			r = md->queue->backing_dev_info.wb.state & bdi_bits;
		} else {
			map = dm_get_live_table_fast(md);
			if (map)
				r = dm_table_any_congested(map, bdi_bits);
			dm_put_live_table_fast(md);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}
/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();
	if (r < 0)
		return r;
	*minor = r;
	return 0;
}
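/*
 * Minor allocation is a two step dance: the IDR slot is first claimed with
 * the MINOR_ALLOCED placeholder under _minor_lock, and only once alloc_dev()
 * has fully built the mapped_device is the placeholder replaced with the
 * real pointer (see idr_replace() in alloc_dev()).
 */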
static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device may not have been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	/*
	 * Initialize data that will only be used by a non-blk-mq DM queue
	 * - must do so here (in alloc_dev callchain) before queue is used
	 */
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_data = md;
}

static void dm_init_normal_md_queue(struct mapped_device *md)
{
	md->use_blk_mq = false;
	dm_init_md_queue(md);

	/*
	 * Initialize aspects of queue that aren't relevant for blk-mq
	 */
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
}
static void cleanup_mapped_device(struct mapped_device *md)
{
	if (md->wq)
		destroy_workqueue(md->wq);
	if (md->kworker_task)
		kthread_stop(md->kworker_task);
	mempool_destroy(md->io_pool);
	mempool_destroy(md->rq_pool);
	if (md->bs)
		bioset_free(md->bs);

	cleanup_srcu_struct(&md->io_barrier);

	if (md->disk) {
		spin_lock(&_minor_lock);
		md->disk->private_data = NULL;
		spin_unlock(&_minor_lock);
		del_gendisk(md->disk);
		put_disk(md->disk);
	}

	if (md->queue)
		blk_cleanup_queue(md->queue);

	if (md->bdev) {
		bdput(md->bdev);
		md->bdev = NULL;
	}
}
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->use_blk_mq = use_blk_mq;
	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);
	md->kworker_task = NULL;

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad:
	cleanup_mapped_device(md);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}
*md
);
2392 static void free_dev(struct mapped_device
*md
)
2394 int minor
= MINOR(disk_devt(md
->disk
));
2398 cleanup_mapped_device(md
);
2400 blk_mq_free_tag_set(md
->tag_set
);
2404 free_table_devices(&md
->table_devices
);
2405 dm_stats_cleanup(&md
->stats
);
2408 module_put(THIS_MODULE
);
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
			md->bs = p->bs;
			p->bs = NULL;
		}
		/*
		 * There's no need to reload with request-based dm
		 * because the size of front_pad doesn't change.
		 * Note for future: If you are to reload bioset,
		 * prep-ed requests in the queue may refer
		 * to bio from the old bioset, so you must walk
		 * through the queue to unprep.
		 */
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->rq_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->rq_pool = p->rq_pool;
	p->rq_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* mempool bind completed, no longer need any mempools in the table */
	dm_table_free_md_mempools(t);
}
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t)) {
		dm_stop_queue(q);
		/*
		 * Leverage the fact that request-based DM targets are
		 * immutable singletons and establish md->immutable_target
		 * - used to optimize both dm_request_fn and dm_mq_queue_rq
		 */
		md->immutable_target = dm_table_get_immutable_target(t);
	}

	__bind_mempools(md, t);

	old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (old_map)
		dm_sync_table(md);

	return old_map;
}
/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = rcu_dereference_protected(md->map, 1);

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);
	dm_sync_table(md);

	return map;
}
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}
/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}
void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}
/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
static void dm_old_init_rq_based_worker_thread(struct mapped_device *md)
{
	/* Initialize the request-based DM worker thread */
	init_kthread_worker(&md->kworker);
	md->kworker_task = kthread_run(kthread_worker_fn, &md->kworker,
				       "kdmwork-%s", dm_device_name(md));
}
/*
 * Fully initialize a .request_fn request-based queue.
 */
static int dm_old_init_request_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return -EINVAL;

	/* disable dm_request_fn's merge heuristic by default */
	md->seq_rq_merge_deadline_usecs = 0;

	dm_init_normal_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_old_prep_fn);

	dm_old_init_rq_based_worker_thread(md);

	elv_register_queue(md->queue);

	return 0;
}
static int dm_mq_init_request(void *data, struct request *rq,
			      unsigned int hctx_idx, unsigned int request_idx,
			      unsigned int numa_node)
{
	struct mapped_device *md = data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	return 0;
}
static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
			  const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_MQ_RQ_QUEUE_BUSY;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before queuing work (map_tio_request)
	 * or making direct call to map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md, rq_data_dir(rq), false);
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}
static struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.map_queue = blk_mq_map_queue,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
static int dm_mq_init_request_queue(struct mapped_device *md)
{
	struct request_queue *q;
	int err;

	if (dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) {
		DMERR("request-based dm-mq may only be stacked on blk-mq device(s)");
		return -EINVAL;
	}

	md->tag_set = kzalloc(sizeof(struct blk_mq_tag_set), GFP_KERNEL);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = NUMA_NO_NODE;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;
	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
	dm_init_md_queue(md);

	/* backfill 'mq' sysfs registration normally done in blk_register_queue */
	blk_mq_register_disk(md->disk);

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
static unsigned filter_md_type(unsigned type, struct mapped_device *md)
{
	if (type == DM_TYPE_BIO_BASED)
		return type;

	return !md->use_blk_mq ? DM_TYPE_REQUEST_BASED : DM_TYPE_MQ_REQUEST_BASED;
}
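/*
 * Editorial note: for request-based tables the effective type depends on the
 * use_blk_mq module parameter (see module_param(use_blk_mq, ...) at the end
 * of this file): DM_TYPE_REQUEST_BASED selects the legacy .request_fn path
 * (dm_old_init_request_queue) and DM_TYPE_MQ_REQUEST_BASED selects the
 * blk-mq path (dm_mq_init_request_queue).
 */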
/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	int r;
	unsigned md_type = filter_md_type(dm_get_md_type(md), md);

	switch (md_type) {
	case DM_TYPE_REQUEST_BASED:
		r = dm_old_init_request_queue(md);
		if (r) {
			DMERR("Cannot initialize queue for request-based mapped device");
			return r;
		}
		break;
	case DM_TYPE_MQ_REQUEST_BASED:
		r = dm_mq_init_request_queue(md);
		if (r) {
			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
			return r;
		}
		break;
	case DM_TYPE_BIO_BASED:
		dm_init_normal_md_queue(md);
		blk_queue_make_request(md->queue, dm_make_request);
		/*
		 * DM handles splitting bios as needed.  Free the bio_split bioset
		 * since it won't be used (saves 1 process per bio-based DM device).
		 */
		bioset_free(md->queue->bio_split);
		md->queue->bio_split = NULL;
		break;
	}

	return 0;
}
struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md) {
		if ((md == MINOR_ALLOCED ||
		     (MINOR(disk_devt(dm_disk(md))) != minor) ||
		     dm_deleting_md(md) ||
		     test_bit(DMF_FREEING, &md->flags))) {
			md = NULL;
			goto out;
		}
		dm_get(md);
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);
void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}
void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}
int dm_hold(struct mapped_device *md)
{
	spin_lock(&_minor_lock);
	if (test_bit(DMF_FREEING, &md->flags)) {
		spin_unlock(&_minor_lock);
		return -EBUSY;
	}
	dm_get(md);
	spin_unlock(&_minor_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(dm_hold);
const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);
static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	might_sleep();

	spin_lock(&_minor_lock);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (dm_request_based(md) && md->kworker_task)
		flush_kthread_worker(&md->kworker);

	/*
	 * Take suspend_lock so that presuspend and postsuspend methods
	 * do not race with internal suspend.
	 */
	mutex_lock(&md->suspend_lock);
	map = dm_get_live_table(md, &srcu_idx);
	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}
	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);
	mutex_unlock(&md->suspend_lock);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
	free_dev(md);
}
void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}
void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * are added.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}
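/*
 * Editorial note: lock_fs()/unlock_fs() are paired through the DMF_FROZEN
 * flag, so unlock_fs() is safe to call on the error and resume paths even
 * when the filesystem was never frozen (e.g. a noflush suspend): it simply
 * returns if DMF_FROZEN is not set.
 */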
/*
 * If __dm_suspend returns 0, the device is completely quiescent
 * now. There is no request-processing activity. All new requests
 * are being added to md->deferred list.
 *
 * Caller must hold md->suspend_lock
 */
static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
			unsigned suspend_flags, int interruptible)
{
	bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
	bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
	int r;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/*
	 * This gets reverted if there's an error later and the targets
	 * provide the .presuspend_undo hook.
	 */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r) {
			dm_table_presuspend_undo_targets(map);
			return r;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md)) {
		dm_stop_queue(md->queue);
		if (md->kworker_task)
			flush_kthread_worker(&md->kworker);
	}

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, interruptible);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	if (map)
		synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			dm_start_queue(md->queue);

		unlock_fs(md);
		dm_table_presuspend_undo_targets(map);
		/* pushback list is already flushed, so skip flush */
	}

	return r;
}
/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE);
	if (r)
		goto out_unlock;

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}
static int __dm_resume(struct mapped_device *md, struct dm_table *map)
{
	if (map) {
		int r = dm_table_resume_targets(map);
		if (r)
			return r;
	}

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		dm_start_queue(md->queue);

	unlock_fs(md);

	return 0;
}
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

retry:
	mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);

	if (!dm_suspended_md(md))
		goto out;

	if (dm_suspended_internally_md(md)) {
		/* already internally suspended, wait for internal resume */
		mutex_unlock(&md->suspend_lock);
		r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
		if (r)
			return r;
		goto retry;
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
	if (!map || !dm_table_get_size(map))
		goto out;

	r = __dm_resume(md, map);
	if (r)
		goto out;

	clear_bit(DMF_SUSPENDED, &md->flags);

out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
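/*
 * Editorial note: an illustrative caller-side sequence for replacing a live
 * table, in sketch form with error handling omitted:
 *
 *	dm_suspend(md, flags);
 *	old = dm_swap_table(md, new_table);
 *	dm_table_destroy(old);
 *	dm_resume(md);
 *
 * dm_swap_table() itself checks dm_suspended_md(), so attempting to swap the
 * table of an unsuspended device fails with ERR_PTR(-EINVAL).
 */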
/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 */

static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;

	if (md->internal_suspend_count++)
		return; /* nested internal suspend */

	if (dm_suspended_md(md)) {
		set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
		return; /* nest suspend */
	}

	map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));

	/*
	 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
	 * supported.  Properly supporting a TASK_INTERRUPTIBLE internal suspend
	 * would require changing .presuspend to return an error -- avoid this
	 * until there is a need for more elaborate variants of internal suspend.
	 */
	(void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE);

	set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);

	dm_table_postsuspend_targets(map);
}
static void __dm_internal_resume(struct mapped_device *md)
{
	BUG_ON(!md->internal_suspend_count);

	if (--md->internal_suspend_count)
		return; /* resume from nested internal suspend */

	if (dm_suspended_md(md))
		goto done; /* resume from nested suspend */

	/*
	 * NOTE: existing callers don't need to call dm_table_resume_targets
	 * (which may fail -- so best to avoid it for now by passing NULL map)
	 */
	(void) __dm_resume(md, NULL);

done:
	clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
	smp_mb__after_atomic();
	wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
}
void dm_internal_suspend_noflush(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);

void dm_internal_resume(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	__dm_internal_resume(md);
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume);
/*
 * Fast variants of internal suspend/resume hold md->suspend_lock,
 * which prevents interaction with userspace-driven suspend.
 */

void dm_internal_suspend_fast(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
void dm_internal_resume_fast(struct mapped_device *md)
{
	if (dm_suspended_md(md) || dm_suspended_internally_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}
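/*
 * Editorial note: when a cookie is supplied, the uevent carries one extra
 * environment variable of the form <DM_COOKIE_ENV_VAR_NAME>=<cookie value>,
 * built by the snprintf() above.  Userspace (for example libdevmapper's udev
 * synchronisation code) can use that value to correlate the uevent with the
 * operation that generated it.
 */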
uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}
void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}
EXPORT_SYMBOL_GPL(dm_disk);

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}
int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended_internally_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}
int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type,
					    unsigned integrity, unsigned per_io_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep = NULL;
	unsigned int pool_size = 0;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	type = filter_md_type(type, md);

	switch (type) {
	case DM_TYPE_BIO_BASED:
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
		break;
	case DM_TYPE_REQUEST_BASED:
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache);
		if (!pools->rq_pool)
			goto out;
		/* fall through to setup remaining rq-based pools */
	case DM_TYPE_MQ_REQUEST_BASED:
		if (!pool_size)
			pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_io_data_size is not used. */
		WARN_ON(per_io_data_size != 0);
		break;
	default:
		BUG();
	}

	if (cachep) {
		pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
		if (!pools->io_pool)
			goto out;
	}

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
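/*
 * Editorial note: a worked example of the bio-based front_pad arithmetic
 * above, using a purely hypothetical per_io_data_size of 24 bytes and an
 * 8-byte alignment for struct dm_target_io:
 *
 *	front_pad = roundup(24, 8) + offsetof(struct dm_target_io, clone)
 *		  = 24 + offsetof(struct dm_target_io, clone)
 *
 * i.e. every bio allocated from pools->bs is preceded by room for both the
 * dm_target_io bookkeeping and the target's per-I/O data.
 */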
void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	mempool_destroy(pools->io_pool);
	mempool_destroy(pools->rq_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}
static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
			  u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_register)
		r = ops->pr_register(bdev, old_key, new_key, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
			 u32 flags)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_reserve)
		r = ops->pr_reserve(bdev, key, type, flags);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_release)
		r = ops->pr_release(bdev, key, type);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
			 enum pr_type type, bool abort)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_preempt)
		r = ops->pr_preempt(bdev, old_key, new_key, type, abort);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static int dm_pr_clear(struct block_device *bdev, u64 key)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	const struct pr_ops *ops;
	fmode_t mode;
	int r;

	r = dm_grab_bdev_for_ioctl(md, &bdev, &mode);
	if (r < 0)
		return r;

	ops = bdev->bd_disk->fops->pr_ops;
	if (ops && ops->pr_clear)
		r = ops->pr_clear(bdev, key);
	else
		r = -EOPNOTSUPP;

	bdput(bdev);
	return r;
}
static const struct pr_ops dm_pr_ops = {
	.pr_register	= dm_pr_register,
	.pr_reserve	= dm_pr_reserve,
	.pr_release	= dm_pr_release,
	.pr_preempt	= dm_pr_preempt,
	.pr_clear	= dm_pr_clear,
};
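/*
 * Editorial note: each dm_pr_* handler above follows the same pass-through
 * pattern: resolve the single underlying block device with
 * dm_grab_bdev_for_ioctl(), forward the call to that device's pr_ops if it
 * provides the matching method, and otherwise fail with -EOPNOTSUPP before
 * dropping the bdev reference.
 */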
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.pr_ops = &dm_pr_ops,
	.owner = THIS_MODULE
};
module_init(dm_init);
module_exit(dm_exit);
module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");