dm thin: sort the deferred cells
deliverable/linux.git: drivers/md/dm-thin.c
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/log2.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define COMMIT_PERIOD HZ
#define NO_SPACE_TIMEOUT_SECS 60

static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
		"A percentage of time allocated for copy on write");

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data). When you take an internal snapshot you clone the root node
 * of the origin btree. After this there is no concept of an origin or a
 * snapshot. They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic. If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin. The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block. Obviously
 * including all devices that share this block. (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block. This step can be
 * missed out if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping). This act of inserting breaks some
 * sharing of btree nodes between the two devices. Breaking sharing only
 * affects the btree of that specific device. Btrees for the other
 * devices that share the block never change. The btree for the origin
 * device as it was after the last commit is untouched, ie. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues. We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one). This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block. As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think that the data block in the snapshot device is
 * shared even after the write to the origin has broken sharing. I suspect
 * data blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block. At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

#define THROTTLE_THRESHOLD (1 * HZ)

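/*
 * Descriptive note on the throttle below: submitters take the semaphore
 * for reading via throttle_lock()/throttle_unlock().  Once a single pass
 * of the worker has been running for longer than THROTTLE_THRESHOLD,
 * throttle_work_update() takes it for writing, which holds off new
 * throttle_lock() callers until throttle_work_complete() releases it.
 */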
struct throttle {
	struct rw_semaphore lock;
	unsigned long threshold;
	bool throttle_applied;
};

static void throttle_init(struct throttle *t)
{
	init_rwsem(&t->lock);
	t->throttle_applied = false;
}

static void throttle_work_start(struct throttle *t)
{
	t->threshold = jiffies + THROTTLE_THRESHOLD;
}

static void throttle_work_update(struct throttle *t)
{
	if (!t->throttle_applied && jiffies > t->threshold) {
		down_write(&t->lock);
		t->throttle_applied = true;
	}
}

static void throttle_work_complete(struct throttle *t)
{
	if (t->throttle_applied) {
		t->throttle_applied = false;
		up_write(&t->lock);
	}
}

static void throttle_lock(struct throttle *t)
{
	down_read(&t->lock);
}

static void throttle_unlock(struct throttle *t)
{
	up_read(&t->lock);
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device. It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 4 modes. Ordered in degraded order for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,	/* metadata may be changed, though data may not be allocated */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
	bool error_if_no_space:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_cell_fn)(struct thin_c *tc, struct dm_bio_prison_cell *cell);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

#define CELL_SORT_ARRAY_SIZE 8192

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	bool low_water_triggered:1;	/* A dm event has been sent */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct throttle throttle;
	struct work_struct worker;
	struct delayed_work waker;
	struct delayed_work no_space_timeout;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;
	struct list_head active_thins;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_cell_fn process_cell;
	process_cell_fn process_discard_cell;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;

	struct dm_bio_prison_cell *cell_sort_array[CELL_SORT_ARRAY_SIZE];
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct list_head list;
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	sector_t origin_size;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
	bool requeue_mode:1;
	spinlock_t lock;
	struct list_head deferred_cells;
	struct bio_list deferred_bio_list;
	struct bio_list retry_on_resume_list;
	struct rb_root sort_bio_list; /* sorted list of deferred bios */

	/*
	 * Ensures the thin is not destroyed until the worker has finished
	 * iterating the active_thins list.
	 */
	atomic_t refcount;
	struct completion can_destroy;
};

/*----------------------------------------------------------------*/

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_bio_prison_cell *cell_prealloc;

	/*
	 * Allocate a cell from the prison's mempool.
	 * This might block but it can't fail.
	 */
	cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);

	r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
	if (r)
		/*
		 * We reused an old cell; we can get rid of
		 * the new one.
		 */
		dm_bio_prison_free_cell(pool->prison, cell_prealloc);

	return r;
}

static void cell_release(struct pool *pool,
			 struct dm_bio_prison_cell *cell,
			 struct bio_list *bios)
{
	dm_cell_release(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_visit_release(struct pool *pool,
			       void (*fn)(void *, struct dm_bio_prison_cell *),
			       void *context,
			       struct dm_bio_prison_cell *cell)
{
	dm_cell_visit_release(pool->prison, fn, context, cell);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_release_no_holder(struct pool *pool,
				   struct dm_bio_prison_cell *cell,
				   struct bio_list *bios)
{
	dm_cell_release_no_holder(pool->prison, cell, bios);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error_with_code(struct pool *pool,
				 struct dm_bio_prison_cell *cell, int error_code)
{
	dm_cell_error(pool->prison, cell, error_code);
	dm_bio_prison_free_cell(pool->prison, cell);
}

static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, -EIO);
}

static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, 0);
}

static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	cell_error_with_code(pool, cell, DM_ENDIO_REQUEUE);
}

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
	struct rb_node rb_node;
};

static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;
	unsigned long flags;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, master);
	bio_list_init(master);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static void requeue_deferred_cells(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct list_head cells;
	struct dm_bio_prison_cell *cell, *tmp;

	INIT_LIST_HEAD(&cells);

	spin_lock_irqsave(&tc->lock, flags);
	list_splice_init(&tc->deferred_cells, &cells);
	spin_unlock_irqrestore(&tc->lock, flags);

	list_for_each_entry_safe(cell, tmp, &cells, user_list)
		cell_requeue(pool, cell);
}

static void requeue_io(struct thin_c *tc)
{
	requeue_bio_list(tc, &tc->deferred_bio_list);
	requeue_bio_list(tc, &tc->retry_on_resume_list);
	requeue_deferred_cells(tc);
}

static void error_thin_retry_list(struct thin_c *tc)
{
	struct bio *bio;
	unsigned long flags;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_merge(&bios, &tc->retry_on_resume_list);
	bio_list_init(&tc->retry_on_resume_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		bio_io_error(bio);
}

static void error_retry_list(struct pool *pool)
{
	struct thin_c *tc;

	rcu_read_lock();
	list_for_each_entry_rcu(tc, &pool->active_thins, list)
		error_thin_retry_list(tc);
	rcu_read_unlock();
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

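/*
 * Map the bio's starting sector to a block number: a shift when the
 * pool's block size is a power of two, a 64-bit division otherwise.
 */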
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_iter.bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_iter.bi_sector =
			(block << pool->sectors_per_block_shift) |
			(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
				sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	bool pass_discard:1;
	bool definitely_not_shared:1;

	/*
	 * Track quiescing, copying and zeroing preparation actions. When this
	 * counter hits zero the block is prepared and can be inserted into the
	 * btree.
	 */
	atomic_t prepare_actions;

	int err;
	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying. Instead this bio is hooked. The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (atomic_dec_and_test(&m->prepare_actions)) {
		list_add_tail(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
{
	unsigned long flags;
	struct pool *pool = m->tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	__complete_mapping_preparation(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	struct dm_thin_new_mapping *m = context;

	m->err = read_err || write_err ? -EIO : 0;
	complete_mapping_preparation(m);
}

static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;

	m->err = err;
	complete_mapping_preparation(m);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell, except the original holder, back
 * to the deferred_bios list.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
	spin_unlock_irqrestore(&tc->lock, flags);

	wake_worker(pool);
}

static void thin_defer_bio(struct thin_c *tc, struct bio *bio);

struct remap_info {
	struct thin_c *tc;
	struct bio_list defer_bios;
	struct bio_list issue_bios;
};

static void __inc_remap_and_issue_cell(void *context,
				       struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA))
			bio_list_add(&info->defer_bios, bio);
		else {
			inc_all_io_entry(info->tc->pool, bio);

			/*
			 * We can't issue the bios with the bio prison lock
			 * held, so we add them to a list to issue on
			 * return from this function.
			 */
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void inc_remap_and_issue_cell(struct thin_c *tc,
				     struct dm_bio_prison_cell *cell,
				     dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	/*
	 * We have to be careful to inc any bios we're about to issue
	 * before the cell is released, and avoid a race with new bios
	 * being added to the cell.
	 */
	cell_visit_release(tc->pool, __inc_remap_and_issue_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(info.tc, bio, block);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio) {
		m->bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&m->bio->bi_remaining);
	}
	cell_error(m->tc->pool, m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct pool *pool = tc->pool;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio) {
		bio->bi_end_io = m->saved_bi_end_io;
		atomic_inc(&bio->bi_remaining);
	}

	if (m->err) {
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		metadata_operation_failed(pool, "dm_thin_insert_block", r);
		cell_error(pool, m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
		bio_endio(bio, 0);
	} else {
		inc_all_io_entry(tc->pool, m->cell->holder);
		remap_and_issue(tc, m->cell->holder, m->data_block);
		inc_remap_and_issue_cell(tc, m->cell, m->data_block);
	}

out:
	list_del(&m->list);
	mempool_free(m, pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		if (m->definitely_not_shared)
			remap_and_issue(tc, m->bio, m->data_block);
		else {
			bool used = false;
			if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
				bio_endio(m->bio, 0);
			else
				remap_and_issue(tc, m->bio, m->data_block);
		}
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

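/*
 * Splice the list of prepared mappings off the pool (under the pool lock)
 * and run the given completion callback on each entry.
 */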
static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_iter.bi_size ==
		(pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *m = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	memset(m, 0, sizeof(struct dm_thin_new_mapping));
	INIT_LIST_HEAD(&m->list);
	m->bio = NULL;

	pool->next_mapping = NULL;

	return m;
}

static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
		    sector_t begin, sector_t end)
{
	int r;
	struct dm_io_region to;

	to.bdev = tc->pool_dev->bdev;
	to.sector = begin;
	to.count = end - begin;

	r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
	if (r < 0) {
		DMERR_LIMIT("dm_kcopyd_zero() failed");
		copy_complete(1, 1, m);
	}
}

static void remap_and_issue_overwrite(struct thin_c *tc, struct bio *bio,
				      dm_block_t data_block,
				      struct dm_thin_new_mapping *m)
{
	struct pool *pool = tc->pool;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->overwrite_mapping = m;
	m->bio = bio;
	save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
	inc_all_io_entry(pool, bio);
	remap_and_issue(tc, bio, data_block);
}

/*
 * A partial copy also needs to zero the uncopied region.
 */
static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio,
			  sector_t len)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;

	/*
	 * quiesce action + copy action + an extra reference held for the
	 * duration of this function (we may need to inc later for a
	 * partial zero).
	 */
	atomic_set(&m->prepare_actions, 3);

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		complete_mapping_preparation(m); /* already quiesced */

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_dest, m);
	else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = len;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = len;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			copy_complete(1, 1, m);

			/*
			 * We allow the zero to be issued, to simplify the
			 * error path. Otherwise we'd need to start
			 * worrying about decrementing the prepare_actions
			 * counter.
			 */
		}

		/*
		 * Do we need to zero a tail region?
		 */
		if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
			atomic_inc(&m->prepare_actions);
			ll_zero(tc, m,
				data_dest * pool->sectors_per_block + len,
				(data_dest + 1) * pool->sectors_per_block);
		}
	}

	complete_mapping_preparation(m); /* drop our ref */
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio,
		      tc->pool->sectors_per_block);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio))
		remap_and_issue_overwrite(tc, bio, data_block, m);

	else
		ll_zero(tc, m,
			data_block * pool->sectors_per_block,
			(data_block + 1) * pool->sectors_per_block);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t virt_block_begin = virt_block * pool->sectors_per_block;
	sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;

	if (virt_block_end <= tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      pool->sectors_per_block);

	else if (virt_block_begin < tc->origin_size)
		schedule_copy(tc, virt_block, tc->origin_dev,
			      virt_block, data_dest, cell, bio,
			      tc->origin_size - virt_block_begin);

	else
		schedule_zero(tc, virt_block, data_dest, cell, bio);
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) >= PM_READ_ONLY)
		return -EINVAL;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		metadata_operation_failed(pool, "dm_pool_commit_metadata", r);

	return r;
}

static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
{
	unsigned long flags;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark for data device: sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = true;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}
}

static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);

static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	struct pool *pool = tc->pool;

	if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
		return -EINVAL;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
		return r;
	}

	check_low_water_mark(pool, free_blocks);

	if (!free_blocks) {
		/*
		 * Try to commit to see if that will free up some
		 * more space.
		 */
		r = commit(pool);
		if (r)
			return r;

		r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
		if (r) {
			metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
			return r;
		}

		if (!free_blocks) {
			set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
			return -ENOSPC;
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r) {
		metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
		return r;
	}

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	unsigned long flags;

	spin_lock_irqsave(&tc->lock, flags);
	bio_list_add(&tc->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&tc->lock, flags);
}

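/*
 * Decide what to do with a bio that can't currently be serviced: returns 0
 * if it should be queued for retry on resume, or a negative errno if it
 * should be failed immediately, based on the pool mode and the
 * error_if_no_space feature.
 */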
static int should_error_unserviceable_bio(struct pool *pool)
{
	enum pool_mode m = get_pool_mode(pool);

	switch (m) {
	case PM_WRITE:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return -EIO;

	case PM_OUT_OF_DATA_SPACE:
		return pool->pf.error_if_no_space ? -ENOSPC : 0;

	case PM_READ_ONLY:
	case PM_FAIL:
		return -EIO;
	default:
		/* Shouldn't get here */
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return -EIO;
	}
}

static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
{
	int error = should_error_unserviceable_bio(pool);

	if (error)
		bio_endio(bio, error);
	else
		retry_on_resume(bio);
}

static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;
	int error;

	error = should_error_unserviceable_bio(pool);
	if (error) {
		cell_error_with_code(pool, cell, error);
		return;
	}

	bio_list_init(&bios);
	cell_release(pool, cell, &bios);

	error = should_error_unserviceable_bio(pool);
	if (error)
		while ((bio = bio_list_pop(&bios)))
			bio_endio(bio, error);
	else
		while ((bio = bio_list_pop(&bios)))
			retry_on_resume(bio);
}

static void process_discard_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct bio *bio = cell->holder;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell2;
	struct dm_cell_key key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block. This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (bio_detain(tc->pool, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block. We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = pool->pf.discard_passdown;
			m->definitely_not_shared = !lookup_result.shared;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list))
				pool->process_prepared_discard(m);

		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary. So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_discard_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	dm_block_t block = get_bio_block(tc, bio);

	build_virtual_key(tc->td, block, &key);
	if (bio_detain(tc->pool, &key, bio, &cell))
		return;

	process_discard_cell(tc, cell);
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void __remap_and_issue_shared_cell(void *context,
					  struct dm_bio_prison_cell *cell)
{
	struct remap_info *info = context;
	struct bio *bio;

	while ((bio = bio_list_pop(&cell->bios))) {
		if ((bio_data_dir(bio) == WRITE) ||
		    (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)))
			bio_list_add(&info->defer_bios, bio);
		else {
			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

			h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds);
			inc_all_io_entry(info->tc->pool, bio);
			bio_list_add(&info->issue_bios, bio);
		}
	}
}

static void remap_and_issue_shared_cell(struct thin_c *tc,
					struct dm_bio_prison_cell *cell,
					dm_block_t block)
{
	struct bio *bio;
	struct remap_info info;

	info.tc = tc;
	bio_list_init(&info.defer_bios);
	bio_list_init(&info.issue_bios);

	cell_visit_release(tc->pool, __remap_and_issue_shared_cell,
			   &info, cell);

	while ((bio = bio_list_pop(&info.defer_bios)))
		thin_defer_bio(tc, bio);

	while ((bio = bio_list_pop(&info.issue_bios)))
		remap_and_issue(tc, bio, block);
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result,
			       struct dm_bio_prison_cell *virt_cell)
{
	struct dm_bio_prison_cell *data_cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (bio_detain(pool, &key, bio, &data_cell)) {
		cell_defer_no_holder(tc, virt_cell);
		return;
	}

	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) {
		break_sharing(tc, bio, block, &key, lookup_result, data_cell);
		cell_defer_no_holder(tc, virt_cell);
	} else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, lookup_result->block);

		remap_and_issue_shared_cell(tc, data_cell, lookup_result->block);
		remap_and_issue_shared_cell(tc, virt_cell, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;
	struct pool *pool = tc->pool;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_iter.bi_size) {
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		retry_bios_on_resume(pool, cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		cell_error(pool, cell);
		break;
	}
}

static void process_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	int r;
	struct pool *pool = tc->pool;
	struct bio *bio = cell->holder;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	if (tc->requeue_mode) {
		cell_requeue(pool, cell);
		return;
	}

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared)
			process_shared_bio(tc, bio, block, &lookup_result, cell);
		else {
			inc_all_io_entry(pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);

			if (bio_end_sector(bio) <= tc->origin_size)
				remap_to_origin_and_issue(tc, bio);

			else if (bio->bi_iter.bi_sector < tc->origin_size) {
				zero_fill_bio(bio);
				bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
				remap_to_origin_and_issue(tc, bio);

			} else {
				zero_fill_bio(bio);
				bio_endio(bio, 0);
			}
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (bio_detain(pool, &key, bio, &cell))
		return;

	process_cell(tc, cell);
}

static void __process_bio_read_only(struct thin_c *tc, struct bio *bio,
				    struct dm_bio_prison_cell *cell)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) {
			handle_unserviceable_bio(tc->pool, bio);
			if (cell)
				cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
			if (cell)
				inc_remap_and_issue_cell(tc, cell, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (cell)
			cell_defer_no_holder(tc, cell);
		if (rw != READ) {
			handle_unserviceable_bio(tc->pool, bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		if (cell)
			cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	__process_bio_read_only(tc, bio, NULL);
}

static void process_cell_read_only(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	__process_bio_read_only(tc, cell->holder, cell);
}

static void process_bio_success(struct thin_c *tc, struct bio *bio)
{
	bio_endio(bio, 0);
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

static void process_cell_success(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_success(tc->pool, cell);
}

static void process_cell_fail(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	cell_error(tc->pool, cell);
}

/*
 * FIXME: should we also commit due to size of transaction, measured in
 * metadata blocks?
 */
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}

#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))

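/*
 * Insert the bio into the per-thin rb-tree, keyed by its starting sector,
 * so the deferred bios can later be drained in ascending sector order.
 */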
static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
{
	struct rb_node **rbp, *parent;
	struct dm_thin_endio_hook *pbd;
	sector_t bi_sector = bio->bi_iter.bi_sector;

	rbp = &tc->sort_bio_list.rb_node;
	parent = NULL;
	while (*rbp) {
		parent = *rbp;
		pbd = thin_pbd(parent);

		if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
			rbp = &(*rbp)->rb_left;
		else
			rbp = &(*rbp)->rb_right;
	}

	pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	rb_link_node(&pbd->rb_node, parent, rbp);
	rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
}

static void __extract_sorted_bios(struct thin_c *tc)
{
	struct rb_node *node;
	struct dm_thin_endio_hook *pbd;
	struct bio *bio;

	for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
		pbd = thin_pbd(node);
		bio = thin_bio(pbd);

		bio_list_add(&tc->deferred_bio_list, bio);
		rb_erase(&pbd->rb_node, &tc->sort_bio_list);
	}

	WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
}

static void __sort_thin_deferred_bios(struct thin_c *tc)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	/* Sort deferred_bio_list using rb-tree */
	while ((bio = bio_list_pop(&bios)))
		__thin_bio_rb_add(tc, bio);

	/*
	 * Transfer the sorted bios in sort_bio_list back to
	 * deferred_bio_list to allow lockless submission of
	 * all bios.
	 */
	__extract_sorted_bios(tc);
}

static void process_thin_deferred_bios(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;
	struct blk_plug plug;
	unsigned count = 0;

	if (tc->requeue_mode) {
		requeue_bio_list(tc, &tc->deferred_bio_list);
		return;
	}

	bio_list_init(&bios);

	spin_lock_irqsave(&tc->lock, flags);

	if (bio_list_empty(&tc->deferred_bio_list)) {
		spin_unlock_irqrestore(&tc->lock, flags);
		return;
	}

	__sort_thin_deferred_bios(tc);

	bio_list_merge(&bios, &tc->deferred_bio_list);
	bio_list_init(&tc->deferred_bio_list);

	spin_unlock_irqrestore(&tc->lock, flags);

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(&bios))) {
		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&tc->lock, flags);
			bio_list_add(&tc->deferred_bio_list, bio);
			bio_list_merge(&tc->deferred_bio_list, &bios);
			spin_unlock_irqrestore(&tc->lock, flags);
			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);

		if ((count++ & 127) == 0) {
			throttle_work_update(&pool->throttle);
			dm_pool_issue_prefetches(pool->pmd);
		}
	}
	blk_finish_plug(&plug);
}

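/*
 * Sort deferred cells by the starting sector of their holder bio so that,
 * like the deferred bios above, they are processed and issued in
 * ascending sector order.
 */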
1808static int cmp_cells(const void *lhs, const void *rhs)
1809{
1810 struct dm_bio_prison_cell *lhs_cell = *((struct dm_bio_prison_cell **) lhs);
1811 struct dm_bio_prison_cell *rhs_cell = *((struct dm_bio_prison_cell **) rhs);
1812
1813 BUG_ON(!lhs_cell->holder);
1814 BUG_ON(!rhs_cell->holder);
1815
1816 if (lhs_cell->holder->bi_iter.bi_sector < rhs_cell->holder->bi_iter.bi_sector)
1817 return -1;
1818
1819 if (lhs_cell->holder->bi_iter.bi_sector > rhs_cell->holder->bi_iter.bi_sector)
1820 return 1;
1821
1822 return 0;
1823}
1824
1825static unsigned sort_cells(struct pool *pool, struct list_head *cells)
1826{
1827 unsigned count = 0;
1828 struct dm_bio_prison_cell *cell, *tmp;
1829
1830 list_for_each_entry_safe(cell, tmp, cells, user_list) {
1831 if (count >= CELL_SORT_ARRAY_SIZE)
1832 break;
1833
1834 pool->cell_sort_array[count++] = cell;
1835 list_del(&cell->user_list);
1836 }
1837
1838 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL);
1839
1840 return count;
1841}
1842
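/*
 * sort_cells() drains at most CELL_SORT_ARRAY_SIZE cells from the list into
 * pool->cell_sort_array (note sizeof(cell) is the size of a pointer - the
 * array holds cell pointers) and orders them by the holder bio's starting
 * sector using the kernel's sort().  The caller below keeps looping until
 * the deferred list is empty, so a larger backlog is simply handled one
 * batch at a time.
 */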
a374bb21
JT
1843static void process_thin_deferred_cells(struct thin_c *tc)
1844{
1845 struct pool *pool = tc->pool;
1846 unsigned long flags;
1847 struct list_head cells;
ac4c3f34
JT
1848 struct dm_bio_prison_cell *cell;
1849 unsigned i, j, count;
a374bb21
JT
1850
1851 INIT_LIST_HEAD(&cells);
1852
1853 spin_lock_irqsave(&tc->lock, flags);
1854 list_splice_init(&tc->deferred_cells, &cells);
1855 spin_unlock_irqrestore(&tc->lock, flags);
1856
1857 if (list_empty(&cells))
1858 return;
1859
ac4c3f34
JT
1860 do {
1861 count = sort_cells(tc->pool, &cells);
a374bb21 1862
ac4c3f34
JT
1863 for (i = 0; i < count; i++) {
1864 cell = pool->cell_sort_array[i];
1865 BUG_ON(!cell->holder);
a374bb21 1866
ac4c3f34
JT
1867 /*
1868 * If we've got no free new_mapping structs, and processing
 1869			 * this cell might require one, we pause until there are some
1870 * prepared mappings to process.
1871 */
1872 if (ensure_next_mapping(pool)) {
1873 for (j = i; j < count; j++)
1874 list_add(&pool->cell_sort_array[j]->user_list, &cells);
1875
1876 spin_lock_irqsave(&tc->lock, flags);
1877 list_splice(&cells, &tc->deferred_cells);
1878 spin_unlock_irqrestore(&tc->lock, flags);
1879 return;
1880 }
1881
1882 if (cell->holder->bi_rw & REQ_DISCARD)
1883 pool->process_discard_cell(tc, cell);
1884 else
1885 pool->process_cell(tc, cell);
1886 }
1887 } while (!list_empty(&cells));
a374bb21
JT
1888}
1889
b10ebd34
JT
1890static void thin_get(struct thin_c *tc);
1891static void thin_put(struct thin_c *tc);
1892
1893/*
1894 * We can't hold rcu_read_lock() around code that can block. So we
1895 * find a thin with the rcu lock held; bump a refcount; then drop
1896 * the lock.
1897 */
1898static struct thin_c *get_first_thin(struct pool *pool)
1899{
1900 struct thin_c *tc = NULL;
1901
1902 rcu_read_lock();
1903 if (!list_empty(&pool->active_thins)) {
1904 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1905 thin_get(tc);
1906 }
1907 rcu_read_unlock();
1908
1909 return tc;
1910}
1911
1912static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1913{
1914 struct thin_c *old_tc = tc;
1915
1916 rcu_read_lock();
1917 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1918 thin_get(tc);
1919 thin_put(old_tc);
1920 rcu_read_unlock();
1921 return tc;
1922 }
1923 thin_put(old_tc);
1924 rcu_read_unlock();
1925
1926 return NULL;
1927}
1928
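/*
 * Iterating active_thins follows the usual RCU + refcount pattern: the rcu
 * read lock is held only long enough to find the next thin_c and take a
 * reference, so callers are free to block while processing it.  thin_put()
 * drops the reference; when the last one goes, the completion lets
 * thin_dtr() (further down) finish tearing the device down.
 */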
c140e1c4
MS
1929static void process_deferred_bios(struct pool *pool)
1930{
1931 unsigned long flags;
1932 struct bio *bio;
1933 struct bio_list bios;
1934 struct thin_c *tc;
1935
b10ebd34
JT
1936 tc = get_first_thin(pool);
1937 while (tc) {
a374bb21 1938 process_thin_deferred_cells(tc);
c140e1c4 1939 process_thin_deferred_bios(tc);
b10ebd34
JT
1940 tc = get_next_thin(pool, tc);
1941 }
991d9fa0
JT
1942
1943 /*
1944 * If there are any deferred flush bios, we must commit
1945 * the metadata before issuing them.
1946 */
1947 bio_list_init(&bios);
1948 spin_lock_irqsave(&pool->lock, flags);
1949 bio_list_merge(&bios, &pool->deferred_flush_bios);
1950 bio_list_init(&pool->deferred_flush_bios);
1951 spin_unlock_irqrestore(&pool->lock, flags);
1952
4d1662a3
MS
1953 if (bio_list_empty(&bios) &&
1954 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
991d9fa0
JT
1955 return;
1956
020cc3b5 1957 if (commit(pool)) {
991d9fa0
JT
1958 while ((bio = bio_list_pop(&bios)))
1959 bio_io_error(bio);
1960 return;
1961 }
905e51b3 1962 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
1963
1964 while ((bio = bio_list_pop(&bios)))
1965 generic_make_request(bio);
1966}
1967
1968static void do_worker(struct work_struct *ws)
1969{
1970 struct pool *pool = container_of(ws, struct pool, worker);
1971
7d327fe0 1972 throttle_work_start(&pool->throttle);
8a01a6af 1973 dm_pool_issue_prefetches(pool->pmd);
7d327fe0 1974 throttle_work_update(&pool->throttle);
e49e5829 1975 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
7d327fe0 1976 throttle_work_update(&pool->throttle);
e49e5829 1977 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
7d327fe0 1978 throttle_work_update(&pool->throttle);
991d9fa0 1979 process_deferred_bios(pool);
7d327fe0 1980 throttle_work_complete(&pool->throttle);
991d9fa0
JT
1981}
1982
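/*
 * The worker handles work in a fixed order - metadata prefetches, prepared
 * mappings, prepared discards, then the per-thin deferred cells and bios -
 * calling throttle_work_update() between phases.  Judging by
 * thin_defer_bio_with_throttle() and thin_defer_cell() below, which take
 * the same throttle lock, this appears to give the map path a chance to
 * queue new work even while the worker is busy.
 */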
905e51b3
JT
1983/*
1984 * We want to commit periodically so that not too much
1985 * unwritten data builds up.
1986 */
1987static void do_waker(struct work_struct *ws)
1988{
1989 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1990 wake_worker(pool);
1991 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1992}
1993
85ad643b
JT
1994/*
1995 * We're holding onto IO to allow userland time to react. After the
1996 * timeout either the pool will have been resized (and thus back in
1997 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1998 */
1999static void do_no_space_timeout(struct work_struct *ws)
2000{
2001 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
2002 no_space_timeout);
2003
2004 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
2005 set_pool_mode(pool, PM_READ_ONLY);
2006}
2007
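/*
 * The delay comes from no_space_timeout_secs (60 seconds by default).  The
 * work is only queued from set_pool_mode() when the value is non-zero and
 * error_if_no_space is off, so setting it to zero effectively disables the
 * timeout.  In this version the variable is exposed as the dm_thin_pool
 * module parameter "no_space_timeout", declared near the end of this file.
 */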
991d9fa0
JT
2008/*----------------------------------------------------------------*/
2009
e7a3e871 2010struct pool_work {
738211f7 2011 struct work_struct worker;
e7a3e871
JT
2012 struct completion complete;
2013};
2014
2015static struct pool_work *to_pool_work(struct work_struct *ws)
2016{
2017 return container_of(ws, struct pool_work, worker);
2018}
2019
2020static void pool_work_complete(struct pool_work *pw)
2021{
2022 complete(&pw->complete);
2023}
738211f7 2024
e7a3e871
JT
2025static void pool_work_wait(struct pool_work *pw, struct pool *pool,
2026 void (*fn)(struct work_struct *))
2027{
2028 INIT_WORK_ONSTACK(&pw->worker, fn);
2029 init_completion(&pw->complete);
2030 queue_work(pool->wq, &pw->worker);
2031 wait_for_completion(&pw->complete);
2032}
2033
2034/*----------------------------------------------------------------*/
2035
2036struct noflush_work {
2037 struct pool_work pw;
2038 struct thin_c *tc;
738211f7
JT
2039};
2040
e7a3e871 2041static struct noflush_work *to_noflush(struct work_struct *ws)
738211f7 2042{
e7a3e871 2043 return container_of(to_pool_work(ws), struct noflush_work, pw);
738211f7
JT
2044}
2045
2046static void do_noflush_start(struct work_struct *ws)
2047{
e7a3e871 2048 struct noflush_work *w = to_noflush(ws);
738211f7
JT
2049 w->tc->requeue_mode = true;
2050 requeue_io(w->tc);
e7a3e871 2051 pool_work_complete(&w->pw);
738211f7
JT
2052}
2053
2054static void do_noflush_stop(struct work_struct *ws)
2055{
e7a3e871 2056 struct noflush_work *w = to_noflush(ws);
738211f7 2057 w->tc->requeue_mode = false;
e7a3e871 2058 pool_work_complete(&w->pw);
738211f7
JT
2059}
2060
2061static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
2062{
2063 struct noflush_work w;
2064
738211f7 2065 w.tc = tc;
e7a3e871 2066 pool_work_wait(&w.pw, tc->pool, fn);
738211f7
JT
2067}
2068
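/*
 * noflush_work() runs fn on the pool's workqueue and waits for it to
 * complete, so requeue_mode is only ever flipped from the worker thread.
 * For illustration, the thin target's suspend/resume hooks later in this
 * file call it as noflush_work(tc, do_noflush_start) before suspending and
 * noflush_work(tc, do_noflush_stop) when resuming.
 */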
2069/*----------------------------------------------------------------*/
2070
e49e5829
JT
2071static enum pool_mode get_pool_mode(struct pool *pool)
2072{
2073 return pool->pf.mode;
2074}
2075
3e1a0699
JT
2076static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
2077{
2078 dm_table_event(pool->ti->table);
2079 DMINFO("%s: switching pool to %s mode",
2080 dm_device_name(pool->pool_md), new_mode);
2081}
2082
8b64e881 2083static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
e49e5829 2084{
cdc2b415 2085 struct pool_c *pt = pool->ti->private;
07f2b6e0
MS
2086 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
2087 enum pool_mode old_mode = get_pool_mode(pool);
80c57893 2088 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
07f2b6e0
MS
2089
2090 /*
2091 * Never allow the pool to transition to PM_WRITE mode if user
2092 * intervention is required to verify metadata and data consistency.
2093 */
2094 if (new_mode == PM_WRITE && needs_check) {
2095 DMERR("%s: unable to switch pool to write mode until repaired.",
2096 dm_device_name(pool->pool_md));
2097 if (old_mode != new_mode)
2098 new_mode = old_mode;
2099 else
2100 new_mode = PM_READ_ONLY;
2101 }
2102 /*
2103 * If we were in PM_FAIL mode, rollback of metadata failed. We're
2104 * not going to recover without a thin_repair. So we never let the
2105 * pool move out of the old mode.
2106 */
2107 if (old_mode == PM_FAIL)
2108 new_mode = old_mode;
e49e5829 2109
8b64e881 2110 switch (new_mode) {
e49e5829 2111 case PM_FAIL:
8b64e881 2112 if (old_mode != new_mode)
3e1a0699 2113 notify_of_pool_mode_change(pool, "failure");
5383ef3a 2114 dm_pool_metadata_read_only(pool->pmd);
e49e5829
JT
2115 pool->process_bio = process_bio_fail;
2116 pool->process_discard = process_bio_fail;
a374bb21
JT
2117 pool->process_cell = process_cell_fail;
2118 pool->process_discard_cell = process_cell_fail;
e49e5829
JT
2119 pool->process_prepared_mapping = process_prepared_mapping_fail;
2120 pool->process_prepared_discard = process_prepared_discard_fail;
3e1a0699
JT
2121
2122 error_retry_list(pool);
e49e5829
JT
2123 break;
2124
2125 case PM_READ_ONLY:
8b64e881 2126 if (old_mode != new_mode)
3e1a0699
JT
2127 notify_of_pool_mode_change(pool, "read-only");
2128 dm_pool_metadata_read_only(pool->pmd);
2129 pool->process_bio = process_bio_read_only;
2130 pool->process_discard = process_bio_success;
a374bb21
JT
2131 pool->process_cell = process_cell_read_only;
2132 pool->process_discard_cell = process_cell_success;
3e1a0699
JT
2133 pool->process_prepared_mapping = process_prepared_mapping_fail;
2134 pool->process_prepared_discard = process_prepared_discard_passdown;
2135
2136 error_retry_list(pool);
2137 break;
2138
2139 case PM_OUT_OF_DATA_SPACE:
2140 /*
2141 * Ideally we'd never hit this state; the low water mark
2142 * would trigger userland to extend the pool before we
2143 * completely run out of data space. However, many small
2144 * IOs to unprovisioned space can consume data space at an
2145 * alarming rate. Adjust your low water mark if you're
2146 * frequently seeing this mode.
2147 */
2148 if (old_mode != new_mode)
2149 notify_of_pool_mode_change(pool, "out-of-data-space");
2150 pool->process_bio = process_bio_read_only;
a374bb21
JT
2151 pool->process_discard = process_discard_bio;
2152 pool->process_cell = process_cell_read_only;
2153 pool->process_discard_cell = process_discard_cell;
3e1a0699
JT
2154 pool->process_prepared_mapping = process_prepared_mapping;
2155 pool->process_prepared_discard = process_prepared_discard_passdown;
85ad643b 2156
80c57893
MS
2157 if (!pool->pf.error_if_no_space && no_space_timeout)
2158 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
e49e5829
JT
2159 break;
2160
2161 case PM_WRITE:
8b64e881 2162 if (old_mode != new_mode)
3e1a0699 2163 notify_of_pool_mode_change(pool, "write");
9b7aaa64 2164 dm_pool_metadata_read_write(pool->pmd);
e49e5829 2165 pool->process_bio = process_bio;
a374bb21
JT
2166 pool->process_discard = process_discard_bio;
2167 pool->process_cell = process_cell;
2168 pool->process_discard_cell = process_discard_cell;
e49e5829
JT
2169 pool->process_prepared_mapping = process_prepared_mapping;
2170 pool->process_prepared_discard = process_prepared_discard;
2171 break;
2172 }
8b64e881
MS
2173
2174 pool->pf.mode = new_mode;
cdc2b415
MS
2175 /*
2176 * The pool mode may have changed, sync it so bind_control_target()
2177 * doesn't cause an unexpected mode transition on resume.
2178 */
2179 pt->adjusted_pf.mode = new_mode;
e49e5829
JT
2180}
2181
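/*
 * A mode change is implemented almost entirely by swapping the pool's
 * process_* function pointers; the worker simply calls whatever is
 * currently installed.  Note the guards above: a pool whose metadata
 * carries the needs_check flag is never moved back to PM_WRITE until it
 * has been repaired, and a pool that has entered PM_FAIL stays there.
 */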
07f2b6e0 2182static void abort_transaction(struct pool *pool)
b5330655 2183{
07f2b6e0
MS
2184 const char *dev_name = dm_device_name(pool->pool_md);
2185
2186 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
2187 if (dm_pool_abort_metadata(pool->pmd)) {
2188 DMERR("%s: failed to abort metadata transaction", dev_name);
2189 set_pool_mode(pool, PM_FAIL);
2190 }
2191
2192 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
2193 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
2194 set_pool_mode(pool, PM_FAIL);
2195 }
2196}
399caddf 2197
07f2b6e0
MS
2198static void metadata_operation_failed(struct pool *pool, const char *op, int r)
2199{
b5330655
JT
2200 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
2201 dm_device_name(pool->pool_md), op, r);
2202
07f2b6e0 2203 abort_transaction(pool);
b5330655
JT
2204 set_pool_mode(pool, PM_READ_ONLY);
2205}
2206
e49e5829
JT
2207/*----------------------------------------------------------------*/
2208
991d9fa0
JT
2209/*
2210 * Mapping functions.
2211 */
2212
2213/*
2214 * Called only while mapping a thin bio to hand it over to the workqueue.
2215 */
2216static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
2217{
2218 unsigned long flags;
2219 struct pool *pool = tc->pool;
2220
c140e1c4
MS
2221 spin_lock_irqsave(&tc->lock, flags);
2222 bio_list_add(&tc->deferred_bio_list, bio);
2223 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
2224
2225 wake_worker(pool);
2226}
2227
7d327fe0
JT
2228static void thin_defer_bio_with_throttle(struct thin_c *tc, struct bio *bio)
2229{
2230 struct pool *pool = tc->pool;
2231
2232 throttle_lock(&pool->throttle);
2233 thin_defer_bio(tc, bio);
2234 throttle_unlock(&pool->throttle);
2235}
2236
a374bb21
JT
2237static void thin_defer_cell(struct thin_c *tc, struct dm_bio_prison_cell *cell)
2238{
2239 unsigned long flags;
2240 struct pool *pool = tc->pool;
2241
2242 throttle_lock(&pool->throttle);
2243 spin_lock_irqsave(&tc->lock, flags);
2244 list_add_tail(&cell->user_list, &tc->deferred_cells);
2245 spin_unlock_irqrestore(&tc->lock, flags);
2246 throttle_unlock(&pool->throttle);
2247
2248 wake_worker(pool);
2249}
2250
59c3d2c6 2251static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
eb2aa48d 2252{
59c3d2c6 2253 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d
JT
2254
2255 h->tc = tc;
2256 h->shared_read_entry = NULL;
e8088073 2257 h->all_io_entry = NULL;
eb2aa48d 2258 h->overwrite_mapping = NULL;
eb2aa48d
JT
2259}
2260
991d9fa0
JT
2261/*
2262 * Non-blocking function called from the thin target's map function.
2263 */
7de3ee57 2264static int thin_bio_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2265{
2266 int r;
2267 struct thin_c *tc = ti->private;
2268 dm_block_t block = get_bio_block(tc, bio);
2269 struct dm_thin_device *td = tc->td;
2270 struct dm_thin_lookup_result result;
a374bb21 2271 struct dm_bio_prison_cell *virt_cell, *data_cell;
e8088073 2272 struct dm_cell_key key;
991d9fa0 2273
59c3d2c6 2274 thin_hook_bio(tc, bio);
e49e5829 2275
738211f7
JT
2276 if (tc->requeue_mode) {
2277 bio_endio(bio, DM_ENDIO_REQUEUE);
2278 return DM_MAPIO_SUBMITTED;
2279 }
2280
e49e5829
JT
2281 if (get_pool_mode(tc->pool) == PM_FAIL) {
2282 bio_io_error(bio);
2283 return DM_MAPIO_SUBMITTED;
2284 }
2285
104655fd 2286 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
7d327fe0 2287 thin_defer_bio_with_throttle(tc, bio);
991d9fa0
JT
2288 return DM_MAPIO_SUBMITTED;
2289 }
2290
c822ed96
JT
2291 /*
2292 * We must hold the virtual cell before doing the lookup, otherwise
2293 * there's a race with discard.
2294 */
2295 build_virtual_key(tc->td, block, &key);
a374bb21 2296 if (bio_detain(tc->pool, &key, bio, &virt_cell))
c822ed96
JT
2297 return DM_MAPIO_SUBMITTED;
2298
991d9fa0
JT
2299 r = dm_thin_find_block(td, block, 0, &result);
2300
2301 /*
2302 * Note that we defer readahead too.
2303 */
2304 switch (r) {
2305 case 0:
2306 if (unlikely(result.shared)) {
2307 /*
2308 * We have a race condition here between the
2309 * result.shared value returned by the lookup and
2310 * snapshot creation, which may cause new
2311 * sharing.
2312 *
 2313			 * To avoid this, always quiesce the origin before
2314 * taking the snap. You want to do this anyway to
2315 * ensure a consistent application view
2316 * (i.e. lockfs).
2317 *
2318 * More distant ancestors are irrelevant. The
2319 * shared flag will be set in their case.
2320 */
a374bb21 2321 thin_defer_cell(tc, virt_cell);
e8088073 2322 return DM_MAPIO_SUBMITTED;
991d9fa0 2323 }
e8088073 2324
e8088073 2325 build_data_key(tc->td, result.block, &key);
a374bb21
JT
2326 if (bio_detain(tc->pool, &key, bio, &data_cell)) {
2327 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2328 return DM_MAPIO_SUBMITTED;
2329 }
2330
2331 inc_all_io_entry(tc->pool, bio);
a374bb21
JT
2332 cell_defer_no_holder(tc, data_cell);
2333 cell_defer_no_holder(tc, virt_cell);
e8088073
JT
2334
2335 remap(tc, bio, result.block);
2336 return DM_MAPIO_REMAPPED;
991d9fa0
JT
2337
2338 case -ENODATA:
e49e5829
JT
2339 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
2340 /*
2341 * This block isn't provisioned, and we have no way
8c0f0e8c 2342 * of doing so.
e49e5829 2343 */
8c0f0e8c 2344 handle_unserviceable_bio(tc->pool, bio);
a374bb21 2345 cell_defer_no_holder(tc, virt_cell);
2aab3850 2346 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2347 }
2348 /* fall through */
2349
2350 case -EWOULDBLOCK:
a374bb21 2351 thin_defer_cell(tc, virt_cell);
2aab3850 2352 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2353
2354 default:
2355 /*
2356 * Must always call bio_io_error on failure.
2357 * dm_thin_find_block can fail with -EINVAL if the
2358 * pool is switched to fail-io mode.
2359 */
2360 bio_io_error(bio);
a374bb21 2361 cell_defer_no_holder(tc, virt_cell);
2aab3850 2362 return DM_MAPIO_SUBMITTED;
991d9fa0 2363 }
991d9fa0
JT
2364}
2365
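/*
 * To summarise thin_bio_map(): discards, flushes and FUA writes are always
 * deferred to the worker, as are bios that hit a shared or unprovisioned
 * block.  Only a read/write to an exclusively owned, already provisioned
 * block is remapped inline and returned as DM_MAPIO_REMAPPED; everything
 * else comes back as DM_MAPIO_SUBMITTED.
 */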
2366static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2367{
991d9fa0 2368 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
760fe67e 2369 struct request_queue *q;
991d9fa0 2370
760fe67e
MS
2371 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2372 return 1;
991d9fa0 2373
760fe67e
MS
2374 q = bdev_get_queue(pt->data_dev->bdev);
2375 return bdi_congested(&q->backing_dev_info, bdi_bits);
991d9fa0
JT
2376}
2377
c140e1c4 2378static void requeue_bios(struct pool *pool)
991d9fa0 2379{
c140e1c4
MS
2380 unsigned long flags;
2381 struct thin_c *tc;
2382
2383 rcu_read_lock();
2384 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2385 spin_lock_irqsave(&tc->lock, flags);
2386 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2387 bio_list_init(&tc->retry_on_resume_list);
2388 spin_unlock_irqrestore(&tc->lock, flags);
2389 }
2390 rcu_read_unlock();
991d9fa0
JT
2391}
2392
2393/*----------------------------------------------------------------
2394 * Binding of control targets to a pool object
2395 *--------------------------------------------------------------*/
9bc142dd
MS
2396static bool data_dev_supports_discard(struct pool_c *pt)
2397{
2398 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2399
2400 return q && blk_queue_discard(q);
2401}
2402
58051b94
JT
2403static bool is_factor(sector_t block_size, uint32_t n)
2404{
2405 return !sector_div(block_size, n);
2406}
2407
9bc142dd
MS
2408/*
2409 * If discard_passdown was enabled verify that the data device
0424caa1 2410 * supports discards. Disable discard_passdown if not.
9bc142dd 2411 */
0424caa1 2412static void disable_passdown_if_not_supported(struct pool_c *pt)
9bc142dd 2413{
0424caa1
MS
2414 struct pool *pool = pt->pool;
2415 struct block_device *data_bdev = pt->data_dev->bdev;
2416 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2417 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
2418 const char *reason = NULL;
9bc142dd
MS
2419 char buf[BDEVNAME_SIZE];
2420
0424caa1 2421 if (!pt->adjusted_pf.discard_passdown)
9bc142dd
MS
2422 return;
2423
0424caa1
MS
2424 if (!data_dev_supports_discard(pt))
2425 reason = "discard unsupported";
2426
2427 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2428 reason = "max discard sectors smaller than a block";
9bc142dd 2429
0424caa1
MS
2430 else if (data_limits->discard_granularity > block_size)
2431 reason = "discard granularity larger than a block";
2432
58051b94 2433 else if (!is_factor(block_size, data_limits->discard_granularity))
0424caa1
MS
2434 reason = "discard granularity not a factor of block size";
2435
2436 if (reason) {
2437 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2438 pt->adjusted_pf.discard_passdown = false;
2439 }
9bc142dd
MS
2440}
2441
991d9fa0
JT
2442static int bind_control_target(struct pool *pool, struct dm_target *ti)
2443{
2444 struct pool_c *pt = ti->private;
2445
e49e5829 2446 /*
9b7aaa64 2447 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
e49e5829 2448 */
07f2b6e0 2449 enum pool_mode old_mode = get_pool_mode(pool);
0424caa1 2450 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829 2451
8b64e881
MS
2452 /*
2453 * Don't change the pool's mode until set_pool_mode() below.
2454 * Otherwise the pool's process_* function pointers may
2455 * not match the desired pool mode.
2456 */
2457 pt->adjusted_pf.mode = old_mode;
2458
2459 pool->ti = ti;
2460 pool->pf = pt->adjusted_pf;
2461 pool->low_water_blocks = pt->low_water_blocks;
2462
9bc142dd 2463 set_pool_mode(pool, new_mode);
f402693d 2464
991d9fa0
JT
2465 return 0;
2466}
2467
2468static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2469{
2470 if (pool->ti == ti)
2471 pool->ti = NULL;
2472}
2473
2474/*----------------------------------------------------------------
2475 * Pool creation
2476 *--------------------------------------------------------------*/
67e2e2b2
JT
2477/* Initialize pool features. */
2478static void pool_features_init(struct pool_features *pf)
2479{
e49e5829 2480 pf->mode = PM_WRITE;
9bc142dd
MS
2481 pf->zero_new_blocks = true;
2482 pf->discard_enabled = true;
2483 pf->discard_passdown = true;
787a996c 2484 pf->error_if_no_space = false;
67e2e2b2
JT
2485}
2486
991d9fa0
JT
2487static void __pool_destroy(struct pool *pool)
2488{
2489 __pool_table_remove(pool);
2490
2491 if (dm_pool_metadata_close(pool->pmd) < 0)
2492 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2493
44feb387 2494 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2495 dm_kcopyd_client_destroy(pool->copier);
2496
2497 if (pool->wq)
2498 destroy_workqueue(pool->wq);
2499
2500 if (pool->next_mapping)
2501 mempool_free(pool->next_mapping, pool->mapping_pool);
2502 mempool_destroy(pool->mapping_pool);
44feb387
MS
2503 dm_deferred_set_destroy(pool->shared_read_ds);
2504 dm_deferred_set_destroy(pool->all_io_ds);
991d9fa0
JT
2505 kfree(pool);
2506}
2507
a24c2569 2508static struct kmem_cache *_new_mapping_cache;
a24c2569 2509
991d9fa0
JT
2510static struct pool *pool_create(struct mapped_device *pool_md,
2511 struct block_device *metadata_dev,
e49e5829
JT
2512 unsigned long block_size,
2513 int read_only, char **error)
991d9fa0
JT
2514{
2515 int r;
2516 void *err_p;
2517 struct pool *pool;
2518 struct dm_pool_metadata *pmd;
e49e5829 2519 bool format_device = read_only ? false : true;
991d9fa0 2520
e49e5829 2521 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
991d9fa0
JT
2522 if (IS_ERR(pmd)) {
2523 *error = "Error creating metadata object";
2524 return (struct pool *)pmd;
2525 }
2526
2527 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2528 if (!pool) {
2529 *error = "Error allocating memory for pool";
2530 err_p = ERR_PTR(-ENOMEM);
2531 goto bad_pool;
2532 }
2533
2534 pool->pmd = pmd;
2535 pool->sectors_per_block = block_size;
f9a8e0cd
MP
2536 if (block_size & (block_size - 1))
2537 pool->sectors_per_block_shift = -1;
2538 else
2539 pool->sectors_per_block_shift = __ffs(block_size);
991d9fa0 2540 pool->low_water_blocks = 0;
67e2e2b2 2541 pool_features_init(&pool->pf);
a195db2d 2542 pool->prison = dm_bio_prison_create();
991d9fa0
JT
2543 if (!pool->prison) {
2544 *error = "Error creating pool's bio prison";
2545 err_p = ERR_PTR(-ENOMEM);
2546 goto bad_prison;
2547 }
2548
df5d2e90 2549 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
991d9fa0
JT
2550 if (IS_ERR(pool->copier)) {
2551 r = PTR_ERR(pool->copier);
2552 *error = "Error creating pool's kcopyd client";
2553 err_p = ERR_PTR(r);
2554 goto bad_kcopyd_client;
2555 }
2556
2557 /*
2558 * Create singlethreaded workqueue that will service all devices
2559 * that use this metadata.
2560 */
2561 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2562 if (!pool->wq) {
2563 *error = "Error creating pool's workqueue";
2564 err_p = ERR_PTR(-ENOMEM);
2565 goto bad_wq;
2566 }
2567
7d327fe0 2568 throttle_init(&pool->throttle);
991d9fa0 2569 INIT_WORK(&pool->worker, do_worker);
905e51b3 2570 INIT_DELAYED_WORK(&pool->waker, do_waker);
85ad643b 2571 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
991d9fa0 2572 spin_lock_init(&pool->lock);
991d9fa0
JT
2573 bio_list_init(&pool->deferred_flush_bios);
2574 INIT_LIST_HEAD(&pool->prepared_mappings);
104655fd 2575 INIT_LIST_HEAD(&pool->prepared_discards);
c140e1c4 2576 INIT_LIST_HEAD(&pool->active_thins);
88a6621b 2577 pool->low_water_triggered = false;
44feb387
MS
2578
2579 pool->shared_read_ds = dm_deferred_set_create();
2580 if (!pool->shared_read_ds) {
2581 *error = "Error creating pool's shared read deferred set";
2582 err_p = ERR_PTR(-ENOMEM);
2583 goto bad_shared_read_ds;
2584 }
2585
2586 pool->all_io_ds = dm_deferred_set_create();
2587 if (!pool->all_io_ds) {
2588 *error = "Error creating pool's all io deferred set";
2589 err_p = ERR_PTR(-ENOMEM);
2590 goto bad_all_io_ds;
2591 }
991d9fa0
JT
2592
2593 pool->next_mapping = NULL;
a24c2569
MS
2594 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2595 _new_mapping_cache);
991d9fa0
JT
2596 if (!pool->mapping_pool) {
2597 *error = "Error creating pool's mapping mempool";
2598 err_p = ERR_PTR(-ENOMEM);
2599 goto bad_mapping_pool;
2600 }
2601
991d9fa0 2602 pool->ref_count = 1;
905e51b3 2603 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2604 pool->pool_md = pool_md;
2605 pool->md_dev = metadata_dev;
2606 __pool_table_insert(pool);
2607
2608 return pool;
2609
991d9fa0 2610bad_mapping_pool:
44feb387
MS
2611 dm_deferred_set_destroy(pool->all_io_ds);
2612bad_all_io_ds:
2613 dm_deferred_set_destroy(pool->shared_read_ds);
2614bad_shared_read_ds:
991d9fa0
JT
2615 destroy_workqueue(pool->wq);
2616bad_wq:
2617 dm_kcopyd_client_destroy(pool->copier);
2618bad_kcopyd_client:
44feb387 2619 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2620bad_prison:
2621 kfree(pool);
2622bad_pool:
2623 if (dm_pool_metadata_close(pmd))
2624 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2625
2626 return err_p;
2627}
2628
2629static void __pool_inc(struct pool *pool)
2630{
2631 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2632 pool->ref_count++;
2633}
2634
2635static void __pool_dec(struct pool *pool)
2636{
2637 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2638 BUG_ON(!pool->ref_count);
2639 if (!--pool->ref_count)
2640 __pool_destroy(pool);
2641}
2642
2643static struct pool *__pool_find(struct mapped_device *pool_md,
2644 struct block_device *metadata_dev,
e49e5829
JT
2645 unsigned long block_size, int read_only,
2646 char **error, int *created)
991d9fa0
JT
2647{
2648 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2649
2650 if (pool) {
f09996c9
MS
2651 if (pool->pool_md != pool_md) {
2652 *error = "metadata device already in use by a pool";
991d9fa0 2653 return ERR_PTR(-EBUSY);
f09996c9 2654 }
991d9fa0
JT
2655 __pool_inc(pool);
2656
2657 } else {
2658 pool = __pool_table_lookup(pool_md);
2659 if (pool) {
f09996c9
MS
2660 if (pool->md_dev != metadata_dev) {
2661 *error = "different pool cannot replace a pool";
991d9fa0 2662 return ERR_PTR(-EINVAL);
f09996c9 2663 }
991d9fa0
JT
2664 __pool_inc(pool);
2665
67e2e2b2 2666 } else {
e49e5829 2667 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
67e2e2b2
JT
2668 *created = 1;
2669 }
991d9fa0
JT
2670 }
2671
2672 return pool;
2673}
2674
2675/*----------------------------------------------------------------
2676 * Pool target methods
2677 *--------------------------------------------------------------*/
2678static void pool_dtr(struct dm_target *ti)
2679{
2680 struct pool_c *pt = ti->private;
2681
2682 mutex_lock(&dm_thin_pool_table.mutex);
2683
2684 unbind_control_target(pt->pool, ti);
2685 __pool_dec(pt->pool);
2686 dm_put_device(ti, pt->metadata_dev);
2687 dm_put_device(ti, pt->data_dev);
2688 kfree(pt);
2689
2690 mutex_unlock(&dm_thin_pool_table.mutex);
2691}
2692
991d9fa0
JT
2693static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2694 struct dm_target *ti)
2695{
2696 int r;
2697 unsigned argc;
2698 const char *arg_name;
2699
2700 static struct dm_arg _args[] = {
74aa45c3 2701 {0, 4, "Invalid number of pool feature arguments"},
991d9fa0
JT
2702 };
2703
2704 /*
2705 * No feature arguments supplied.
2706 */
2707 if (!as->argc)
2708 return 0;
2709
2710 r = dm_read_arg_group(_args, as, &argc, &ti->error);
2711 if (r)
2712 return -EINVAL;
2713
2714 while (argc && !r) {
2715 arg_name = dm_shift_arg(as);
2716 argc--;
2717
e49e5829 2718 if (!strcasecmp(arg_name, "skip_block_zeroing"))
9bc142dd 2719 pf->zero_new_blocks = false;
e49e5829
JT
2720
2721 else if (!strcasecmp(arg_name, "ignore_discard"))
9bc142dd 2722 pf->discard_enabled = false;
e49e5829
JT
2723
2724 else if (!strcasecmp(arg_name, "no_discard_passdown"))
9bc142dd 2725 pf->discard_passdown = false;
991d9fa0 2726
e49e5829
JT
2727 else if (!strcasecmp(arg_name, "read_only"))
2728 pf->mode = PM_READ_ONLY;
2729
787a996c
MS
2730 else if (!strcasecmp(arg_name, "error_if_no_space"))
2731 pf->error_if_no_space = true;
2732
e49e5829
JT
2733 else {
2734 ti->error = "Unrecognised pool feature requested";
2735 r = -EINVAL;
2736 break;
2737 }
991d9fa0
JT
2738 }
2739
2740 return r;
2741}
2742
ac8c3f3d
JT
2743static void metadata_low_callback(void *context)
2744{
2745 struct pool *pool = context;
2746
2747 DMWARN("%s: reached low water mark for metadata device: sending event.",
2748 dm_device_name(pool->pool_md));
2749
2750 dm_table_event(pool->ti->table);
2751}
2752
7d48935e
MS
2753static sector_t get_dev_size(struct block_device *bdev)
2754{
2755 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2756}
2757
2758static void warn_if_metadata_device_too_big(struct block_device *bdev)
b17446df 2759{
7d48935e 2760 sector_t metadata_dev_size = get_dev_size(bdev);
b17446df
JT
2761 char buffer[BDEVNAME_SIZE];
2762
7d48935e 2763 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
b17446df
JT
2764 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2765 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
7d48935e
MS
2766}
2767
2768static sector_t get_metadata_dev_size(struct block_device *bdev)
2769{
2770 sector_t metadata_dev_size = get_dev_size(bdev);
2771
2772 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2773 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
b17446df
JT
2774
2775 return metadata_dev_size;
2776}
2777
24347e95
JT
2778static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2779{
2780 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2781
7d48935e 2782 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
24347e95
JT
2783
2784 return metadata_dev_size;
2785}
2786
ac8c3f3d
JT
2787/*
2788 * When a metadata threshold is crossed a dm event is triggered, and
2789 * userland should respond by growing the metadata device. We could let
2790 * userland set the threshold, like we do with the data threshold, but I'm
2791 * not sure they know enough to do this well.
2792 */
2793static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2794{
2795 /*
2796 * 4M is ample for all ops with the possible exception of thin
2797 * device deletion which is harmless if it fails (just retry the
2798 * delete after you've grown the device).
2799 */
2800 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2801 return min((dm_block_t)1024ULL /* 4M */, quarter);
2802}
2803
991d9fa0
JT
2804/*
2805 * thin-pool <metadata dev> <data dev>
2806 * <data block size (sectors)>
2807 * <low water mark (blocks)>
2808 * [<#feature args> [<arg>]*]
2809 *
2810 * Optional feature arguments are:
2811 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
67e2e2b2
JT
2812 * ignore_discard: disable discard
2813 * no_discard_passdown: don't pass discards down to the data device
787a996c
MS
2814 * read_only: Don't allow any changes to be made to the pool metadata.
2815 * error_if_no_space: error IOs, instead of queueing, if no space.
991d9fa0
JT
2816 */
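/*
 * For illustration only (device names and sizes are hypothetical), a table
 * line using a 64KiB (128 sector) block size, a low water mark of 32768
 * blocks and one feature argument would look like:
 *
 *   0 4194304 thin-pool /dev/mapper/meta /dev/mapper/data 128 32768 1 skip_block_zeroing
 */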
2817static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2818{
67e2e2b2 2819 int r, pool_created = 0;
991d9fa0
JT
2820 struct pool_c *pt;
2821 struct pool *pool;
2822 struct pool_features pf;
2823 struct dm_arg_set as;
2824 struct dm_dev *data_dev;
2825 unsigned long block_size;
2826 dm_block_t low_water_blocks;
2827 struct dm_dev *metadata_dev;
5d0db96d 2828 fmode_t metadata_mode;
991d9fa0
JT
2829
2830 /*
2831 * FIXME Remove validation from scope of lock.
2832 */
2833 mutex_lock(&dm_thin_pool_table.mutex);
2834
2835 if (argc < 4) {
2836 ti->error = "Invalid argument count";
2837 r = -EINVAL;
2838 goto out_unlock;
2839 }
5d0db96d 2840
991d9fa0
JT
2841 as.argc = argc;
2842 as.argv = argv;
2843
5d0db96d
JT
2844 /*
2845 * Set default pool features.
2846 */
2847 pool_features_init(&pf);
2848
2849 dm_consume_args(&as, 4);
2850 r = parse_pool_features(&as, &pf, ti);
2851 if (r)
2852 goto out_unlock;
2853
2854 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2855 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
991d9fa0
JT
2856 if (r) {
2857 ti->error = "Error opening metadata block device";
2858 goto out_unlock;
2859 }
7d48935e 2860 warn_if_metadata_device_too_big(metadata_dev->bdev);
991d9fa0
JT
2861
2862 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2863 if (r) {
2864 ti->error = "Error getting data device";
2865 goto out_metadata;
2866 }
2867
2868 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2869 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2870 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 2871 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
2872 ti->error = "Invalid block size";
2873 r = -EINVAL;
2874 goto out;
2875 }
2876
2877 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2878 ti->error = "Invalid low water mark";
2879 r = -EINVAL;
2880 goto out;
2881 }
2882
991d9fa0
JT
2883 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2884 if (!pt) {
2885 r = -ENOMEM;
2886 goto out;
2887 }
2888
2889 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 2890 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
2891 if (IS_ERR(pool)) {
2892 r = PTR_ERR(pool);
2893 goto out_free_pt;
2894 }
2895
67e2e2b2
JT
2896 /*
2897 * 'pool_created' reflects whether this is the first table load.
2898 * Top level discard support is not allowed to be changed after
2899 * initial load. This would require a pool reload to trigger thin
2900 * device changes.
2901 */
2902 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2903 ti->error = "Discard support cannot be disabled once enabled";
2904 r = -EINVAL;
2905 goto out_flags_changed;
2906 }
2907
991d9fa0
JT
2908 pt->pool = pool;
2909 pt->ti = ti;
2910 pt->metadata_dev = metadata_dev;
2911 pt->data_dev = data_dev;
2912 pt->low_water_blocks = low_water_blocks;
0424caa1 2913 pt->adjusted_pf = pt->requested_pf = pf;
55a62eef 2914 ti->num_flush_bios = 1;
9bc142dd 2915
67e2e2b2
JT
2916 /*
2917 * Only need to enable discards if the pool should pass
2918 * them down to the data device. The thin device's discard
2919 * processing will cause mappings to be removed from the btree.
2920 */
b60ab990 2921 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 2922 if (pf.discard_enabled && pf.discard_passdown) {
55a62eef 2923 ti->num_discard_bios = 1;
9bc142dd 2924
67e2e2b2
JT
2925 /*
2926 * Setting 'discards_supported' circumvents the normal
2927 * stacking of discard limits (this keeps the pool and
2928 * thin devices' discard limits consistent).
2929 */
0ac55489 2930 ti->discards_supported = true;
67e2e2b2 2931 }
991d9fa0
JT
2932 ti->private = pt;
2933
ac8c3f3d
JT
2934 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2935 calc_metadata_threshold(pt),
2936 metadata_low_callback,
2937 pool);
2938 if (r)
2939 goto out_free_pt;
2940
991d9fa0
JT
2941 pt->callbacks.congested_fn = pool_is_congested;
2942 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2943
2944 mutex_unlock(&dm_thin_pool_table.mutex);
2945
2946 return 0;
2947
67e2e2b2
JT
2948out_flags_changed:
2949 __pool_dec(pool);
991d9fa0
JT
2950out_free_pt:
2951 kfree(pt);
2952out:
2953 dm_put_device(ti, data_dev);
2954out_metadata:
2955 dm_put_device(ti, metadata_dev);
2956out_unlock:
2957 mutex_unlock(&dm_thin_pool_table.mutex);
2958
2959 return r;
2960}
2961
7de3ee57 2962static int pool_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2963{
2964 int r;
2965 struct pool_c *pt = ti->private;
2966 struct pool *pool = pt->pool;
2967 unsigned long flags;
2968
2969 /*
2970 * As this is a singleton target, ti->begin is always zero.
2971 */
2972 spin_lock_irqsave(&pool->lock, flags);
2973 bio->bi_bdev = pt->data_dev->bdev;
2974 r = DM_MAPIO_REMAPPED;
2975 spin_unlock_irqrestore(&pool->lock, flags);
2976
2977 return r;
2978}
2979
b17446df 2980static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
991d9fa0
JT
2981{
2982 int r;
2983 struct pool_c *pt = ti->private;
2984 struct pool *pool = pt->pool;
55f2b8bd
MS
2985 sector_t data_size = ti->len;
2986 dm_block_t sb_data_size;
991d9fa0 2987
b17446df 2988 *need_commit = false;
991d9fa0 2989
55f2b8bd
MS
2990 (void) sector_div(data_size, pool->sectors_per_block);
2991
991d9fa0
JT
2992 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2993 if (r) {
4fa5971a
MS
2994 DMERR("%s: failed to retrieve data device size",
2995 dm_device_name(pool->pool_md));
991d9fa0
JT
2996 return r;
2997 }
2998
2999 if (data_size < sb_data_size) {
4fa5971a
MS
3000 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
3001 dm_device_name(pool->pool_md),
55f2b8bd 3002 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
3003 return -EINVAL;
3004
3005 } else if (data_size > sb_data_size) {
07f2b6e0
MS
3006 if (dm_pool_metadata_needs_check(pool->pmd)) {
3007 DMERR("%s: unable to grow the data device until repaired.",
3008 dm_device_name(pool->pool_md));
3009 return 0;
3010 }
3011
6f7f51d4
MS
3012 if (sb_data_size)
3013 DMINFO("%s: growing the data device from %llu to %llu blocks",
3014 dm_device_name(pool->pool_md),
3015 sb_data_size, (unsigned long long)data_size);
991d9fa0
JT
3016 r = dm_pool_resize_data_dev(pool->pmd, data_size);
3017 if (r) {
b5330655 3018 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
991d9fa0
JT
3019 return r;
3020 }
3021
b17446df 3022 *need_commit = true;
991d9fa0
JT
3023 }
3024
3025 return 0;
3026}
3027
24347e95
JT
3028static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
3029{
3030 int r;
3031 struct pool_c *pt = ti->private;
3032 struct pool *pool = pt->pool;
3033 dm_block_t metadata_dev_size, sb_metadata_dev_size;
3034
3035 *need_commit = false;
3036
610bba8b 3037 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
24347e95
JT
3038
3039 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
3040 if (r) {
4fa5971a
MS
3041 DMERR("%s: failed to retrieve metadata device size",
3042 dm_device_name(pool->pool_md));
24347e95
JT
3043 return r;
3044 }
3045
3046 if (metadata_dev_size < sb_metadata_dev_size) {
4fa5971a
MS
3047 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
3048 dm_device_name(pool->pool_md),
24347e95
JT
3049 metadata_dev_size, sb_metadata_dev_size);
3050 return -EINVAL;
3051
3052 } else if (metadata_dev_size > sb_metadata_dev_size) {
07f2b6e0
MS
3053 if (dm_pool_metadata_needs_check(pool->pmd)) {
3054 DMERR("%s: unable to grow the metadata device until repaired.",
3055 dm_device_name(pool->pool_md));
3056 return 0;
3057 }
3058
7d48935e 3059 warn_if_metadata_device_too_big(pool->md_dev);
6f7f51d4
MS
3060 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
3061 dm_device_name(pool->pool_md),
3062 sb_metadata_dev_size, metadata_dev_size);
24347e95
JT
3063 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
3064 if (r) {
b5330655 3065 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
24347e95
JT
3066 return r;
3067 }
3068
3069 *need_commit = true;
3070 }
3071
3072 return 0;
3073}
3074
b17446df
JT
3075/*
3076 * Retrieves the number of blocks of the data device from
3077 * the superblock and compares it to the actual device size,
3078 * thus resizing the data device in case it has grown.
3079 *
3080 * This both copes with opening preallocated data devices in the ctr
3081 * being followed by a resume
3082 * -and-
3083 * calling the resume method individually after userspace has
3084 * grown the data device in reaction to a table event.
3085 */
3086static int pool_preresume(struct dm_target *ti)
3087{
3088 int r;
24347e95 3089 bool need_commit1, need_commit2;
b17446df
JT
3090 struct pool_c *pt = ti->private;
3091 struct pool *pool = pt->pool;
3092
3093 /*
3094 * Take control of the pool object.
3095 */
3096 r = bind_control_target(pool, ti);
3097 if (r)
3098 return r;
3099
3100 r = maybe_resize_data_dev(ti, &need_commit1);
3101 if (r)
3102 return r;
3103
24347e95
JT
3104 r = maybe_resize_metadata_dev(ti, &need_commit2);
3105 if (r)
3106 return r;
3107
3108 if (need_commit1 || need_commit2)
020cc3b5 3109 (void) commit(pool);
b17446df
JT
3110
3111 return 0;
3112}
3113
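/*
 * Note that only growing is handled here: if either device is smaller than
 * the size recorded in the superblock the resize is refused with -EINVAL,
 * and a pool whose metadata needs_check flag is set is left alone until it
 * has been repaired.  Userspace typically reacts to a low water mark or
 * metadata threshold event by extending the device and then reloading and
 * resuming the pool table, which re-enters this path.
 */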
991d9fa0
JT
3114static void pool_resume(struct dm_target *ti)
3115{
3116 struct pool_c *pt = ti->private;
3117 struct pool *pool = pt->pool;
3118 unsigned long flags;
3119
3120 spin_lock_irqsave(&pool->lock, flags);
88a6621b 3121 pool->low_water_triggered = false;
991d9fa0 3122 spin_unlock_irqrestore(&pool->lock, flags);
c140e1c4 3123 requeue_bios(pool);
991d9fa0 3124
905e51b3 3125 do_waker(&pool->waker.work);
991d9fa0
JT
3126}
3127
3128static void pool_postsuspend(struct dm_target *ti)
3129{
991d9fa0
JT
3130 struct pool_c *pt = ti->private;
3131 struct pool *pool = pt->pool;
3132
905e51b3 3133 cancel_delayed_work(&pool->waker);
85ad643b 3134 cancel_delayed_work(&pool->no_space_timeout);
991d9fa0 3135 flush_workqueue(pool->wq);
020cc3b5 3136 (void) commit(pool);
991d9fa0
JT
3137}
3138
3139static int check_arg_count(unsigned argc, unsigned args_required)
3140{
3141 if (argc != args_required) {
3142 DMWARN("Message received with %u arguments instead of %u.",
3143 argc, args_required);
3144 return -EINVAL;
3145 }
3146
3147 return 0;
3148}
3149
3150static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
3151{
3152 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
3153 *dev_id <= MAX_DEV_ID)
3154 return 0;
3155
3156 if (warning)
3157 DMWARN("Message received with invalid device id: %s", arg);
3158
3159 return -EINVAL;
3160}
3161
3162static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
3163{
3164 dm_thin_id dev_id;
3165 int r;
3166
3167 r = check_arg_count(argc, 2);
3168 if (r)
3169 return r;
3170
3171 r = read_dev_id(argv[1], &dev_id, 1);
3172 if (r)
3173 return r;
3174
3175 r = dm_pool_create_thin(pool->pmd, dev_id);
3176 if (r) {
3177 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
3178 argv[1]);
3179 return r;
3180 }
3181
3182 return 0;
3183}
3184
3185static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3186{
3187 dm_thin_id dev_id;
3188 dm_thin_id origin_dev_id;
3189 int r;
3190
3191 r = check_arg_count(argc, 3);
3192 if (r)
3193 return r;
3194
3195 r = read_dev_id(argv[1], &dev_id, 1);
3196 if (r)
3197 return r;
3198
3199 r = read_dev_id(argv[2], &origin_dev_id, 1);
3200 if (r)
3201 return r;
3202
3203 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
3204 if (r) {
3205 DMWARN("Creation of new snapshot %s of device %s failed.",
3206 argv[1], argv[2]);
3207 return r;
3208 }
3209
3210 return 0;
3211}
3212
3213static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
3214{
3215 dm_thin_id dev_id;
3216 int r;
3217
3218 r = check_arg_count(argc, 2);
3219 if (r)
3220 return r;
3221
3222 r = read_dev_id(argv[1], &dev_id, 1);
3223 if (r)
3224 return r;
3225
3226 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
3227 if (r)
3228 DMWARN("Deletion of thin device %s failed.", argv[1]);
3229
3230 return r;
3231}
3232
3233static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
3234{
3235 dm_thin_id old_id, new_id;
3236 int r;
3237
3238 r = check_arg_count(argc, 3);
3239 if (r)
3240 return r;
3241
3242 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
3243 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
3244 return -EINVAL;
3245 }
3246
3247 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
3248 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
3249 return -EINVAL;
3250 }
3251
3252 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
3253 if (r) {
3254 DMWARN("Failed to change transaction id from %s to %s.",
3255 argv[1], argv[2]);
3256 return r;
3257 }
3258
3259 return 0;
3260}
3261
cc8394d8
JT
3262static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3263{
3264 int r;
3265
3266 r = check_arg_count(argc, 1);
3267 if (r)
3268 return r;
3269
020cc3b5 3270 (void) commit(pool);
0d200aef 3271
cc8394d8
JT
3272 r = dm_pool_reserve_metadata_snap(pool->pmd);
3273 if (r)
3274 DMWARN("reserve_metadata_snap message failed.");
3275
3276 return r;
3277}
3278
3279static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
3280{
3281 int r;
3282
3283 r = check_arg_count(argc, 1);
3284 if (r)
3285 return r;
3286
3287 r = dm_pool_release_metadata_snap(pool->pmd);
3288 if (r)
3289 DMWARN("release_metadata_snap message failed.");
3290
3291 return r;
3292}
3293
991d9fa0
JT
3294/*
3295 * Messages supported:
3296 * create_thin <dev_id>
3297 * create_snap <dev_id> <origin_id>
3298 * delete <dev_id>
3299 * trim <dev_id> <new_size_in_sectors>
3300 * set_transaction_id <current_trans_id> <new_trans_id>
cc8394d8
JT
3301 * reserve_metadata_snap
3302 * release_metadata_snap
991d9fa0
JT
3303 */
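/*
 * For illustration (the pool device name is hypothetical), these messages
 * are normally sent to the pool device with dmsetup, e.g.:
 *
 *   dmsetup message /dev/mapper/my_pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/my_pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/my_pool 0 "delete 1"
 */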
3304static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
3305{
3306 int r = -EINVAL;
3307 struct pool_c *pt = ti->private;
3308 struct pool *pool = pt->pool;
3309
3310 if (!strcasecmp(argv[0], "create_thin"))
3311 r = process_create_thin_mesg(argc, argv, pool);
3312
3313 else if (!strcasecmp(argv[0], "create_snap"))
3314 r = process_create_snap_mesg(argc, argv, pool);
3315
3316 else if (!strcasecmp(argv[0], "delete"))
3317 r = process_delete_mesg(argc, argv, pool);
3318
3319 else if (!strcasecmp(argv[0], "set_transaction_id"))
3320 r = process_set_transaction_id_mesg(argc, argv, pool);
3321
cc8394d8
JT
3322 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
3323 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
3324
3325 else if (!strcasecmp(argv[0], "release_metadata_snap"))
3326 r = process_release_metadata_snap_mesg(argc, argv, pool);
3327
991d9fa0
JT
3328 else
3329 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
3330
e49e5829 3331 if (!r)
020cc3b5 3332 (void) commit(pool);
991d9fa0
JT
3333
3334 return r;
3335}
3336
e49e5829
JT
3337static void emit_flags(struct pool_features *pf, char *result,
3338 unsigned sz, unsigned maxlen)
3339{
3340 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
787a996c
MS
3341 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
3342 pf->error_if_no_space;
e49e5829
JT
3343 DMEMIT("%u ", count);
3344
3345 if (!pf->zero_new_blocks)
3346 DMEMIT("skip_block_zeroing ");
3347
3348 if (!pf->discard_enabled)
3349 DMEMIT("ignore_discard ");
3350
3351 if (!pf->discard_passdown)
3352 DMEMIT("no_discard_passdown ");
3353
3354 if (pf->mode == PM_READ_ONLY)
3355 DMEMIT("read_only ");
787a996c
MS
3356
3357 if (pf->error_if_no_space)
3358 DMEMIT("error_if_no_space ");
e49e5829
JT
3359}
3360
991d9fa0
JT
3361/*
3362 * Status line is:
 3363 * <transaction id> <used metadata blocks>/<total metadata blocks>
 3364 * <used data blocks>/<total data blocks> <held metadata root>
3365 */
fd7c092e
MP
3366static void pool_status(struct dm_target *ti, status_type_t type,
3367 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0 3368{
e49e5829 3369 int r;
991d9fa0
JT
3370 unsigned sz = 0;
3371 uint64_t transaction_id;
3372 dm_block_t nr_free_blocks_data;
3373 dm_block_t nr_free_blocks_metadata;
3374 dm_block_t nr_blocks_data;
3375 dm_block_t nr_blocks_metadata;
3376 dm_block_t held_root;
3377 char buf[BDEVNAME_SIZE];
3378 char buf2[BDEVNAME_SIZE];
3379 struct pool_c *pt = ti->private;
3380 struct pool *pool = pt->pool;
3381
3382 switch (type) {
3383 case STATUSTYPE_INFO:
e49e5829
JT
3384 if (get_pool_mode(pool) == PM_FAIL) {
3385 DMEMIT("Fail");
3386 break;
3387 }
3388
1f4e0ff0
AK
3389 /* Commit to ensure statistics aren't out-of-date */
3390 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
020cc3b5 3391 (void) commit(pool);
1f4e0ff0 3392
fd7c092e
MP
3393 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3394 if (r) {
4fa5971a
MS
3395 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3396 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3397 goto err;
3398 }
991d9fa0 3399
fd7c092e
MP
3400 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3401 if (r) {
4fa5971a
MS
3402 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3403 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3404 goto err;
3405 }
991d9fa0
JT
3406
3407 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
fd7c092e 3408 if (r) {
4fa5971a
MS
3409 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3410 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3411 goto err;
3412 }
991d9fa0 3413
fd7c092e
MP
3414 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3415 if (r) {
4fa5971a
MS
3416 DMERR("%s: dm_pool_get_free_block_count returned %d",
3417 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3418 goto err;
3419 }
991d9fa0
JT
3420
3421 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
fd7c092e 3422 if (r) {
4fa5971a
MS
3423 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3424 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3425 goto err;
3426 }
991d9fa0 3427
cc8394d8 3428 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
fd7c092e 3429 if (r) {
4fa5971a
MS
3430 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3431 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3432 goto err;
3433 }
991d9fa0
JT
3434
3435 DMEMIT("%llu %llu/%llu %llu/%llu ",
3436 (unsigned long long)transaction_id,
3437 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3438 (unsigned long long)nr_blocks_metadata,
3439 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3440 (unsigned long long)nr_blocks_data);
3441
3442 if (held_root)
e49e5829
JT
3443 DMEMIT("%llu ", held_root);
3444 else
3445 DMEMIT("- ");
3446
3e1a0699
JT
3447 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3448 DMEMIT("out_of_data_space ");
3449 else if (pool->pf.mode == PM_READ_ONLY)
e49e5829 3450 DMEMIT("ro ");
991d9fa0 3451 else
e49e5829
JT
3452 DMEMIT("rw ");
3453
018debea 3454 if (!pool->pf.discard_enabled)
787a996c 3455 DMEMIT("ignore_discard ");
018debea 3456 else if (pool->pf.discard_passdown)
787a996c
MS
3457 DMEMIT("discard_passdown ");
3458 else
3459 DMEMIT("no_discard_passdown ");
3460
3461 if (pool->pf.error_if_no_space)
3462 DMEMIT("error_if_no_space ");
e49e5829 3463 else
787a996c 3464 DMEMIT("queue_if_no_space ");
991d9fa0
JT
3465
3466 break;
3467
3468 case STATUSTYPE_TABLE:
3469 DMEMIT("%s %s %lu %llu ",
3470 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3471 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3472 (unsigned long)pool->sectors_per_block,
3473 (unsigned long long)pt->low_water_blocks);
0424caa1 3474 emit_flags(&pt->requested_pf, result, sz, maxlen);
991d9fa0
JT
3475 break;
3476 }
fd7c092e 3477 return;
991d9fa0 3478
fd7c092e
MP
3479err:
3480 DMEMIT("Error");
991d9fa0
JT
3481}
3482
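/*
 * For illustration, the STATUSTYPE_INFO line built above for a healthy
 * read-write pool looks roughly like (all numbers hypothetical):
 *
 *   0 141/32768 368/491520 - rw discard_passdown queue_if_no_space
 *
 * i.e. transaction id, used/total metadata blocks, used/total data blocks,
 * held metadata root ("-" if none), pool mode, discard policy and the
 * no-space policy.
 */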
3483static int pool_iterate_devices(struct dm_target *ti,
3484 iterate_devices_callout_fn fn, void *data)
3485{
3486 struct pool_c *pt = ti->private;
3487
3488 return fn(ti, pt->data_dev, 0, ti->len, data);
3489}
3490
3491static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3492 struct bio_vec *biovec, int max_size)
3493{
3494 struct pool_c *pt = ti->private;
3495 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3496
3497 if (!q->merge_bvec_fn)
3498 return max_size;
3499
3500 bvm->bi_bdev = pt->data_dev->bdev;
3501
3502 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3503}
3504
0424caa1 3505static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
104655fd 3506{
0424caa1
MS
3507 struct pool *pool = pt->pool;
3508 struct queue_limits *data_limits;
3509
104655fd
JT
3510 limits->max_discard_sectors = pool->sectors_per_block;
3511
3512 /*
0424caa1 3513 * discard_granularity is just a hint, and not enforced.
104655fd 3514 */
0424caa1
MS
3515 if (pt->adjusted_pf.discard_passdown) {
3516 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
09869de5
LC
3517 limits->discard_granularity = max(data_limits->discard_granularity,
3518 pool->sectors_per_block << SECTOR_SHIFT);
f13945d7 3519 } else
0424caa1 3520 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
104655fd
JT
3521}
3522
991d9fa0
JT
static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct pool_c *pt = ti->private;
        struct pool *pool = pt->pool;
        sector_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

        /*
         * Adjust max_sectors_kb to highest possible power-of-2
         * factor of pool->sectors_per_block.
         */
        if (limits->max_hw_sectors & (limits->max_hw_sectors - 1))
                limits->max_sectors = rounddown_pow_of_two(limits->max_hw_sectors);
        else
                limits->max_sectors = limits->max_hw_sectors;

        if (limits->max_sectors < pool->sectors_per_block) {
                while (!is_factor(pool->sectors_per_block, limits->max_sectors)) {
                        if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
                                limits->max_sectors--;
                        limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
                }
        } else if (block_size_is_power_of_two(pool)) {
                /* max_sectors_kb is >= power-of-2 thinp blocksize */
                while (!is_factor(limits->max_sectors, pool->sectors_per_block)) {
                        if ((limits->max_sectors & (limits->max_sectors - 1)) == 0)
                                limits->max_sectors--;
                        limits->max_sectors = rounddown_pow_of_two(limits->max_sectors);
                }
        }

        /*
         * If the system-determined stacked limits are compatible with the
         * pool's blocksize (io_opt is a factor) do not override them.
         */
        if (io_opt_sectors < pool->sectors_per_block ||
            !is_factor(io_opt_sectors, pool->sectors_per_block)) {
                if (is_factor(pool->sectors_per_block, limits->max_sectors))
                        blk_limits_io_min(limits, limits->max_sectors << SECTOR_SHIFT);
                else
                        blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
                blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
        }

        /*
         * pt->adjusted_pf is a staging area for the actual features to use.
         * They get transferred to the live pool in bind_control_target()
         * called from pool_preresume().
         */
        if (!pt->adjusted_pf.discard_enabled) {
                /*
                 * Must explicitly disallow stacking discard limits otherwise the
                 * block layer will stack them if pool's data device has support.
                 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
                 * user to see that, so make sure to set all discard limits to 0.
                 */
                limits->discard_granularity = 0;
                return;
        }

        disable_passdown_if_not_supported(pt);

        set_discard_limits(pt, limits);
}

static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
        .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
        .map = pool_map,
        .postsuspend = pool_postsuspend,
        .preresume = pool_preresume,
        .resume = pool_resume,
        .message = pool_message,
        .status = pool_status,
        .merge = pool_merge,
        .iterate_devices = pool_iterate_devices,
        .io_hints = pool_io_hints,
};

/*----------------------------------------------------------------
 * Thin target methods
 *--------------------------------------------------------------*/
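/*
 * A thin_c is reference counted: thin_dtr() drops the initial reference
 * and then waits on tc->can_destroy, which thin_put() completes once the
 * last holder has let go.
 */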
static void thin_get(struct thin_c *tc)
{
        atomic_inc(&tc->refcount);
}

static void thin_put(struct thin_c *tc)
{
        if (atomic_dec_and_test(&tc->refcount))
                complete(&tc->can_destroy);
}

static void thin_dtr(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;
        unsigned long flags;

        thin_put(tc);
        wait_for_completion(&tc->can_destroy);

        spin_lock_irqsave(&tc->pool->lock, flags);
        list_del_rcu(&tc->list);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
        synchronize_rcu();

        mutex_lock(&dm_thin_pool_table.mutex);

        __pool_dec(tc->pool);
        dm_pool_close_thin_device(tc->td);
        dm_put_device(ti, tc->pool_dev);
        if (tc->origin_dev)
                dm_put_device(ti, tc->origin_dev);
        kfree(tc);

        mutex_unlock(&dm_thin_pool_table.mutex);
}

/*
 * Thin target parameters:
 *
 * <pool_dev> <dev_id> [origin_dev]
 *
 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
 * dev_id: the internal device identifier
 * origin_dev: a device external to the pool that should act as the origin
 *
 * If the pool device has discards disabled, they get disabled for the thin
 * device as well.
 */
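/*
 * Illustrative example (names and sizes are arbitrary): a 1GiB thin volume
 * backed by thin device 0 of an existing pool could be created with:
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 */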
static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        struct thin_c *tc;
        struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
        unsigned long flags;

        mutex_lock(&dm_thin_pool_table.mutex);

        if (argc != 2 && argc != 3) {
                ti->error = "Invalid argument count";
                r = -EINVAL;
                goto out_unlock;
        }

        tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
        if (!tc) {
                ti->error = "Out of memory";
                r = -ENOMEM;
                goto out_unlock;
        }
        spin_lock_init(&tc->lock);
        INIT_LIST_HEAD(&tc->deferred_cells);
        bio_list_init(&tc->deferred_bio_list);
        bio_list_init(&tc->retry_on_resume_list);
        tc->sort_bio_list = RB_ROOT;

        if (argc == 3) {
                r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
                if (r) {
                        ti->error = "Error opening origin device";
                        goto bad_origin_dev;
                }
                tc->origin_dev = origin_dev;
        }

        r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
        if (r) {
                ti->error = "Error opening pool device";
                goto bad_pool_dev;
        }
        tc->pool_dev = pool_dev;

        if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
                ti->error = "Invalid device id";
                r = -EINVAL;
                goto bad_common;
        }

        pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
        if (!pool_md) {
                ti->error = "Couldn't get pool mapped device";
                r = -EINVAL;
                goto bad_common;
        }

        tc->pool = __pool_table_lookup(pool_md);
        if (!tc->pool) {
                ti->error = "Couldn't find pool object";
                r = -EINVAL;
                goto bad_pool_lookup;
        }
        __pool_inc(tc->pool);

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                ti->error = "Couldn't open thin device, Pool is in fail mode";
                r = -EINVAL;
                goto bad_thin_open;
        }

        r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
        if (r) {
                ti->error = "Couldn't open thin internal device";
                goto bad_thin_open;
        }

        r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
        if (r)
                goto bad_target_max_io_len;

        ti->num_flush_bios = 1;
        ti->flush_supported = true;
        ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);

        /* In case the pool supports discards, pass them on. */
        ti->discard_zeroes_data_unsupported = true;
        if (tc->pool->pf.discard_enabled) {
                ti->discards_supported = true;
                ti->num_discard_bios = 1;
                /* Discard bios must be split on a block boundary */
                ti->split_discard_bios = true;
        }

        dm_put(pool_md);

        mutex_unlock(&dm_thin_pool_table.mutex);

        atomic_set(&tc->refcount, 1);
        init_completion(&tc->can_destroy);

        spin_lock_irqsave(&tc->pool->lock, flags);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
         * This synchronize_rcu() call is needed here otherwise we risk a
         * wake_worker() call finding no bios to process (because the newly
         * added tc isn't yet visible). So this reduces latency since we
         * aren't then dependent on the periodic commit to wake_worker().
         */
        synchronize_rcu();

        return 0;

bad_target_max_io_len:
        dm_pool_close_thin_device(tc->td);
bad_thin_open:
        __pool_dec(tc->pool);
bad_pool_lookup:
        dm_put(pool_md);
bad_common:
        dm_put_device(ti, tc->pool_dev);
bad_pool_dev:
        if (tc->origin_dev)
                dm_put_device(ti, tc->origin_dev);
bad_origin_dev:
        kfree(tc);
out_unlock:
        mutex_unlock(&dm_thin_pool_table.mutex);

        return r;
}

static int thin_map(struct dm_target *ti, struct bio *bio)
{
        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

        return thin_bio_map(ti, bio);
}

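/*
 * On completion, release any deferred-set entries held by this bio's endio
 * hook: mappings waiting on the shared-read entry have their preparation
 * completed, and quiesced discard mappings are moved onto the pool's
 * prepared_discards list before the worker is woken.
 */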
static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
{
        unsigned long flags;
        struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
        struct list_head work;
        struct dm_thin_new_mapping *m, *tmp;
        struct pool *pool = h->tc->pool;

        if (h->shared_read_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->shared_read_entry, &work);

                spin_lock_irqsave(&pool->lock, flags);
                list_for_each_entry_safe(m, tmp, &work, list) {
                        list_del(&m->list);
                        __complete_mapping_preparation(m);
                }
                spin_unlock_irqrestore(&pool->lock, flags);
        }

        if (h->all_io_entry) {
                INIT_LIST_HEAD(&work);
                dm_deferred_entry_dec(h->all_io_entry, &work);
                if (!list_empty(&work)) {
                        spin_lock_irqsave(&pool->lock, flags);
                        list_for_each_entry_safe(m, tmp, &work, list)
                                list_add_tail(&m->list, &pool->prepared_discards);
                        spin_unlock_irqrestore(&pool->lock, flags);
                        wake_worker(pool);
                }
        }

        return 0;
}

static void thin_presuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        if (dm_noflush_suspending(ti))
                noflush_work(tc, do_noflush_start);
}

static void thin_postsuspend(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        /*
         * The dm_noflush_suspending flag has been cleared by now, so
         * unfortunately we must always run this.
         */
        noflush_work(tc, do_noflush_stop);
}

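/*
 * Refresh the cached size of the external origin (if any) before the thin
 * device is resumed.
 */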
static int thin_preresume(struct dm_target *ti)
{
        struct thin_c *tc = ti->private;

        if (tc->origin_dev)
                tc->origin_size = get_dev_size(tc->origin_dev->bdev);

        return 0;
}

/*
 * <nr mapped sectors> <highest mapped sector>
 */
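/*
 * For example (illustrative numbers): a thin device with 100 mapped blocks,
 * a 1024-sector block size and highest mapped block 99 reports
 * "102400 102399".
 */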
static void thin_status(struct dm_target *ti, status_type_t type,
                        unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        ssize_t sz = 0;
        dm_block_t mapped, highest;
        char buf[BDEVNAME_SIZE];
        struct thin_c *tc = ti->private;

        if (get_pool_mode(tc->pool) == PM_FAIL) {
                DMEMIT("Fail");
                return;
        }

        if (!tc->td)
                DMEMIT("-");
        else {
                switch (type) {
                case STATUSTYPE_INFO:
                        r = dm_thin_get_mapped_count(tc->td, &mapped);
                        if (r) {
                                DMERR("dm_thin_get_mapped_count returned %d", r);
                                goto err;
                        }

                        r = dm_thin_get_highest_mapped_block(tc->td, &highest);
                        if (r < 0) {
                                DMERR("dm_thin_get_highest_mapped_block returned %d", r);
                                goto err;
                        }

                        DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
                        if (r)
                                DMEMIT("%llu", ((highest + 1) *
                                                tc->pool->sectors_per_block) - 1);
                        else
                                DMEMIT("-");
                        break;

                case STATUSTYPE_TABLE:
                        DMEMIT("%s %lu",
                               format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
                               (unsigned long) tc->dev_id);
                        if (tc->origin_dev)
                                DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
                        break;
                }
        }

        return;

err:
        DMEMIT("Error");
}

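/*
 * As with pool_merge(), defer the decision to the underlying queue's
 * merge_bvec_fn, but first remap the sector to be relative to the start
 * of this target and point the merge data at the pool device.
 */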
static int thin_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
                      struct bio_vec *biovec, int max_size)
{
        struct thin_c *tc = ti->private;
        struct request_queue *q = bdev_get_queue(tc->pool_dev->bdev);

        if (!q->merge_bvec_fn)
                return max_size;

        bvm->bi_bdev = tc->pool_dev->bdev;
        bvm->bi_sector = dm_target_offset(ti, bvm->bi_sector);

        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
}

static int thin_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
{
        sector_t blocks;
        struct thin_c *tc = ti->private;
        struct pool *pool = tc->pool;

        /*
         * We can't call dm_pool_get_data_dev_size() since that blocks. So
         * we follow a more convoluted path through to the pool's target.
         */
        if (!pool->ti)
                return 0; /* nothing is bound */

        blocks = pool->ti->len;
        (void) sector_div(blocks, pool->sectors_per_block);
        if (blocks)
                return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);

        return 0;
}

static struct target_type thin_target = {
        .name = "thin",
        .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
        .map = thin_map,
        .end_io = thin_endio,
        .preresume = thin_preresume,
        .presuspend = thin_presuspend,
        .postsuspend = thin_postsuspend,
        .status = thin_status,
        .merge = thin_merge,
        .iterate_devices = thin_iterate_devices,
};

/*----------------------------------------------------------------*/

static int __init dm_thin_init(void)
{
        int r;

        pool_table_init();

        r = dm_register_target(&thin_target);
        if (r)
                return r;

        r = dm_register_target(&pool_target);
        if (r)
                goto bad_pool_target;

        r = -ENOMEM;

        _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
        if (!_new_mapping_cache)
                goto bad_new_mapping_cache;

        return 0;

bad_new_mapping_cache:
        dm_unregister_target(&pool_target);
bad_pool_target:
        dm_unregister_target(&thin_target);

        return r;
}

static void dm_thin_exit(void)
{
        dm_unregister_target(&thin_target);
        dm_unregister_target(&pool_target);

        kmem_cache_destroy(_new_mapping_cache);
}

module_init(dm_thin_init);
module_exit(dm_thin_exit);

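/*
 * no_space_timeout can also be changed at runtime via sysfs, e.g.
 * (assuming the target is built as the usual dm_thin_pool module):
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */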
module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");

MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");