dm thin: prefetch missing metadata pages
drivers/md/dm-thin.c (deliverable/linux.git)
991d9fa0 1/*
e49e5829 2 * Copyright (C) 2011-2012 Red Hat UK.
991d9fa0
JT
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm-thin-metadata.h"
4f81a417 8#include "dm-bio-prison.h"
1f4e0ff0 9#include "dm.h"
991d9fa0
JT
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/dm-kcopyd.h>
14#include <linux/list.h>
c140e1c4 15#include <linux/rculist.h>
991d9fa0
JT
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
67324ea1 19#include <linux/rbtree.h>
991d9fa0
JT
20
21#define DM_MSG_PREFIX "thin"
22
23/*
24 * Tunable constants
25 */
7768ed33 26#define ENDIO_HOOK_POOL_SIZE 1024
991d9fa0 27#define MAPPING_POOL_SIZE 1024
905e51b3 28#define COMMIT_PERIOD HZ
80c57893
MS
29#define NO_SPACE_TIMEOUT_SECS 60
30
31static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
991d9fa0 32
df5d2e90
MP
33DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
34 "A percentage of time allocated for copy on write");
35
991d9fa0
JT
36/*
37 * The block size of the device holding pool data must be
38 * between 64KB and 1GB.
39 */
40#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
41#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
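/*
 * Illustrative arithmetic (not part of the original source): with the usual
 * SECTOR_SHIFT of 9 (512-byte sectors) the limits above work out to
 *
 *   DATA_DEV_BLOCK_SIZE_MIN_SECTORS = (64 * 1024) >> 9              = 128 sectors     (64KiB)
 *   DATA_DEV_BLOCK_SIZE_MAX_SECTORS = (1024 * 1024 * 1024) >> 9     = 2097152 sectors (1GiB)
 *
 * so a pool's data block size is always expressed as a whole number of
 * 512-byte sectors between those two bounds.
 */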
42
991d9fa0
JT
43/*
44 * Device id is restricted to 24 bits.
45 */
46#define MAX_DEV_ID ((1 << 24) - 1)
47
48/*
49 * How do we handle breaking sharing of data blocks?
50 * =================================================
51 *
52 * We use a standard copy-on-write btree to store the mappings for the
53 * devices (note I'm talking about copy-on-write of the metadata here, not
54 * the data). When you take an internal snapshot you clone the root node
55 * of the origin btree. After this there is no concept of an origin or a
56 * snapshot. They are just two device trees that happen to point to the
57 * same data blocks.
58 *
59 * When we get a write in we decide if it's to a shared data block using
60 * some timestamp magic. If it is, we have to break sharing.
61 *
62 * Let's say we write to a shared block in what was the origin. The
63 * steps are:
64 *
 65 * i) plug further io to this physical block. (see bio_prison code).
66 *
67 * ii) quiesce any read io to that shared data block. Obviously
44feb387 68 * including all devices that share this block. (see dm_deferred_set code)
991d9fa0
JT
69 *
 70 * iii) copy the data block to a newly allocated block. This step can be
 71 * skipped if the io covers the block. (schedule_copy).
72 *
73 * iv) insert the new mapping into the origin's btree
fe878f34 74 * (process_prepared_mapping). This act of inserting breaks some
991d9fa0
JT
75 * sharing of btree nodes between the two devices. Breaking sharing only
 76 * affects the btree of that specific device. Btrees for the other
77 * devices that share the block never change. The btree for the origin
 78 * device as it was after the last commit is untouched, i.e. we're using
79 * persistent data structures in the functional programming sense.
80 *
81 * v) unplug io to this physical block, including the io that triggered
82 * the breaking of sharing.
83 *
84 * Steps (ii) and (iii) occur in parallel.
85 *
86 * The metadata _doesn't_ need to be committed before the io continues. We
87 * get away with this because the io is always written to a _new_ block.
88 * If there's a crash, then:
89 *
90 * - The origin mapping will point to the old origin block (the shared
91 * one). This will contain the data as it was before the io that triggered
92 * the breaking of sharing came in.
93 *
94 * - The snap mapping still points to the old block. As it would after
95 * the commit.
96 *
97 * The downside of this scheme is the timestamp magic isn't perfect, and
 98 * will continue to think that the data block in the snapshot device is shared
99 * even after the write to the origin has broken sharing. I suspect data
100 * blocks will typically be shared by many different devices, so we're
101 * breaking sharing n + 1 times, rather than n, where n is the number of
102 * devices that reference this data block. At the moment I think the
103 * benefits far, far outweigh the disadvantages.
104 */
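/*
 * A condensed sketch of steps (i)..(v) above, in pseudocode. This is an
 * illustration only -- the real flow is spread across bio_detain(),
 * schedule_copy() and process_prepared_mapping() below:
 *
 *   build_data_key(td, shared_block, &key);
 *   bio_detain(pool, &key, bio, &cell);         // (i)   plug further io
 *   quiesce readers via the dm_deferred_set     // (ii)
 *   kcopyd copy shared_block -> new_block       // (iii) skipped if bio covers the block
 *   dm_thin_insert_block(td, virt, new_block);  // (iv)
 *   cell_defer(tc, cell);                       // (v)   unplug the held io
 */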
105
106/*----------------------------------------------------------------*/
107
991d9fa0
JT
108/*
109 * Key building.
110 */
111static void build_data_key(struct dm_thin_device *td,
44feb387 112 dm_block_t b, struct dm_cell_key *key)
991d9fa0
JT
113{
114 key->virtual = 0;
115 key->dev = dm_thin_dev_id(td);
116 key->block = b;
117}
118
119static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
44feb387 120 struct dm_cell_key *key)
991d9fa0
JT
121{
122 key->virtual = 1;
123 key->dev = dm_thin_dev_id(td);
124 key->block = b;
125}
126
127/*----------------------------------------------------------------*/
128
129/*
130 * A pool device ties together a metadata device and a data device. It
131 * also provides the interface for creating and destroying internal
132 * devices.
133 */
a24c2569 134struct dm_thin_new_mapping;
67e2e2b2 135
e49e5829 136/*
3e1a0699 137 * The pool runs in 4 modes. Ordered in degraded order for comparisons.
e49e5829
JT
138 */
139enum pool_mode {
140 PM_WRITE, /* metadata may be changed */
3e1a0699 141 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
e49e5829
JT
142 PM_READ_ONLY, /* metadata may not be changed */
143 PM_FAIL, /* all I/O fails */
144};
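/*
 * Because the enum above is ordered from least to most degraded, callers can
 * use plain comparisons to ask "is the pool at least this degraded?". For
 * example, commit() below refuses to touch the metadata with:
 *
 *   if (get_pool_mode(pool) >= PM_READ_ONLY)
 *       return -EINVAL;
 *
 * which covers both PM_READ_ONLY and PM_FAIL in a single test.
 */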
145
67e2e2b2 146struct pool_features {
e49e5829
JT
147 enum pool_mode mode;
148
9bc142dd
MS
149 bool zero_new_blocks:1;
150 bool discard_enabled:1;
151 bool discard_passdown:1;
787a996c 152 bool error_if_no_space:1;
67e2e2b2
JT
153};
154
e49e5829
JT
155struct thin_c;
156typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
157typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
158
991d9fa0
JT
159struct pool {
160 struct list_head list;
161 struct dm_target *ti; /* Only set if a pool target is bound */
162
163 struct mapped_device *pool_md;
164 struct block_device *md_dev;
165 struct dm_pool_metadata *pmd;
166
991d9fa0 167 dm_block_t low_water_blocks;
55f2b8bd 168 uint32_t sectors_per_block;
f9a8e0cd 169 int sectors_per_block_shift;
991d9fa0 170
67e2e2b2 171 struct pool_features pf;
88a6621b 172 bool low_water_triggered:1; /* A dm event has been sent */
991d9fa0 173
44feb387 174 struct dm_bio_prison *prison;
991d9fa0
JT
175 struct dm_kcopyd_client *copier;
176
177 struct workqueue_struct *wq;
178 struct work_struct worker;
905e51b3 179 struct delayed_work waker;
85ad643b 180 struct delayed_work no_space_timeout;
991d9fa0 181
905e51b3 182 unsigned long last_commit_jiffies;
55f2b8bd 183 unsigned ref_count;
991d9fa0
JT
184
185 spinlock_t lock;
991d9fa0
JT
186 struct bio_list deferred_flush_bios;
187 struct list_head prepared_mappings;
104655fd 188 struct list_head prepared_discards;
c140e1c4 189 struct list_head active_thins;
991d9fa0 190
44feb387
MS
191 struct dm_deferred_set *shared_read_ds;
192 struct dm_deferred_set *all_io_ds;
991d9fa0 193
a24c2569 194 struct dm_thin_new_mapping *next_mapping;
991d9fa0 195 mempool_t *mapping_pool;
e49e5829
JT
196
197 process_bio_fn process_bio;
198 process_bio_fn process_discard;
199
200 process_mapping_fn process_prepared_mapping;
201 process_mapping_fn process_prepared_discard;
991d9fa0
JT
202};
203
e49e5829 204static enum pool_mode get_pool_mode(struct pool *pool);
b5330655 205static void metadata_operation_failed(struct pool *pool, const char *op, int r);
e49e5829 206
991d9fa0
JT
207/*
208 * Target context for a pool.
209 */
210struct pool_c {
211 struct dm_target *ti;
212 struct pool *pool;
213 struct dm_dev *data_dev;
214 struct dm_dev *metadata_dev;
215 struct dm_target_callbacks callbacks;
216
217 dm_block_t low_water_blocks;
0424caa1
MS
218 struct pool_features requested_pf; /* Features requested during table load */
219 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
991d9fa0
JT
220};
221
222/*
223 * Target context for a thin.
224 */
225struct thin_c {
c140e1c4 226 struct list_head list;
991d9fa0 227 struct dm_dev *pool_dev;
2dd9c257 228 struct dm_dev *origin_dev;
e5aea7b4 229 sector_t origin_size;
991d9fa0
JT
230 dm_thin_id dev_id;
231
232 struct pool *pool;
233 struct dm_thin_device *td;
738211f7 234 bool requeue_mode:1;
c140e1c4
MS
235 spinlock_t lock;
236 struct bio_list deferred_bio_list;
237 struct bio_list retry_on_resume_list;
67324ea1 238 struct rb_root sort_bio_list; /* sorted list of deferred bios */
b10ebd34
JT
239
240 /*
241 * Ensures the thin is not destroyed until the worker has finished
242 * iterating the active_thins list.
243 */
244 atomic_t refcount;
245 struct completion can_destroy;
991d9fa0
JT
246};
247
248/*----------------------------------------------------------------*/
249
025b9685
JT
250/*
251 * wake_worker() is used when new work is queued and when pool_resume is
252 * ready to continue deferred IO processing.
253 */
254static void wake_worker(struct pool *pool)
255{
256 queue_work(pool->wq, &pool->worker);
257}
258
259/*----------------------------------------------------------------*/
260
6beca5eb
JT
261static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
262 struct dm_bio_prison_cell **cell_result)
263{
264 int r;
265 struct dm_bio_prison_cell *cell_prealloc;
266
267 /*
268 * Allocate a cell from the prison's mempool.
269 * This might block but it can't fail.
270 */
271 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
272
273 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
274 if (r)
275 /*
276 * We reused an old cell; we can get rid of
277 * the new one.
278 */
279 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
280
281 return r;
282}
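/*
 * Typical caller pattern (as used by process_bio() and process_discard()
 * below): build a key for the virtual or data block, try to detain the bio,
 * and bail out if the cell is already occupied -- the current holder of the
 * cell will release the prisoners later via cell_defer()/cell_release():
 *
 *   build_virtual_key(tc->td, block, &key);
 *   if (bio_detain(pool, &key, bio, &cell))
 *       return;
 */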
283
284static void cell_release(struct pool *pool,
285 struct dm_bio_prison_cell *cell,
286 struct bio_list *bios)
287{
288 dm_cell_release(pool->prison, cell, bios);
289 dm_bio_prison_free_cell(pool->prison, cell);
290}
291
292static void cell_release_no_holder(struct pool *pool,
293 struct dm_bio_prison_cell *cell,
294 struct bio_list *bios)
295{
296 dm_cell_release_no_holder(pool->prison, cell, bios);
297 dm_bio_prison_free_cell(pool->prison, cell);
298}
299
025b9685
JT
300static void cell_defer_no_holder_no_free(struct thin_c *tc,
301 struct dm_bio_prison_cell *cell)
302{
303 struct pool *pool = tc->pool;
304 unsigned long flags;
305
c140e1c4
MS
306 spin_lock_irqsave(&tc->lock, flags);
307 dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
308 spin_unlock_irqrestore(&tc->lock, flags);
025b9685
JT
309
310 wake_worker(pool);
311}
312
af91805a
MS
313static void cell_error_with_code(struct pool *pool,
314 struct dm_bio_prison_cell *cell, int error_code)
6beca5eb 315{
af91805a 316 dm_cell_error(pool->prison, cell, error_code);
6beca5eb
JT
317 dm_bio_prison_free_cell(pool->prison, cell);
318}
319
af91805a
MS
320static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
321{
322 cell_error_with_code(pool, cell, -EIO);
323}
324
6beca5eb
JT
325/*----------------------------------------------------------------*/
326
991d9fa0
JT
327/*
328 * A global list of pools that uses a struct mapped_device as a key.
329 */
330static struct dm_thin_pool_table {
331 struct mutex mutex;
332 struct list_head pools;
333} dm_thin_pool_table;
334
335static void pool_table_init(void)
336{
337 mutex_init(&dm_thin_pool_table.mutex);
338 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
339}
340
341static void __pool_table_insert(struct pool *pool)
342{
343 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
344 list_add(&pool->list, &dm_thin_pool_table.pools);
345}
346
347static void __pool_table_remove(struct pool *pool)
348{
349 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
350 list_del(&pool->list);
351}
352
353static struct pool *__pool_table_lookup(struct mapped_device *md)
354{
355 struct pool *pool = NULL, *tmp;
356
357 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
358
359 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
360 if (tmp->pool_md == md) {
361 pool = tmp;
362 break;
363 }
364 }
365
366 return pool;
367}
368
369static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
370{
371 struct pool *pool = NULL, *tmp;
372
373 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
374
375 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
376 if (tmp->md_dev == md_dev) {
377 pool = tmp;
378 break;
379 }
380 }
381
382 return pool;
383}
384
385/*----------------------------------------------------------------*/
386
a24c2569 387struct dm_thin_endio_hook {
eb2aa48d 388 struct thin_c *tc;
44feb387
MS
389 struct dm_deferred_entry *shared_read_entry;
390 struct dm_deferred_entry *all_io_entry;
a24c2569 391 struct dm_thin_new_mapping *overwrite_mapping;
67324ea1 392 struct rb_node rb_node;
eb2aa48d
JT
393};
394
18adc577 395static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
991d9fa0
JT
396{
397 struct bio *bio;
398 struct bio_list bios;
18adc577 399 unsigned long flags;
991d9fa0
JT
400
401 bio_list_init(&bios);
18adc577 402
c140e1c4 403 spin_lock_irqsave(&tc->lock, flags);
991d9fa0
JT
404 bio_list_merge(&bios, master);
405 bio_list_init(master);
c140e1c4 406 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0 407
c140e1c4
MS
408 while ((bio = bio_list_pop(&bios)))
409 bio_endio(bio, DM_ENDIO_REQUEUE);
991d9fa0
JT
410}
411
412static void requeue_io(struct thin_c *tc)
413{
c140e1c4
MS
414 requeue_bio_list(tc, &tc->deferred_bio_list);
415 requeue_bio_list(tc, &tc->retry_on_resume_list);
991d9fa0
JT
416}
417
c140e1c4 418static void error_thin_retry_list(struct thin_c *tc)
3e1a0699
JT
419{
420 struct bio *bio;
421 unsigned long flags;
422 struct bio_list bios;
423
424 bio_list_init(&bios);
425
c140e1c4
MS
426 spin_lock_irqsave(&tc->lock, flags);
427 bio_list_merge(&bios, &tc->retry_on_resume_list);
428 bio_list_init(&tc->retry_on_resume_list);
429 spin_unlock_irqrestore(&tc->lock, flags);
3e1a0699
JT
430
431 while ((bio = bio_list_pop(&bios)))
432 bio_io_error(bio);
433}
434
c140e1c4
MS
435static void error_retry_list(struct pool *pool)
436{
437 struct thin_c *tc;
438
439 rcu_read_lock();
440 list_for_each_entry_rcu(tc, &pool->active_thins, list)
441 error_thin_retry_list(tc);
442 rcu_read_unlock();
443}
444
991d9fa0
JT
445/*
446 * This section of code contains the logic for processing a thin device's IO.
447 * Much of the code depends on pool object resources (lists, workqueues, etc)
448 * but most is exclusively called from the thin target rather than the thin-pool
449 * target.
450 */
451
58f77a21
MS
452static bool block_size_is_power_of_two(struct pool *pool)
453{
454 return pool->sectors_per_block_shift >= 0;
455}
456
991d9fa0
JT
457static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
458{
58f77a21 459 struct pool *pool = tc->pool;
4f024f37 460 sector_t block_nr = bio->bi_iter.bi_sector;
55f2b8bd 461
58f77a21
MS
462 if (block_size_is_power_of_two(pool))
463 block_nr >>= pool->sectors_per_block_shift;
f9a8e0cd 464 else
58f77a21 465 (void) sector_div(block_nr, pool->sectors_per_block);
55f2b8bd
MS
466
467 return block_nr;
991d9fa0
JT
468}
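/*
 * Worked example (illustrative numbers): with 64KiB data blocks,
 * sectors_per_block = 128 and sectors_per_block_shift = 7, so a bio whose
 * bi_sector is 1000 maps to virtual block 1000 >> 7 = 7. For a
 * non-power-of-two block size the same result is obtained via sector_div().
 */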
469
470static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
471{
472 struct pool *pool = tc->pool;
4f024f37 473 sector_t bi_sector = bio->bi_iter.bi_sector;
991d9fa0
JT
474
475 bio->bi_bdev = tc->pool_dev->bdev;
58f77a21 476 if (block_size_is_power_of_two(pool))
4f024f37
KO
477 bio->bi_iter.bi_sector =
478 (block << pool->sectors_per_block_shift) |
479 (bi_sector & (pool->sectors_per_block - 1));
58f77a21 480 else
4f024f37 481 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
58f77a21 482 sector_div(bi_sector, pool->sectors_per_block);
991d9fa0
JT
483}
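/*
 * Worked example (illustrative numbers): continuing the case above
 * (sectors_per_block = 128, shift = 7, bi_sector = 1000), remapping to data
 * block 42 gives
 *
 *   bi_sector = (42 << 7) | (1000 & 127) = 5376 + 104 = 5480
 *
 * i.e. the offset within the block (104 sectors) is preserved while the
 * block base is swapped for the location on the pool's data device.
 */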
484
2dd9c257
JT
485static void remap_to_origin(struct thin_c *tc, struct bio *bio)
486{
487 bio->bi_bdev = tc->origin_dev->bdev;
488}
489
4afdd680
JT
490static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
491{
492 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
493 dm_thin_changed_this_transaction(tc->td);
494}
495
e8088073
JT
496static void inc_all_io_entry(struct pool *pool, struct bio *bio)
497{
498 struct dm_thin_endio_hook *h;
499
500 if (bio->bi_rw & REQ_DISCARD)
501 return;
502
59c3d2c6 503 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
e8088073
JT
504 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
505}
506
2dd9c257 507static void issue(struct thin_c *tc, struct bio *bio)
991d9fa0
JT
508{
509 struct pool *pool = tc->pool;
510 unsigned long flags;
511
e49e5829
JT
512 if (!bio_triggers_commit(tc, bio)) {
513 generic_make_request(bio);
514 return;
515 }
516
991d9fa0 517 /*
e49e5829
JT
518 * Complete bio with an error if earlier I/O caused changes to
 519 * the metadata that can't be committed, e.g. due to I/O errors
520 * on the metadata device.
991d9fa0 521 */
e49e5829
JT
522 if (dm_thin_aborted_changes(tc->td)) {
523 bio_io_error(bio);
524 return;
525 }
526
527 /*
528 * Batch together any bios that trigger commits and then issue a
529 * single commit for them in process_deferred_bios().
530 */
531 spin_lock_irqsave(&pool->lock, flags);
532 bio_list_add(&pool->deferred_flush_bios, bio);
533 spin_unlock_irqrestore(&pool->lock, flags);
991d9fa0
JT
534}
535
2dd9c257
JT
536static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
537{
538 remap_to_origin(tc, bio);
539 issue(tc, bio);
540}
541
542static void remap_and_issue(struct thin_c *tc, struct bio *bio,
543 dm_block_t block)
544{
545 remap(tc, bio, block);
546 issue(tc, bio);
547}
548
991d9fa0
JT
549/*----------------------------------------------------------------*/
550
551/*
552 * Bio endio functions.
553 */
a24c2569 554struct dm_thin_new_mapping {
991d9fa0
JT
555 struct list_head list;
556
7f214665
MS
557 bool pass_discard:1;
558 bool definitely_not_shared:1;
991d9fa0 559
50f3c3ef
JT
560 /*
561 * Track quiescing, copying and zeroing preparation actions. When this
562 * counter hits zero the block is prepared and can be inserted into the
563 * btree.
564 */
565 atomic_t prepare_actions;
566
7f214665 567 int err;
991d9fa0
JT
568 struct thin_c *tc;
569 dm_block_t virt_block;
570 dm_block_t data_block;
a24c2569 571 struct dm_bio_prison_cell *cell, *cell2;
991d9fa0
JT
572
573 /*
574 * If the bio covers the whole area of a block then we can avoid
575 * zeroing or copying. Instead this bio is hooked. The bio will
576 * still be in the cell, so care has to be taken to avoid issuing
577 * the bio twice.
578 */
579 struct bio *bio;
580 bio_end_io_t *saved_bi_end_io;
581};
582
50f3c3ef 583static void __complete_mapping_preparation(struct dm_thin_new_mapping *m)
991d9fa0
JT
584{
585 struct pool *pool = m->tc->pool;
586
50f3c3ef 587 if (atomic_dec_and_test(&m->prepare_actions)) {
daec338b 588 list_add_tail(&m->list, &pool->prepared_mappings);
991d9fa0
JT
589 wake_worker(pool);
590 }
591}
592
e5aea7b4 593static void complete_mapping_preparation(struct dm_thin_new_mapping *m)
991d9fa0
JT
594{
595 unsigned long flags;
991d9fa0
JT
596 struct pool *pool = m->tc->pool;
597
991d9fa0 598 spin_lock_irqsave(&pool->lock, flags);
50f3c3ef 599 __complete_mapping_preparation(m);
991d9fa0
JT
600 spin_unlock_irqrestore(&pool->lock, flags);
601}
602
e5aea7b4
JT
603static void copy_complete(int read_err, unsigned long write_err, void *context)
604{
605 struct dm_thin_new_mapping *m = context;
606
607 m->err = read_err || write_err ? -EIO : 0;
608 complete_mapping_preparation(m);
609}
610
991d9fa0
JT
611static void overwrite_endio(struct bio *bio, int err)
612{
59c3d2c6 613 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 614 struct dm_thin_new_mapping *m = h->overwrite_mapping;
991d9fa0
JT
615
616 m->err = err;
e5aea7b4 617 complete_mapping_preparation(m);
991d9fa0
JT
618}
619
991d9fa0
JT
620/*----------------------------------------------------------------*/
621
622/*
623 * Workqueue.
624 */
625
626/*
627 * Prepared mapping jobs.
628 */
629
630/*
 631 * This sends the bios in the cell back to the thin's deferred_bio_list.
632 */
2aab3850 633static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
991d9fa0
JT
634{
635 struct pool *pool = tc->pool;
636 unsigned long flags;
637
c140e1c4
MS
638 spin_lock_irqsave(&tc->lock, flags);
639 cell_release(pool, cell, &tc->deferred_bio_list);
640 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
641
642 wake_worker(pool);
643}
644
645/*
6beca5eb 646 * Same as cell_defer above, except it omits the original holder of the cell.
991d9fa0 647 */
f286ba0e 648static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
991d9fa0 649{
991d9fa0
JT
650 struct pool *pool = tc->pool;
651 unsigned long flags;
652
c140e1c4
MS
653 spin_lock_irqsave(&tc->lock, flags);
654 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
655 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
656
657 wake_worker(pool);
658}
659
e49e5829
JT
660static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
661{
196d38bc 662 if (m->bio) {
e49e5829 663 m->bio->bi_end_io = m->saved_bi_end_io;
196d38bc
KO
664 atomic_inc(&m->bio->bi_remaining);
665 }
6beca5eb 666 cell_error(m->tc->pool, m->cell);
e49e5829
JT
667 list_del(&m->list);
668 mempool_free(m, m->tc->pool->mapping_pool);
669}
025b9685 670
a24c2569 671static void process_prepared_mapping(struct dm_thin_new_mapping *m)
991d9fa0
JT
672{
673 struct thin_c *tc = m->tc;
6beca5eb 674 struct pool *pool = tc->pool;
991d9fa0
JT
675 struct bio *bio;
676 int r;
677
678 bio = m->bio;
196d38bc 679 if (bio) {
991d9fa0 680 bio->bi_end_io = m->saved_bi_end_io;
196d38bc
KO
681 atomic_inc(&bio->bi_remaining);
682 }
991d9fa0
JT
683
684 if (m->err) {
6beca5eb 685 cell_error(pool, m->cell);
905386f8 686 goto out;
991d9fa0
JT
687 }
688
689 /*
690 * Commit the prepared block into the mapping btree.
691 * Any I/O for this block arriving after this point will get
692 * remapped to it directly.
693 */
694 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
695 if (r) {
b5330655 696 metadata_operation_failed(pool, "dm_thin_insert_block", r);
6beca5eb 697 cell_error(pool, m->cell);
905386f8 698 goto out;
991d9fa0
JT
699 }
700
701 /*
702 * Release any bios held while the block was being provisioned.
703 * If we are processing a write bio that completely covers the block,
 704 * we have already processed it, so we can ignore it now when processing
705 * the bios in the cell.
706 */
707 if (bio) {
f286ba0e 708 cell_defer_no_holder(tc, m->cell);
991d9fa0
JT
709 bio_endio(bio, 0);
710 } else
2aab3850 711 cell_defer(tc, m->cell);
991d9fa0 712
905386f8 713out:
991d9fa0 714 list_del(&m->list);
6beca5eb 715 mempool_free(m, pool->mapping_pool);
991d9fa0
JT
716}
717
e49e5829 718static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
104655fd 719{
104655fd
JT
720 struct thin_c *tc = m->tc;
721
e49e5829 722 bio_io_error(m->bio);
f286ba0e
JT
723 cell_defer_no_holder(tc, m->cell);
724 cell_defer_no_holder(tc, m->cell2);
e49e5829
JT
725 mempool_free(m, tc->pool->mapping_pool);
726}
727
728static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
729{
730 struct thin_c *tc = m->tc;
104655fd 731
e8088073 732 inc_all_io_entry(tc->pool, m->bio);
f286ba0e
JT
733 cell_defer_no_holder(tc, m->cell);
734 cell_defer_no_holder(tc, m->cell2);
e8088073 735
104655fd 736 if (m->pass_discard)
19fa1a67
JT
737 if (m->definitely_not_shared)
738 remap_and_issue(tc, m->bio, m->data_block);
739 else {
740 bool used = false;
741 if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
742 bio_endio(m->bio, 0);
743 else
744 remap_and_issue(tc, m->bio, m->data_block);
745 }
104655fd
JT
746 else
747 bio_endio(m->bio, 0);
748
104655fd
JT
749 mempool_free(m, tc->pool->mapping_pool);
750}
751
e49e5829
JT
752static void process_prepared_discard(struct dm_thin_new_mapping *m)
753{
754 int r;
755 struct thin_c *tc = m->tc;
756
757 r = dm_thin_remove_block(tc->td, m->virt_block);
758 if (r)
c397741c 759 DMERR_LIMIT("dm_thin_remove_block() failed");
e49e5829
JT
760
761 process_prepared_discard_passdown(m);
762}
763
104655fd 764static void process_prepared(struct pool *pool, struct list_head *head,
e49e5829 765 process_mapping_fn *fn)
991d9fa0
JT
766{
767 unsigned long flags;
768 struct list_head maps;
a24c2569 769 struct dm_thin_new_mapping *m, *tmp;
991d9fa0
JT
770
771 INIT_LIST_HEAD(&maps);
772 spin_lock_irqsave(&pool->lock, flags);
104655fd 773 list_splice_init(head, &maps);
991d9fa0
JT
774 spin_unlock_irqrestore(&pool->lock, flags);
775
776 list_for_each_entry_safe(m, tmp, &maps, list)
e49e5829 777 (*fn)(m);
991d9fa0
JT
778}
779
780/*
781 * Deferred bio jobs.
782 */
104655fd 783static int io_overlaps_block(struct pool *pool, struct bio *bio)
991d9fa0 784{
4f024f37
KO
785 return bio->bi_iter.bi_size ==
786 (pool->sectors_per_block << SECTOR_SHIFT);
104655fd
JT
787}
788
789static int io_overwrites_block(struct pool *pool, struct bio *bio)
790{
791 return (bio_data_dir(bio) == WRITE) &&
792 io_overlaps_block(pool, bio);
991d9fa0
JT
793}
794
795static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
796 bio_end_io_t *fn)
797{
798 *save = bio->bi_end_io;
799 bio->bi_end_io = fn;
800}
801
802static int ensure_next_mapping(struct pool *pool)
803{
804 if (pool->next_mapping)
805 return 0;
806
807 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
808
809 return pool->next_mapping ? 0 : -ENOMEM;
810}
811
a24c2569 812static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
991d9fa0 813{
16961b04 814 struct dm_thin_new_mapping *m = pool->next_mapping;
991d9fa0
JT
815
816 BUG_ON(!pool->next_mapping);
817
16961b04
MS
818 memset(m, 0, sizeof(struct dm_thin_new_mapping));
819 INIT_LIST_HEAD(&m->list);
820 m->bio = NULL;
821
991d9fa0
JT
822 pool->next_mapping = NULL;
823
16961b04 824 return m;
991d9fa0
JT
825}
826
e5aea7b4
JT
827static void ll_zero(struct thin_c *tc, struct dm_thin_new_mapping *m,
828 sector_t begin, sector_t end)
829{
830 int r;
831 struct dm_io_region to;
832
833 to.bdev = tc->pool_dev->bdev;
834 to.sector = begin;
835 to.count = end - begin;
836
837 r = dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m);
838 if (r < 0) {
839 DMERR_LIMIT("dm_kcopyd_zero() failed");
840 copy_complete(1, 1, m);
841 }
842}
843
844/*
845 * A partial copy also needs to zero the uncopied region.
846 */
991d9fa0 847static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
2dd9c257
JT
848 struct dm_dev *origin, dm_block_t data_origin,
849 dm_block_t data_dest,
e5aea7b4
JT
850 struct dm_bio_prison_cell *cell, struct bio *bio,
851 sector_t len)
991d9fa0
JT
852{
853 int r;
854 struct pool *pool = tc->pool;
a24c2569 855 struct dm_thin_new_mapping *m = get_next_mapping(pool);
991d9fa0 856
991d9fa0
JT
857 m->tc = tc;
858 m->virt_block = virt_block;
859 m->data_block = data_dest;
860 m->cell = cell;
991d9fa0 861
e5aea7b4
JT
862 /*
863 * quiesce action + copy action + an extra reference held for the
864 * duration of this function (we may need to inc later for a
865 * partial zero).
866 */
867 atomic_set(&m->prepare_actions, 3);
868
44feb387 869 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
e5aea7b4 870 complete_mapping_preparation(m); /* already quiesced */
991d9fa0
JT
871
872 /*
873 * IO to pool_dev remaps to the pool target's data_dev.
874 *
875 * If the whole block of data is being overwritten, we can issue the
876 * bio immediately. Otherwise we use kcopyd to clone the data first.
877 */
878 if (io_overwrites_block(pool, bio)) {
59c3d2c6 879 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 880
eb2aa48d 881 h->overwrite_mapping = m;
991d9fa0
JT
882 m->bio = bio;
883 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
e8088073 884 inc_all_io_entry(pool, bio);
991d9fa0
JT
885 remap_and_issue(tc, bio, data_dest);
886 } else {
887 struct dm_io_region from, to;
888
2dd9c257 889 from.bdev = origin->bdev;
991d9fa0 890 from.sector = data_origin * pool->sectors_per_block;
e5aea7b4 891 from.count = len;
991d9fa0
JT
892
893 to.bdev = tc->pool_dev->bdev;
894 to.sector = data_dest * pool->sectors_per_block;
e5aea7b4 895 to.count = len;
991d9fa0
JT
896
897 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
898 0, copy_complete, m);
899 if (r < 0) {
c397741c 900 DMERR_LIMIT("dm_kcopyd_copy() failed");
e5aea7b4
JT
901 copy_complete(1, 1, m);
902
903 /*
904 * We allow the zero to be issued, to simplify the
905 * error path. Otherwise we'd need to start
906 * worrying about decrementing the prepare_actions
907 * counter.
908 */
909 }
910
911 /*
912 * Do we need to zero a tail region?
913 */
914 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) {
915 atomic_inc(&m->prepare_actions);
916 ll_zero(tc, m,
917 data_dest * pool->sectors_per_block + len,
918 (data_dest + 1) * pool->sectors_per_block);
991d9fa0
JT
919 }
920 }
e5aea7b4
JT
921
922 complete_mapping_preparation(m); /* drop our ref */
991d9fa0
JT
923}
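/*
 * prepare_actions accounting for the simple full-block overwrite case
 * (an illustrative walk-through of the code above): the counter starts at 3
 * (quiesce + copy + the local reference). If the shared_read deferred set is
 * already quiesced it drops to 2 straight away; the function's final
 * complete_mapping_preparation() drops it to 1; and overwrite_endio() drops
 * it to 0 when the remapped bio completes, at which point the mapping is
 * queued on pool->prepared_mappings for the worker to commit.
 */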
924
2dd9c257
JT
925static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
926 dm_block_t data_origin, dm_block_t data_dest,
a24c2569 927 struct dm_bio_prison_cell *cell, struct bio *bio)
2dd9c257
JT
928{
929 schedule_copy(tc, virt_block, tc->pool_dev,
e5aea7b4
JT
930 data_origin, data_dest, cell, bio,
931 tc->pool->sectors_per_block);
2dd9c257
JT
932}
933
991d9fa0 934static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
a24c2569 935 dm_block_t data_block, struct dm_bio_prison_cell *cell,
991d9fa0
JT
936 struct bio *bio)
937{
938 struct pool *pool = tc->pool;
a24c2569 939 struct dm_thin_new_mapping *m = get_next_mapping(pool);
991d9fa0 940
50f3c3ef 941 atomic_set(&m->prepare_actions, 1); /* no need to quiesce */
991d9fa0
JT
942 m->tc = tc;
943 m->virt_block = virt_block;
944 m->data_block = data_block;
945 m->cell = cell;
991d9fa0
JT
946
947 /*
948 * If the whole block of data is being overwritten or we are not
949 * zeroing pre-existing data, we can issue the bio immediately.
950 * Otherwise we use kcopyd to zero the data first.
951 */
67e2e2b2 952 if (!pool->pf.zero_new_blocks)
991d9fa0
JT
953 process_prepared_mapping(m);
954
955 else if (io_overwrites_block(pool, bio)) {
59c3d2c6 956 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 957
eb2aa48d 958 h->overwrite_mapping = m;
991d9fa0
JT
959 m->bio = bio;
960 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
e8088073 961 inc_all_io_entry(pool, bio);
991d9fa0 962 remap_and_issue(tc, bio, data_block);
991d9fa0 963
e5aea7b4
JT
964 } else
965 ll_zero(tc, m,
966 data_block * pool->sectors_per_block,
967 (data_block + 1) * pool->sectors_per_block);
968}
991d9fa0 969
e5aea7b4
JT
970static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
971 dm_block_t data_dest,
972 struct dm_bio_prison_cell *cell, struct bio *bio)
973{
974 struct pool *pool = tc->pool;
975 sector_t virt_block_begin = virt_block * pool->sectors_per_block;
976 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block;
977
978 if (virt_block_end <= tc->origin_size)
979 schedule_copy(tc, virt_block, tc->origin_dev,
980 virt_block, data_dest, cell, bio,
981 pool->sectors_per_block);
982
983 else if (virt_block_begin < tc->origin_size)
984 schedule_copy(tc, virt_block, tc->origin_dev,
985 virt_block, data_dest, cell, bio,
986 tc->origin_size - virt_block_begin);
987
988 else
989 schedule_zero(tc, virt_block, data_dest, cell, bio);
991d9fa0
JT
990}
991
e49e5829
JT
992/*
993 * A non-zero return indicates read_only or fail_io mode.
994 * Many callers don't care about the return value.
995 */
020cc3b5 996static int commit(struct pool *pool)
e49e5829
JT
997{
998 int r;
999
8d07e8a5 1000 if (get_pool_mode(pool) >= PM_READ_ONLY)
e49e5829
JT
1001 return -EINVAL;
1002
020cc3b5 1003 r = dm_pool_commit_metadata(pool->pmd);
b5330655
JT
1004 if (r)
1005 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
e49e5829
JT
1006
1007 return r;
1008}
1009
88a6621b
JT
1010static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
1011{
1012 unsigned long flags;
1013
1014 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
1015 DMWARN("%s: reached low water mark for data device: sending event.",
1016 dm_device_name(pool->pool_md));
1017 spin_lock_irqsave(&pool->lock, flags);
1018 pool->low_water_triggered = true;
1019 spin_unlock_irqrestore(&pool->lock, flags);
1020 dm_table_event(pool->ti->table);
1021 }
1022}
1023
3e1a0699
JT
1024static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1025
991d9fa0
JT
1026static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1027{
1028 int r;
1029 dm_block_t free_blocks;
991d9fa0
JT
1030 struct pool *pool = tc->pool;
1031
3e1a0699 1032 if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
8d30abff
JT
1033 return -EINVAL;
1034
991d9fa0 1035 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
b5330655
JT
1036 if (r) {
1037 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
991d9fa0 1038 return r;
b5330655 1039 }
991d9fa0 1040
88a6621b 1041 check_low_water_mark(pool, free_blocks);
991d9fa0
JT
1042
1043 if (!free_blocks) {
94563bad
MS
1044 /*
1045 * Try to commit to see if that will free up some
1046 * more space.
1047 */
020cc3b5
JT
1048 r = commit(pool);
1049 if (r)
1050 return r;
991d9fa0 1051
94563bad 1052 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
b5330655
JT
1053 if (r) {
1054 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
94563bad 1055 return r;
b5330655 1056 }
991d9fa0 1057
94563bad 1058 if (!free_blocks) {
3e1a0699 1059 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
94563bad 1060 return -ENOSPC;
991d9fa0
JT
1061 }
1062 }
1063
1064 r = dm_pool_alloc_data_block(pool->pmd, result);
4a02b34e 1065 if (r) {
b5330655 1066 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
991d9fa0 1067 return r;
4a02b34e 1068 }
991d9fa0
JT
1069
1070 return 0;
1071}
1072
1073/*
1074 * If we have run out of space, queue bios until the device is
1075 * resumed, presumably after having been reloaded with more space.
1076 */
1077static void retry_on_resume(struct bio *bio)
1078{
59c3d2c6 1079 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 1080 struct thin_c *tc = h->tc;
991d9fa0
JT
1081 unsigned long flags;
1082
c140e1c4
MS
1083 spin_lock_irqsave(&tc->lock, flags);
1084 bio_list_add(&tc->retry_on_resume_list, bio);
1085 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1086}
1087
af91805a 1088static int should_error_unserviceable_bio(struct pool *pool)
8c0f0e8c 1089{
3e1a0699
JT
1090 enum pool_mode m = get_pool_mode(pool);
1091
1092 switch (m) {
1093 case PM_WRITE:
1094 /* Shouldn't get here */
1095 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
af91805a 1096 return -EIO;
3e1a0699
JT
1097
1098 case PM_OUT_OF_DATA_SPACE:
af91805a 1099 return pool->pf.error_if_no_space ? -ENOSPC : 0;
3e1a0699
JT
1100
1101 case PM_READ_ONLY:
1102 case PM_FAIL:
af91805a 1103 return -EIO;
3e1a0699
JT
1104 default:
1105 /* Shouldn't get here */
1106 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
af91805a 1107 return -EIO;
3e1a0699
JT
1108 }
1109}
8c0f0e8c 1110
3e1a0699
JT
1111static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1112{
af91805a
MS
1113 int error = should_error_unserviceable_bio(pool);
1114
1115 if (error)
1116 bio_endio(bio, error);
6d16202b
MS
1117 else
1118 retry_on_resume(bio);
8c0f0e8c
MS
1119}
1120
399caddf 1121static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
991d9fa0
JT
1122{
1123 struct bio *bio;
1124 struct bio_list bios;
af91805a 1125 int error;
991d9fa0 1126
af91805a
MS
1127 error = should_error_unserviceable_bio(pool);
1128 if (error) {
1129 cell_error_with_code(pool, cell, error);
3e1a0699
JT
1130 return;
1131 }
1132
991d9fa0 1133 bio_list_init(&bios);
6beca5eb 1134 cell_release(pool, cell, &bios);
991d9fa0 1135
af91805a
MS
1136 error = should_error_unserviceable_bio(pool);
1137 if (error)
3e1a0699 1138 while ((bio = bio_list_pop(&bios)))
af91805a 1139 bio_endio(bio, error);
3e1a0699
JT
1140 else
1141 while ((bio = bio_list_pop(&bios)))
1142 retry_on_resume(bio);
991d9fa0
JT
1143}
1144
104655fd
JT
1145static void process_discard(struct thin_c *tc, struct bio *bio)
1146{
1147 int r;
c3a0ce2e 1148 unsigned long flags;
104655fd 1149 struct pool *pool = tc->pool;
a24c2569 1150 struct dm_bio_prison_cell *cell, *cell2;
44feb387 1151 struct dm_cell_key key, key2;
104655fd
JT
1152 dm_block_t block = get_bio_block(tc, bio);
1153 struct dm_thin_lookup_result lookup_result;
a24c2569 1154 struct dm_thin_new_mapping *m;
104655fd
JT
1155
1156 build_virtual_key(tc->td, block, &key);
6beca5eb 1157 if (bio_detain(tc->pool, &key, bio, &cell))
104655fd
JT
1158 return;
1159
1160 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1161 switch (r) {
1162 case 0:
1163 /*
1164 * Check nobody is fiddling with this pool block. This can
1165 * happen if someone's in the process of breaking sharing
1166 * on this block.
1167 */
1168 build_data_key(tc->td, lookup_result.block, &key2);
6beca5eb 1169 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
f286ba0e 1170 cell_defer_no_holder(tc, cell);
104655fd
JT
1171 break;
1172 }
1173
1174 if (io_overlaps_block(pool, bio)) {
1175 /*
1176 * IO may still be going to the destination block. We must
1177 * quiesce before we can do the removal.
1178 */
1179 m = get_next_mapping(pool);
1180 m->tc = tc;
19fa1a67
JT
1181 m->pass_discard = pool->pf.discard_passdown;
1182 m->definitely_not_shared = !lookup_result.shared;
104655fd
JT
1183 m->virt_block = block;
1184 m->data_block = lookup_result.block;
1185 m->cell = cell;
1186 m->cell2 = cell2;
104655fd
JT
1187 m->bio = bio;
1188
44feb387 1189 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
c3a0ce2e 1190 spin_lock_irqsave(&pool->lock, flags);
daec338b 1191 list_add_tail(&m->list, &pool->prepared_discards);
c3a0ce2e 1192 spin_unlock_irqrestore(&pool->lock, flags);
104655fd
JT
1193 wake_worker(pool);
1194 }
1195 } else {
e8088073 1196 inc_all_io_entry(pool, bio);
f286ba0e
JT
1197 cell_defer_no_holder(tc, cell);
1198 cell_defer_no_holder(tc, cell2);
e8088073 1199
104655fd 1200 /*
49296309
MP
1201 * The DM core makes sure that the discard doesn't span
1202 * a block boundary. So we submit the discard of a
1203 * partial block appropriately.
104655fd 1204 */
650d2a06
MP
1205 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1206 remap_and_issue(tc, bio, lookup_result.block);
1207 else
1208 bio_endio(bio, 0);
104655fd
JT
1209 }
1210 break;
1211
1212 case -ENODATA:
1213 /*
1214 * It isn't provisioned, just forget it.
1215 */
f286ba0e 1216 cell_defer_no_holder(tc, cell);
104655fd
JT
1217 bio_endio(bio, 0);
1218 break;
1219
1220 default:
c397741c
MS
1221 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1222 __func__, r);
f286ba0e 1223 cell_defer_no_holder(tc, cell);
104655fd
JT
1224 bio_io_error(bio);
1225 break;
1226 }
1227}
1228
991d9fa0 1229static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
44feb387 1230 struct dm_cell_key *key,
991d9fa0 1231 struct dm_thin_lookup_result *lookup_result,
a24c2569 1232 struct dm_bio_prison_cell *cell)
991d9fa0
JT
1233{
1234 int r;
1235 dm_block_t data_block;
d6fc2042 1236 struct pool *pool = tc->pool;
991d9fa0
JT
1237
1238 r = alloc_data_block(tc, &data_block);
1239 switch (r) {
1240 case 0:
2dd9c257
JT
1241 schedule_internal_copy(tc, block, lookup_result->block,
1242 data_block, cell, bio);
991d9fa0
JT
1243 break;
1244
1245 case -ENOSPC:
399caddf 1246 retry_bios_on_resume(pool, cell);
991d9fa0
JT
1247 break;
1248
1249 default:
c397741c
MS
1250 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1251 __func__, r);
d6fc2042 1252 cell_error(pool, cell);
991d9fa0
JT
1253 break;
1254 }
1255}
1256
1257static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1258 dm_block_t block,
1259 struct dm_thin_lookup_result *lookup_result)
1260{
a24c2569 1261 struct dm_bio_prison_cell *cell;
991d9fa0 1262 struct pool *pool = tc->pool;
44feb387 1263 struct dm_cell_key key;
991d9fa0
JT
1264
1265 /*
1266 * If cell is already occupied, then sharing is already in the process
1267 * of being broken so we have nothing further to do here.
1268 */
1269 build_data_key(tc->td, lookup_result->block, &key);
6beca5eb 1270 if (bio_detain(pool, &key, bio, &cell))
991d9fa0
JT
1271 return;
1272
4f024f37 1273 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
991d9fa0
JT
1274 break_sharing(tc, bio, block, &key, lookup_result, cell);
1275 else {
59c3d2c6 1276 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
991d9fa0 1277
44feb387 1278 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
e8088073 1279 inc_all_io_entry(pool, bio);
f286ba0e 1280 cell_defer_no_holder(tc, cell);
e8088073 1281
991d9fa0
JT
1282 remap_and_issue(tc, bio, lookup_result->block);
1283 }
1284}
1285
1286static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
a24c2569 1287 struct dm_bio_prison_cell *cell)
991d9fa0
JT
1288{
1289 int r;
1290 dm_block_t data_block;
6beca5eb 1291 struct pool *pool = tc->pool;
991d9fa0
JT
1292
1293 /*
1294 * Remap empty bios (flushes) immediately, without provisioning.
1295 */
4f024f37 1296 if (!bio->bi_iter.bi_size) {
6beca5eb 1297 inc_all_io_entry(pool, bio);
f286ba0e 1298 cell_defer_no_holder(tc, cell);
e8088073 1299
991d9fa0
JT
1300 remap_and_issue(tc, bio, 0);
1301 return;
1302 }
1303
1304 /*
1305 * Fill read bios with zeroes and complete them immediately.
1306 */
1307 if (bio_data_dir(bio) == READ) {
1308 zero_fill_bio(bio);
f286ba0e 1309 cell_defer_no_holder(tc, cell);
991d9fa0
JT
1310 bio_endio(bio, 0);
1311 return;
1312 }
1313
1314 r = alloc_data_block(tc, &data_block);
1315 switch (r) {
1316 case 0:
2dd9c257
JT
1317 if (tc->origin_dev)
1318 schedule_external_copy(tc, block, data_block, cell, bio);
1319 else
1320 schedule_zero(tc, block, data_block, cell, bio);
991d9fa0
JT
1321 break;
1322
1323 case -ENOSPC:
399caddf 1324 retry_bios_on_resume(pool, cell);
991d9fa0
JT
1325 break;
1326
1327 default:
c397741c
MS
1328 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1329 __func__, r);
6beca5eb 1330 cell_error(pool, cell);
991d9fa0
JT
1331 break;
1332 }
1333}
1334
1335static void process_bio(struct thin_c *tc, struct bio *bio)
1336{
1337 int r;
6beca5eb 1338 struct pool *pool = tc->pool;
991d9fa0 1339 dm_block_t block = get_bio_block(tc, bio);
a24c2569 1340 struct dm_bio_prison_cell *cell;
44feb387 1341 struct dm_cell_key key;
991d9fa0
JT
1342 struct dm_thin_lookup_result lookup_result;
1343
1344 /*
1345 * If cell is already occupied, then the block is already
1346 * being provisioned so we have nothing further to do here.
1347 */
1348 build_virtual_key(tc->td, block, &key);
6beca5eb 1349 if (bio_detain(pool, &key, bio, &cell))
991d9fa0
JT
1350 return;
1351
1352 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1353 switch (r) {
1354 case 0:
e8088073 1355 if (lookup_result.shared) {
991d9fa0 1356 process_shared_bio(tc, bio, block, &lookup_result);
6beca5eb 1357 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
e8088073 1358 } else {
6beca5eb 1359 inc_all_io_entry(pool, bio);
f286ba0e 1360 cell_defer_no_holder(tc, cell);
e8088073 1361
991d9fa0 1362 remap_and_issue(tc, bio, lookup_result.block);
e8088073 1363 }
991d9fa0
JT
1364 break;
1365
1366 case -ENODATA:
2dd9c257 1367 if (bio_data_dir(bio) == READ && tc->origin_dev) {
6beca5eb 1368 inc_all_io_entry(pool, bio);
f286ba0e 1369 cell_defer_no_holder(tc, cell);
e8088073 1370
e5aea7b4
JT
1371 if (bio_end_sector(bio) <= tc->origin_size)
1372 remap_to_origin_and_issue(tc, bio);
1373
1374 else if (bio->bi_iter.bi_sector < tc->origin_size) {
1375 zero_fill_bio(bio);
1376 bio->bi_iter.bi_size = (tc->origin_size - bio->bi_iter.bi_sector) << SECTOR_SHIFT;
1377 remap_to_origin_and_issue(tc, bio);
1378
1379 } else {
1380 zero_fill_bio(bio);
1381 bio_endio(bio, 0);
1382 }
2dd9c257
JT
1383 } else
1384 provision_block(tc, bio, block, cell);
991d9fa0
JT
1385 break;
1386
1387 default:
c397741c
MS
1388 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1389 __func__, r);
f286ba0e 1390 cell_defer_no_holder(tc, cell);
991d9fa0
JT
1391 bio_io_error(bio);
1392 break;
1393 }
1394}
1395
e49e5829
JT
1396static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1397{
1398 int r;
1399 int rw = bio_data_dir(bio);
1400 dm_block_t block = get_bio_block(tc, bio);
1401 struct dm_thin_lookup_result lookup_result;
1402
1403 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1404 switch (r) {
1405 case 0:
4f024f37 1406 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
8c0f0e8c 1407 handle_unserviceable_bio(tc->pool, bio);
e8088073
JT
1408 else {
1409 inc_all_io_entry(tc->pool, bio);
e49e5829 1410 remap_and_issue(tc, bio, lookup_result.block);
e8088073 1411 }
e49e5829
JT
1412 break;
1413
1414 case -ENODATA:
1415 if (rw != READ) {
8c0f0e8c 1416 handle_unserviceable_bio(tc->pool, bio);
e49e5829
JT
1417 break;
1418 }
1419
1420 if (tc->origin_dev) {
e8088073 1421 inc_all_io_entry(tc->pool, bio);
e49e5829
JT
1422 remap_to_origin_and_issue(tc, bio);
1423 break;
1424 }
1425
1426 zero_fill_bio(bio);
1427 bio_endio(bio, 0);
1428 break;
1429
1430 default:
c397741c
MS
1431 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1432 __func__, r);
e49e5829
JT
1433 bio_io_error(bio);
1434 break;
1435 }
1436}
1437
3e1a0699
JT
1438static void process_bio_success(struct thin_c *tc, struct bio *bio)
1439{
1440 bio_endio(bio, 0);
1441}
1442
e49e5829
JT
1443static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1444{
1445 bio_io_error(bio);
1446}
1447
ac8c3f3d
JT
1448/*
1449 * FIXME: should we also commit due to size of transaction, measured in
1450 * metadata blocks?
1451 */
905e51b3
JT
1452static int need_commit_due_to_time(struct pool *pool)
1453{
1454 return jiffies < pool->last_commit_jiffies ||
1455 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1456}
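/*
 * In other words: commit if jiffies has wrapped below last_commit_jiffies, or
 * if more than COMMIT_PERIOD (one second, since COMMIT_PERIOD is HZ) has
 * elapsed since the last commit. Illustrative numbers: with HZ = 1000,
 * last_commit_jiffies = 5000 and jiffies = 6500, the second test fires
 * (6500 > 5000 + 1000) and the worker will commit.
 */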
1457
67324ea1
MS
1458#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1459#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1460
1461static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1462{
1463 struct rb_node **rbp, *parent;
1464 struct dm_thin_endio_hook *pbd;
1465 sector_t bi_sector = bio->bi_iter.bi_sector;
1466
1467 rbp = &tc->sort_bio_list.rb_node;
1468 parent = NULL;
1469 while (*rbp) {
1470 parent = *rbp;
1471 pbd = thin_pbd(parent);
1472
1473 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1474 rbp = &(*rbp)->rb_left;
1475 else
1476 rbp = &(*rbp)->rb_right;
1477 }
1478
1479 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1480 rb_link_node(&pbd->rb_node, parent, rbp);
1481 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
1482}
1483
1484static void __extract_sorted_bios(struct thin_c *tc)
1485{
1486 struct rb_node *node;
1487 struct dm_thin_endio_hook *pbd;
1488 struct bio *bio;
1489
1490 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
1491 pbd = thin_pbd(node);
1492 bio = thin_bio(pbd);
1493
1494 bio_list_add(&tc->deferred_bio_list, bio);
1495 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
1496 }
1497
1498 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
1499}
1500
1501static void __sort_thin_deferred_bios(struct thin_c *tc)
1502{
1503 struct bio *bio;
1504 struct bio_list bios;
1505
1506 bio_list_init(&bios);
1507 bio_list_merge(&bios, &tc->deferred_bio_list);
1508 bio_list_init(&tc->deferred_bio_list);
1509
1510 /* Sort deferred_bio_list using rb-tree */
1511 while ((bio = bio_list_pop(&bios)))
1512 __thin_bio_rb_add(tc, bio);
1513
1514 /*
1515 * Transfer the sorted bios in sort_bio_list back to
1516 * deferred_bio_list to allow lockless submission of
1517 * all bios.
1518 */
1519 __extract_sorted_bios(tc);
1520}
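/*
 * Net effect of the two helpers above (illustrative): deferred bios that
 * arrived for sectors 900, 100 and 500 are re-emitted onto deferred_bio_list
 * as 100, 500, 900, so the submission loop below walks the data device in
 * roughly ascending sector order.
 */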
1521
c140e1c4 1522static void process_thin_deferred_bios(struct thin_c *tc)
991d9fa0 1523{
c140e1c4 1524 struct pool *pool = tc->pool;
991d9fa0
JT
1525 unsigned long flags;
1526 struct bio *bio;
1527 struct bio_list bios;
67324ea1 1528 struct blk_plug plug;
8a01a6af 1529 unsigned count = 0;
991d9fa0 1530
c140e1c4
MS
1531 if (tc->requeue_mode) {
1532 requeue_bio_list(tc, &tc->deferred_bio_list);
1533 return;
1534 }
1535
991d9fa0
JT
1536 bio_list_init(&bios);
1537
c140e1c4 1538 spin_lock_irqsave(&tc->lock, flags);
67324ea1
MS
1539
1540 if (bio_list_empty(&tc->deferred_bio_list)) {
1541 spin_unlock_irqrestore(&tc->lock, flags);
1542 return;
1543 }
1544
1545 __sort_thin_deferred_bios(tc);
1546
c140e1c4
MS
1547 bio_list_merge(&bios, &tc->deferred_bio_list);
1548 bio_list_init(&tc->deferred_bio_list);
67324ea1 1549
c140e1c4 1550 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0 1551
67324ea1 1552 blk_start_plug(&plug);
991d9fa0 1553 while ((bio = bio_list_pop(&bios))) {
991d9fa0
JT
1554 /*
1555 * If we've got no free new_mapping structs, and processing
1556 * this bio might require one, we pause until there are some
1557 * prepared mappings to process.
1558 */
1559 if (ensure_next_mapping(pool)) {
c140e1c4
MS
1560 spin_lock_irqsave(&tc->lock, flags);
1561 bio_list_add(&tc->deferred_bio_list, bio);
1562 bio_list_merge(&tc->deferred_bio_list, &bios);
1563 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1564 break;
1565 }
104655fd
JT
1566
1567 if (bio->bi_rw & REQ_DISCARD)
e49e5829 1568 pool->process_discard(tc, bio);
104655fd 1569 else
e49e5829 1570 pool->process_bio(tc, bio);
8a01a6af
JT
1571
1572 if ((count++ & 127) == 0) {
1573 dm_pool_issue_prefetches(pool->pmd);
1574 }
991d9fa0 1575 }
67324ea1 1576 blk_finish_plug(&plug);
c140e1c4
MS
1577}
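/*
 * Note on the prefetch call in the loop above, which is what the patch title
 * refers to: because (count++ & 127) tests the value before the increment,
 * the condition is true for bios 0, 128, 256, ... so
 * dm_pool_issue_prefetches() is kicked once per 128 deferred bios. The intent
 * (a hedged summary -- the implementation lives in the metadata code behind
 * dm-thin-metadata.h) is to start reads for metadata pages that the upcoming
 * mapping lookups will need, rather than faulting them in one at a time.
 */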
1578
b10ebd34
JT
1579static void thin_get(struct thin_c *tc);
1580static void thin_put(struct thin_c *tc);
1581
1582/*
1583 * We can't hold rcu_read_lock() around code that can block. So we
1584 * find a thin with the rcu lock held; bump a refcount; then drop
1585 * the lock.
1586 */
1587static struct thin_c *get_first_thin(struct pool *pool)
1588{
1589 struct thin_c *tc = NULL;
1590
1591 rcu_read_lock();
1592 if (!list_empty(&pool->active_thins)) {
1593 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1594 thin_get(tc);
1595 }
1596 rcu_read_unlock();
1597
1598 return tc;
1599}
1600
1601static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1602{
1603 struct thin_c *old_tc = tc;
1604
1605 rcu_read_lock();
1606 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1607 thin_get(tc);
1608 thin_put(old_tc);
1609 rcu_read_unlock();
1610 return tc;
1611 }
1612 thin_put(old_tc);
1613 rcu_read_unlock();
1614
1615 return NULL;
1616}
1617
c140e1c4
MS
1618static void process_deferred_bios(struct pool *pool)
1619{
1620 unsigned long flags;
1621 struct bio *bio;
1622 struct bio_list bios;
1623 struct thin_c *tc;
1624
b10ebd34
JT
1625 tc = get_first_thin(pool);
1626 while (tc) {
c140e1c4 1627 process_thin_deferred_bios(tc);
b10ebd34
JT
1628 tc = get_next_thin(pool, tc);
1629 }
991d9fa0
JT
1630
1631 /*
1632 * If there are any deferred flush bios, we must commit
1633 * the metadata before issuing them.
1634 */
1635 bio_list_init(&bios);
1636 spin_lock_irqsave(&pool->lock, flags);
1637 bio_list_merge(&bios, &pool->deferred_flush_bios);
1638 bio_list_init(&pool->deferred_flush_bios);
1639 spin_unlock_irqrestore(&pool->lock, flags);
1640
4d1662a3
MS
1641 if (bio_list_empty(&bios) &&
1642 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
991d9fa0
JT
1643 return;
1644
020cc3b5 1645 if (commit(pool)) {
991d9fa0
JT
1646 while ((bio = bio_list_pop(&bios)))
1647 bio_io_error(bio);
1648 return;
1649 }
905e51b3 1650 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
1651
1652 while ((bio = bio_list_pop(&bios)))
1653 generic_make_request(bio);
1654}
1655
1656static void do_worker(struct work_struct *ws)
1657{
1658 struct pool *pool = container_of(ws, struct pool, worker);
1659
8a01a6af 1660 dm_pool_issue_prefetches(pool->pmd);
e49e5829
JT
1661 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1662 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
991d9fa0
JT
1663 process_deferred_bios(pool);
1664}
1665
905e51b3
JT
1666/*
1667 * We want to commit periodically so that not too much
1668 * unwritten data builds up.
1669 */
1670static void do_waker(struct work_struct *ws)
1671{
1672 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1673 wake_worker(pool);
1674 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1675}
1676
85ad643b
JT
1677/*
1678 * We're holding onto IO to allow userland time to react. After the
1679 * timeout either the pool will have been resized (and thus back in
1680 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1681 */
1682static void do_no_space_timeout(struct work_struct *ws)
1683{
1684 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1685 no_space_timeout);
1686
1687 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1688 set_pool_mode(pool, PM_READ_ONLY);
1689}
1690
991d9fa0
JT
1691/*----------------------------------------------------------------*/
1692
e7a3e871 1693struct pool_work {
738211f7 1694 struct work_struct worker;
e7a3e871
JT
1695 struct completion complete;
1696};
1697
1698static struct pool_work *to_pool_work(struct work_struct *ws)
1699{
1700 return container_of(ws, struct pool_work, worker);
1701}
1702
1703static void pool_work_complete(struct pool_work *pw)
1704{
1705 complete(&pw->complete);
1706}
738211f7 1707
e7a3e871
JT
1708static void pool_work_wait(struct pool_work *pw, struct pool *pool,
1709 void (*fn)(struct work_struct *))
1710{
1711 INIT_WORK_ONSTACK(&pw->worker, fn);
1712 init_completion(&pw->complete);
1713 queue_work(pool->wq, &pw->worker);
1714 wait_for_completion(&pw->complete);
1715}
1716
1717/*----------------------------------------------------------------*/
1718
1719struct noflush_work {
1720 struct pool_work pw;
1721 struct thin_c *tc;
738211f7
JT
1722};
1723
e7a3e871 1724static struct noflush_work *to_noflush(struct work_struct *ws)
738211f7 1725{
e7a3e871 1726 return container_of(to_pool_work(ws), struct noflush_work, pw);
738211f7
JT
1727}
1728
1729static void do_noflush_start(struct work_struct *ws)
1730{
e7a3e871 1731 struct noflush_work *w = to_noflush(ws);
738211f7
JT
1732 w->tc->requeue_mode = true;
1733 requeue_io(w->tc);
e7a3e871 1734 pool_work_complete(&w->pw);
738211f7
JT
1735}
1736
1737static void do_noflush_stop(struct work_struct *ws)
1738{
e7a3e871 1739 struct noflush_work *w = to_noflush(ws);
738211f7 1740 w->tc->requeue_mode = false;
e7a3e871 1741 pool_work_complete(&w->pw);
738211f7
JT
1742}
1743
1744static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1745{
1746 struct noflush_work w;
1747
738211f7 1748 w.tc = tc;
e7a3e871 1749 pool_work_wait(&w.pw, tc->pool, fn);
738211f7
JT
1750}
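/*
 * Hypothetical usage sketch (the callers sit outside this excerpt): the thin
 * target would switch a device into and out of requeue mode synchronously
 * with something like
 *
 *   noflush_work(tc, do_noflush_start);   // e.g. when suspending
 *   ...
 *   noflush_work(tc, do_noflush_stop);    // e.g. when resuming
 *
 * pool_work_wait() queues the work on the pool's workqueue and blocks until
 * the worker thread has run it, so the mode change cannot race with the
 * worker's bio processing.
 */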
1751
1752/*----------------------------------------------------------------*/
1753
e49e5829
JT
1754static enum pool_mode get_pool_mode(struct pool *pool)
1755{
1756 return pool->pf.mode;
1757}
1758
3e1a0699
JT
1759static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
1760{
1761 dm_table_event(pool->ti->table);
1762 DMINFO("%s: switching pool to %s mode",
1763 dm_device_name(pool->pool_md), new_mode);
1764}
1765
8b64e881 1766static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
e49e5829 1767{
cdc2b415 1768 struct pool_c *pt = pool->ti->private;
07f2b6e0
MS
1769 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1770 enum pool_mode old_mode = get_pool_mode(pool);
80c57893 1771 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
07f2b6e0
MS
1772
1773 /*
1774 * Never allow the pool to transition to PM_WRITE mode if user
1775 * intervention is required to verify metadata and data consistency.
1776 */
1777 if (new_mode == PM_WRITE && needs_check) {
1778 DMERR("%s: unable to switch pool to write mode until repaired.",
1779 dm_device_name(pool->pool_md));
1780 if (old_mode != new_mode)
1781 new_mode = old_mode;
1782 else
1783 new_mode = PM_READ_ONLY;
1784 }
1785 /*
1786 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1787 * not going to recover without a thin_repair. So we never let the
1788 * pool move out of the old mode.
1789 */
1790 if (old_mode == PM_FAIL)
1791 new_mode = old_mode;
e49e5829 1792
8b64e881 1793 switch (new_mode) {
e49e5829 1794 case PM_FAIL:
8b64e881 1795 if (old_mode != new_mode)
3e1a0699 1796 notify_of_pool_mode_change(pool, "failure");
5383ef3a 1797 dm_pool_metadata_read_only(pool->pmd);
e49e5829
JT
1798 pool->process_bio = process_bio_fail;
1799 pool->process_discard = process_bio_fail;
1800 pool->process_prepared_mapping = process_prepared_mapping_fail;
1801 pool->process_prepared_discard = process_prepared_discard_fail;
3e1a0699
JT
1802
1803 error_retry_list(pool);
e49e5829
JT
1804 break;
1805
1806 case PM_READ_ONLY:
8b64e881 1807 if (old_mode != new_mode)
3e1a0699
JT
1808 notify_of_pool_mode_change(pool, "read-only");
1809 dm_pool_metadata_read_only(pool->pmd);
1810 pool->process_bio = process_bio_read_only;
1811 pool->process_discard = process_bio_success;
1812 pool->process_prepared_mapping = process_prepared_mapping_fail;
1813 pool->process_prepared_discard = process_prepared_discard_passdown;
1814
1815 error_retry_list(pool);
1816 break;
1817
1818 case PM_OUT_OF_DATA_SPACE:
1819 /*
1820 * Ideally we'd never hit this state; the low water mark
1821 * would trigger userland to extend the pool before we
1822 * completely run out of data space. However, many small
1823 * IOs to unprovisioned space can consume data space at an
1824 * alarming rate. Adjust your low water mark if you're
1825 * frequently seeing this mode.
1826 */
1827 if (old_mode != new_mode)
1828 notify_of_pool_mode_change(pool, "out-of-data-space");
1829 pool->process_bio = process_bio_read_only;
1830 pool->process_discard = process_discard;
1831 pool->process_prepared_mapping = process_prepared_mapping;
1832 pool->process_prepared_discard = process_prepared_discard_passdown;
85ad643b 1833
80c57893
MS
1834 if (!pool->pf.error_if_no_space && no_space_timeout)
1835 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
e49e5829
JT
1836 break;
1837
1838 case PM_WRITE:
8b64e881 1839 if (old_mode != new_mode)
3e1a0699 1840 notify_of_pool_mode_change(pool, "write");
9b7aaa64 1841 dm_pool_metadata_read_write(pool->pmd);
e49e5829
JT
1842 pool->process_bio = process_bio;
1843 pool->process_discard = process_discard;
1844 pool->process_prepared_mapping = process_prepared_mapping;
1845 pool->process_prepared_discard = process_prepared_discard;
1846 break;
1847 }
8b64e881
MS
1848
1849 pool->pf.mode = new_mode;
cdc2b415
MS
1850 /*
1851 * The pool mode may have changed, sync it so bind_control_target()
1852 * doesn't cause an unexpected mode transition on resume.
1853 */
1854 pt->adjusted_pf.mode = new_mode;
e49e5829
JT
1855}
1856
07f2b6e0 1857static void abort_transaction(struct pool *pool)
b5330655 1858{
07f2b6e0
MS
1859 const char *dev_name = dm_device_name(pool->pool_md);
1860
1861 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1862 if (dm_pool_abort_metadata(pool->pmd)) {
1863 DMERR("%s: failed to abort metadata transaction", dev_name);
1864 set_pool_mode(pool, PM_FAIL);
1865 }
1866
1867 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1868 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1869 set_pool_mode(pool, PM_FAIL);
1870 }
1871}
399caddf 1872
07f2b6e0
MS
1873static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1874{
b5330655
JT
1875 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1876 dm_device_name(pool->pool_md), op, r);
1877
07f2b6e0 1878 abort_transaction(pool);
b5330655
JT
1879 set_pool_mode(pool, PM_READ_ONLY);
1880}
1881
e49e5829
JT
1882/*----------------------------------------------------------------*/
1883
991d9fa0
JT
1884/*
1885 * Mapping functions.
1886 */
1887
1888/*
1889 * Called only while mapping a thin bio to hand it over to the workqueue.
1890 */
1891static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1892{
1893 unsigned long flags;
1894 struct pool *pool = tc->pool;
1895
c140e1c4
MS
1896 spin_lock_irqsave(&tc->lock, flags);
1897 bio_list_add(&tc->deferred_bio_list, bio);
1898 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1899
1900 wake_worker(pool);
1901}
1902
59c3d2c6 1903static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
eb2aa48d 1904{
59c3d2c6 1905 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d
JT
1906
1907 h->tc = tc;
1908 h->shared_read_entry = NULL;
e8088073 1909 h->all_io_entry = NULL;
eb2aa48d 1910 h->overwrite_mapping = NULL;
eb2aa48d
JT
1911}
1912
991d9fa0
JT
1913/*
1914 * Non-blocking function called from the thin target's map function.
1915 */
7de3ee57 1916static int thin_bio_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
1917{
1918 int r;
1919 struct thin_c *tc = ti->private;
1920 dm_block_t block = get_bio_block(tc, bio);
1921 struct dm_thin_device *td = tc->td;
1922 struct dm_thin_lookup_result result;
025b9685
JT
1923 struct dm_bio_prison_cell cell1, cell2;
1924 struct dm_bio_prison_cell *cell_result;
e8088073 1925 struct dm_cell_key key;
991d9fa0 1926
59c3d2c6 1927 thin_hook_bio(tc, bio);
e49e5829 1928
738211f7
JT
1929 if (tc->requeue_mode) {
1930 bio_endio(bio, DM_ENDIO_REQUEUE);
1931 return DM_MAPIO_SUBMITTED;
1932 }
1933
e49e5829
JT
1934 if (get_pool_mode(tc->pool) == PM_FAIL) {
1935 bio_io_error(bio);
1936 return DM_MAPIO_SUBMITTED;
1937 }
1938
104655fd 1939 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
991d9fa0
JT
1940 thin_defer_bio(tc, bio);
1941 return DM_MAPIO_SUBMITTED;
1942 }
1943
c822ed96
JT
1944 /*
1945 * We must hold the virtual cell before doing the lookup, otherwise
1946 * there's a race with discard.
1947 */
1948 build_virtual_key(tc->td, block, &key);
1949 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1950 return DM_MAPIO_SUBMITTED;
1951
991d9fa0
JT
1952 r = dm_thin_find_block(td, block, 0, &result);
1953
1954 /*
1955 * Note that we defer readahead too.
1956 */
1957 switch (r) {
1958 case 0:
1959 if (unlikely(result.shared)) {
1960 /*
1961 * We have a race condition here between the
1962 * result.shared value returned by the lookup and
1963 * snapshot creation, which may cause new
1964 * sharing.
1965 *
1966 * To avoid this always quiesce the origin before
1967 * taking the snap. You want to do this anyway to
1968 * ensure a consistent application view
1969 * (i.e. lockfs).
1970 *
1971 * More distant ancestors are irrelevant. The
1972 * shared flag will be set in their case.
1973 */
1974 thin_defer_bio(tc, bio);
c822ed96 1975 cell_defer_no_holder_no_free(tc, &cell1);
e8088073 1976 return DM_MAPIO_SUBMITTED;
991d9fa0 1977 }
e8088073 1978
e8088073 1979 build_data_key(tc->td, result.block, &key);
025b9685
JT
1980 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1981 cell_defer_no_holder_no_free(tc, &cell1);
e8088073
JT
1982 return DM_MAPIO_SUBMITTED;
1983 }
1984
1985 inc_all_io_entry(tc->pool, bio);
025b9685
JT
1986 cell_defer_no_holder_no_free(tc, &cell2);
1987 cell_defer_no_holder_no_free(tc, &cell1);
e8088073
JT
1988
1989 remap(tc, bio, result.block);
1990 return DM_MAPIO_REMAPPED;
991d9fa0
JT
1991
1992 case -ENODATA:
e49e5829
JT
1993 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1994 /*
1995 * This block isn't provisioned, and we have no way
8c0f0e8c 1996 * of doing so.
e49e5829 1997 */
8c0f0e8c 1998 handle_unserviceable_bio(tc->pool, bio);
c822ed96 1999 cell_defer_no_holder_no_free(tc, &cell1);
2aab3850 2000 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2001 }
2002 /* fall through */
2003
2004 case -EWOULDBLOCK:
991d9fa0 2005 thin_defer_bio(tc, bio);
c822ed96 2006 cell_defer_no_holder_no_free(tc, &cell1);
2aab3850 2007 return DM_MAPIO_SUBMITTED;
e49e5829
JT
2008
2009 default:
2010 /*
2011 * Must always call bio_io_error on failure.
2012 * dm_thin_find_block can fail with -EINVAL if the
2013 * pool is switched to fail-io mode.
2014 */
2015 bio_io_error(bio);
c822ed96 2016 cell_defer_no_holder_no_free(tc, &cell1);
2aab3850 2017 return DM_MAPIO_SUBMITTED;
991d9fa0 2018 }
991d9fa0
JT
2019}
2020
2021static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
2022{
991d9fa0 2023 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
760fe67e 2024 struct request_queue *q;
991d9fa0 2025
760fe67e
MS
2026 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
2027 return 1;
991d9fa0 2028
760fe67e
MS
2029 q = bdev_get_queue(pt->data_dev->bdev);
2030 return bdi_congested(&q->backing_dev_info, bdi_bits);
991d9fa0
JT
2031}
2032
c140e1c4 2033static void requeue_bios(struct pool *pool)
991d9fa0 2034{
c140e1c4
MS
2035 unsigned long flags;
2036 struct thin_c *tc;
2037
2038 rcu_read_lock();
2039 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
2040 spin_lock_irqsave(&tc->lock, flags);
2041 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
2042 bio_list_init(&tc->retry_on_resume_list);
2043 spin_unlock_irqrestore(&tc->lock, flags);
2044 }
2045 rcu_read_unlock();
991d9fa0
JT
2046}
2047
2048/*----------------------------------------------------------------
2049 * Binding of control targets to a pool object
2050 *--------------------------------------------------------------*/
9bc142dd
MS
2051static bool data_dev_supports_discard(struct pool_c *pt)
2052{
2053 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2054
2055 return q && blk_queue_discard(q);
2056}
2057
58051b94
JT
2058static bool is_factor(sector_t block_size, uint32_t n)
2059{
2060 return !sector_div(block_size, n);
2061}
2062
9bc142dd
MS
2063/*
2064 * If discard_passdown was enabled verify that the data device
0424caa1 2065 * supports discards. Disable discard_passdown if not.
9bc142dd 2066 */
0424caa1 2067static void disable_passdown_if_not_supported(struct pool_c *pt)
9bc142dd 2068{
0424caa1
MS
2069 struct pool *pool = pt->pool;
2070 struct block_device *data_bdev = pt->data_dev->bdev;
2071 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
2072 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
2073 const char *reason = NULL;
9bc142dd
MS
2074 char buf[BDEVNAME_SIZE];
2075
0424caa1 2076 if (!pt->adjusted_pf.discard_passdown)
9bc142dd
MS
2077 return;
2078
0424caa1
MS
2079 if (!data_dev_supports_discard(pt))
2080 reason = "discard unsupported";
2081
2082 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2083 reason = "max discard sectors smaller than a block";
9bc142dd 2084
0424caa1
MS
2085 else if (data_limits->discard_granularity > block_size)
2086 reason = "discard granularity larger than a block";
2087
58051b94 2088 else if (!is_factor(block_size, data_limits->discard_granularity))
0424caa1
MS
2089 reason = "discard granularity not a factor of block size";
2090
2091 if (reason) {
2092 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2093 pt->adjusted_pf.discard_passdown = false;
2094 }
9bc142dd
MS
2095}
2096
991d9fa0
JT
2097static int bind_control_target(struct pool *pool, struct dm_target *ti)
2098{
2099 struct pool_c *pt = ti->private;
2100
e49e5829 2101 /*
9b7aaa64 2102 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
e49e5829 2103 */
07f2b6e0 2104 enum pool_mode old_mode = get_pool_mode(pool);
0424caa1 2105 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829 2106
8b64e881
MS
2107 /*
2108 * Don't change the pool's mode until set_pool_mode() below.
2109 * Otherwise the pool's process_* function pointers may
2110 * not match the desired pool mode.
2111 */
2112 pt->adjusted_pf.mode = old_mode;
2113
2114 pool->ti = ti;
2115 pool->pf = pt->adjusted_pf;
2116 pool->low_water_blocks = pt->low_water_blocks;
2117
9bc142dd 2118 set_pool_mode(pool, new_mode);
f402693d 2119
991d9fa0
JT
2120 return 0;
2121}
2122
2123static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2124{
2125 if (pool->ti == ti)
2126 pool->ti = NULL;
2127}
2128
2129/*----------------------------------------------------------------
2130 * Pool creation
2131 *--------------------------------------------------------------*/
67e2e2b2
JT
2132/* Initialize pool features. */
2133static void pool_features_init(struct pool_features *pf)
2134{
e49e5829 2135 pf->mode = PM_WRITE;
9bc142dd
MS
2136 pf->zero_new_blocks = true;
2137 pf->discard_enabled = true;
2138 pf->discard_passdown = true;
787a996c 2139 pf->error_if_no_space = false;
67e2e2b2
JT
2140}
2141
991d9fa0
JT
2142static void __pool_destroy(struct pool *pool)
2143{
2144 __pool_table_remove(pool);
2145
2146 if (dm_pool_metadata_close(pool->pmd) < 0)
2147 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2148
44feb387 2149 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2150 dm_kcopyd_client_destroy(pool->copier);
2151
2152 if (pool->wq)
2153 destroy_workqueue(pool->wq);
2154
2155 if (pool->next_mapping)
2156 mempool_free(pool->next_mapping, pool->mapping_pool);
2157 mempool_destroy(pool->mapping_pool);
44feb387
MS
2158 dm_deferred_set_destroy(pool->shared_read_ds);
2159 dm_deferred_set_destroy(pool->all_io_ds);
991d9fa0
JT
2160 kfree(pool);
2161}
2162
a24c2569 2163static struct kmem_cache *_new_mapping_cache;
a24c2569 2164
991d9fa0
JT
2165static struct pool *pool_create(struct mapped_device *pool_md,
2166 struct block_device *metadata_dev,
e49e5829
JT
2167 unsigned long block_size,
2168 int read_only, char **error)
991d9fa0
JT
2169{
2170 int r;
2171 void *err_p;
2172 struct pool *pool;
2173 struct dm_pool_metadata *pmd;
e49e5829 2174 bool format_device = read_only ? false : true;
991d9fa0 2175
e49e5829 2176 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
991d9fa0
JT
2177 if (IS_ERR(pmd)) {
2178 *error = "Error creating metadata object";
2179 return (struct pool *)pmd;
2180 }
2181
2182 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2183 if (!pool) {
2184 *error = "Error allocating memory for pool";
2185 err_p = ERR_PTR(-ENOMEM);
2186 goto bad_pool;
2187 }
2188
2189 pool->pmd = pmd;
2190 pool->sectors_per_block = block_size;
f9a8e0cd
MP
2191 if (block_size & (block_size - 1))
2192 pool->sectors_per_block_shift = -1;
2193 else
2194 pool->sectors_per_block_shift = __ffs(block_size);
991d9fa0 2195 pool->low_water_blocks = 0;
67e2e2b2 2196 pool_features_init(&pool->pf);
a195db2d 2197 pool->prison = dm_bio_prison_create();
991d9fa0
JT
2198 if (!pool->prison) {
2199 *error = "Error creating pool's bio prison";
2200 err_p = ERR_PTR(-ENOMEM);
2201 goto bad_prison;
2202 }
2203
df5d2e90 2204 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
991d9fa0
JT
2205 if (IS_ERR(pool->copier)) {
2206 r = PTR_ERR(pool->copier);
2207 *error = "Error creating pool's kcopyd client";
2208 err_p = ERR_PTR(r);
2209 goto bad_kcopyd_client;
2210 }
2211
2212 /*
2213 * Create singlethreaded workqueue that will service all devices
2214 * that use this metadata.
2215 */
2216 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2217 if (!pool->wq) {
2218 *error = "Error creating pool's workqueue";
2219 err_p = ERR_PTR(-ENOMEM);
2220 goto bad_wq;
2221 }
2222
2223 INIT_WORK(&pool->worker, do_worker);
905e51b3 2224 INIT_DELAYED_WORK(&pool->waker, do_waker);
85ad643b 2225 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
991d9fa0 2226 spin_lock_init(&pool->lock);
991d9fa0
JT
2227 bio_list_init(&pool->deferred_flush_bios);
2228 INIT_LIST_HEAD(&pool->prepared_mappings);
104655fd 2229 INIT_LIST_HEAD(&pool->prepared_discards);
c140e1c4 2230 INIT_LIST_HEAD(&pool->active_thins);
88a6621b 2231 pool->low_water_triggered = false;
44feb387
MS
2232
2233 pool->shared_read_ds = dm_deferred_set_create();
2234 if (!pool->shared_read_ds) {
2235 *error = "Error creating pool's shared read deferred set";
2236 err_p = ERR_PTR(-ENOMEM);
2237 goto bad_shared_read_ds;
2238 }
2239
2240 pool->all_io_ds = dm_deferred_set_create();
2241 if (!pool->all_io_ds) {
2242 *error = "Error creating pool's all io deferred set";
2243 err_p = ERR_PTR(-ENOMEM);
2244 goto bad_all_io_ds;
2245 }
991d9fa0
JT
2246
2247 pool->next_mapping = NULL;
a24c2569
MS
2248 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2249 _new_mapping_cache);
991d9fa0
JT
2250 if (!pool->mapping_pool) {
2251 *error = "Error creating pool's mapping mempool";
2252 err_p = ERR_PTR(-ENOMEM);
2253 goto bad_mapping_pool;
2254 }
2255
991d9fa0 2256 pool->ref_count = 1;
905e51b3 2257 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2258 pool->pool_md = pool_md;
2259 pool->md_dev = metadata_dev;
2260 __pool_table_insert(pool);
2261
2262 return pool;
2263
991d9fa0 2264bad_mapping_pool:
44feb387
MS
2265 dm_deferred_set_destroy(pool->all_io_ds);
2266bad_all_io_ds:
2267 dm_deferred_set_destroy(pool->shared_read_ds);
2268bad_shared_read_ds:
991d9fa0
JT
2269 destroy_workqueue(pool->wq);
2270bad_wq:
2271 dm_kcopyd_client_destroy(pool->copier);
2272bad_kcopyd_client:
44feb387 2273 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2274bad_prison:
2275 kfree(pool);
2276bad_pool:
2277 if (dm_pool_metadata_close(pmd))
2278 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2279
2280 return err_p;
2281}
2282
2283static void __pool_inc(struct pool *pool)
2284{
2285 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2286 pool->ref_count++;
2287}
2288
2289static void __pool_dec(struct pool *pool)
2290{
2291 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2292 BUG_ON(!pool->ref_count);
2293 if (!--pool->ref_count)
2294 __pool_destroy(pool);
2295}
2296
2297static struct pool *__pool_find(struct mapped_device *pool_md,
2298 struct block_device *metadata_dev,
e49e5829
JT
2299 unsigned long block_size, int read_only,
2300 char **error, int *created)
991d9fa0
JT
2301{
2302 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2303
2304 if (pool) {
f09996c9
MS
2305 if (pool->pool_md != pool_md) {
2306 *error = "metadata device already in use by a pool";
991d9fa0 2307 return ERR_PTR(-EBUSY);
f09996c9 2308 }
991d9fa0
JT
2309 __pool_inc(pool);
2310
2311 } else {
2312 pool = __pool_table_lookup(pool_md);
2313 if (pool) {
f09996c9
MS
2314 if (pool->md_dev != metadata_dev) {
2315 *error = "different pool cannot replace a pool";
991d9fa0 2316 return ERR_PTR(-EINVAL);
f09996c9 2317 }
991d9fa0
JT
2318 __pool_inc(pool);
2319
67e2e2b2 2320 } else {
e49e5829 2321 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
67e2e2b2
JT
2322 *created = 1;
2323 }
991d9fa0
JT
2324 }
2325
2326 return pool;
2327}
2328
2329/*----------------------------------------------------------------
2330 * Pool target methods
2331 *--------------------------------------------------------------*/
2332static void pool_dtr(struct dm_target *ti)
2333{
2334 struct pool_c *pt = ti->private;
2335
2336 mutex_lock(&dm_thin_pool_table.mutex);
2337
2338 unbind_control_target(pt->pool, ti);
2339 __pool_dec(pt->pool);
2340 dm_put_device(ti, pt->metadata_dev);
2341 dm_put_device(ti, pt->data_dev);
2342 kfree(pt);
2343
2344 mutex_unlock(&dm_thin_pool_table.mutex);
2345}
2346
991d9fa0
JT
2347static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2348 struct dm_target *ti)
2349{
2350 int r;
2351 unsigned argc;
2352 const char *arg_name;
2353
2354 static struct dm_arg _args[] = {
74aa45c3 2355 {0, 4, "Invalid number of pool feature arguments"},
991d9fa0
JT
2356 };
2357
2358 /*
2359 * No feature arguments supplied.
2360 */
2361 if (!as->argc)
2362 return 0;
2363
2364 r = dm_read_arg_group(_args, as, &argc, &ti->error);
2365 if (r)
2366 return -EINVAL;
2367
2368 while (argc && !r) {
2369 arg_name = dm_shift_arg(as);
2370 argc--;
2371
e49e5829 2372 if (!strcasecmp(arg_name, "skip_block_zeroing"))
9bc142dd 2373 pf->zero_new_blocks = false;
e49e5829
JT
2374
2375 else if (!strcasecmp(arg_name, "ignore_discard"))
9bc142dd 2376 pf->discard_enabled = false;
e49e5829
JT
2377
2378 else if (!strcasecmp(arg_name, "no_discard_passdown"))
9bc142dd 2379 pf->discard_passdown = false;
991d9fa0 2380
e49e5829
JT
2381 else if (!strcasecmp(arg_name, "read_only"))
2382 pf->mode = PM_READ_ONLY;
2383
787a996c
MS
2384 else if (!strcasecmp(arg_name, "error_if_no_space"))
2385 pf->error_if_no_space = true;
2386
e49e5829
JT
2387 else {
2388 ti->error = "Unrecognised pool feature requested";
2389 r = -EINVAL;
2390 break;
2391 }
991d9fa0
JT
2392 }
2393
2394 return r;
2395}
2396
ac8c3f3d
JT
2397static void metadata_low_callback(void *context)
2398{
2399 struct pool *pool = context;
2400
2401 DMWARN("%s: reached low water mark for metadata device: sending event.",
2402 dm_device_name(pool->pool_md));
2403
2404 dm_table_event(pool->ti->table);
2405}
2406
7d48935e
MS
2407static sector_t get_dev_size(struct block_device *bdev)
2408{
2409 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2410}
2411
2412static void warn_if_metadata_device_too_big(struct block_device *bdev)
b17446df 2413{
7d48935e 2414 sector_t metadata_dev_size = get_dev_size(bdev);
b17446df
JT
2415 char buffer[BDEVNAME_SIZE];
2416
7d48935e 2417 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
b17446df
JT
2418 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2419 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
7d48935e
MS
2420}
2421
2422static sector_t get_metadata_dev_size(struct block_device *bdev)
2423{
2424 sector_t metadata_dev_size = get_dev_size(bdev);
2425
2426 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2427 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
b17446df
JT
2428
2429 return metadata_dev_size;
2430}
2431
24347e95
JT
2432static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2433{
2434 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2435
7d48935e 2436 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
24347e95
JT
2437
2438 return metadata_dev_size;
2439}
2440
ac8c3f3d
JT
2441/*
2442 * When a metadata threshold is crossed a dm event is triggered, and
2443 * userland should respond by growing the metadata device. We could let
2444 * userland set the threshold, like we do with the data threshold, but I'm
2445 * not sure they know enough to do this well.
2446 */
2447static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2448{
2449 /*
2450 * 4M is ample for all ops with the possible exception of thin
2451 * device deletion which is harmless if it fails (just retry the
2452 * delete after you've grown the device).
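 *
 * Worked example (assuming the usual 4KiB metadata block size): a 1GiB
 * metadata device holds 262144 blocks, so the quarter is 65536 and the
 * min() below clamps the threshold to 1024 blocks, i.e. 4MiB.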
2453 */
2454 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2455 return min((dm_block_t)1024ULL /* 4M */, quarter);
2456}
2457
991d9fa0
JT
2458/*
2459 * thin-pool <metadata dev> <data dev>
2460 * <data block size (sectors)>
2461 * <low water mark (blocks)>
2462 * [<#feature args> [<arg>]*]
2463 *
2464 * Optional feature arguments are:
2465 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
67e2e2b2
JT
2466 * ignore_discard: disable discard
2467 * no_discard_passdown: don't pass discards down to the data device
787a996c
MS
2468 * read_only: Don't allow any changes to be made to the pool metadata.
2469 * error_if_no_space: error IOs, instead of queueing, if no space.
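 *
 * An illustrative table line (device paths and sizes are hypothetical):
 *   0 409600000 thin-pool /dev/sdc1 /dev/sdc2 128 32768 1 skip_block_zeroing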
991d9fa0
JT
2470 */
2471static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2472{
67e2e2b2 2473 int r, pool_created = 0;
991d9fa0
JT
2474 struct pool_c *pt;
2475 struct pool *pool;
2476 struct pool_features pf;
2477 struct dm_arg_set as;
2478 struct dm_dev *data_dev;
2479 unsigned long block_size;
2480 dm_block_t low_water_blocks;
2481 struct dm_dev *metadata_dev;
5d0db96d 2482 fmode_t metadata_mode;
991d9fa0
JT
2483
2484 /*
2485 * FIXME Remove validation from scope of lock.
2486 */
2487 mutex_lock(&dm_thin_pool_table.mutex);
2488
2489 if (argc < 4) {
2490 ti->error = "Invalid argument count";
2491 r = -EINVAL;
2492 goto out_unlock;
2493 }
5d0db96d 2494
991d9fa0
JT
2495 as.argc = argc;
2496 as.argv = argv;
2497
5d0db96d
JT
2498 /*
2499 * Set default pool features.
2500 */
2501 pool_features_init(&pf);
2502
2503 dm_consume_args(&as, 4);
2504 r = parse_pool_features(&as, &pf, ti);
2505 if (r)
2506 goto out_unlock;
2507
2508 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2509 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
991d9fa0
JT
2510 if (r) {
2511 ti->error = "Error opening metadata block device";
2512 goto out_unlock;
2513 }
7d48935e 2514 warn_if_metadata_device_too_big(metadata_dev->bdev);
991d9fa0
JT
2515
2516 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2517 if (r) {
2518 ti->error = "Error getting data device";
2519 goto out_metadata;
2520 }
2521
2522 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2523 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2524 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 2525 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
2526 ti->error = "Invalid block size";
2527 r = -EINVAL;
2528 goto out;
2529 }
2530
2531 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2532 ti->error = "Invalid low water mark";
2533 r = -EINVAL;
2534 goto out;
2535 }
2536
991d9fa0
JT
2537 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2538 if (!pt) {
2539 r = -ENOMEM;
2540 goto out;
2541 }
2542
2543 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 2544 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
2545 if (IS_ERR(pool)) {
2546 r = PTR_ERR(pool);
2547 goto out_free_pt;
2548 }
2549
67e2e2b2
JT
2550 /*
2551 * 'pool_created' reflects whether this is the first table load.
2552 * Top level discard support is not allowed to be changed after
2553 * initial load. This would require a pool reload to trigger thin
2554 * device changes.
2555 */
2556 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2557 ti->error = "Discard support cannot be disabled once enabled";
2558 r = -EINVAL;
2559 goto out_flags_changed;
2560 }
2561
991d9fa0
JT
2562 pt->pool = pool;
2563 pt->ti = ti;
2564 pt->metadata_dev = metadata_dev;
2565 pt->data_dev = data_dev;
2566 pt->low_water_blocks = low_water_blocks;
0424caa1 2567 pt->adjusted_pf = pt->requested_pf = pf;
55a62eef 2568 ti->num_flush_bios = 1;
9bc142dd 2569
67e2e2b2
JT
2570 /*
2571 * Only need to enable discards if the pool should pass
2572 * them down to the data device. The thin device's discard
2573 * processing will cause mappings to be removed from the btree.
2574 */
b60ab990 2575 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 2576 if (pf.discard_enabled && pf.discard_passdown) {
55a62eef 2577 ti->num_discard_bios = 1;
9bc142dd 2578
67e2e2b2
JT
2579 /*
2580 * Setting 'discards_supported' circumvents the normal
2581 * stacking of discard limits (this keeps the pool and
2582 * thin devices' discard limits consistent).
2583 */
0ac55489 2584 ti->discards_supported = true;
67e2e2b2 2585 }
991d9fa0
JT
2586 ti->private = pt;
2587
ac8c3f3d
JT
2588 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2589 calc_metadata_threshold(pt),
2590 metadata_low_callback,
2591 pool);
2592 if (r)
2593 goto out_free_pt;
2594
991d9fa0
JT
2595 pt->callbacks.congested_fn = pool_is_congested;
2596 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2597
2598 mutex_unlock(&dm_thin_pool_table.mutex);
2599
2600 return 0;
2601
67e2e2b2
JT
2602out_flags_changed:
2603 __pool_dec(pool);
991d9fa0
JT
2604out_free_pt:
2605 kfree(pt);
2606out:
2607 dm_put_device(ti, data_dev);
2608out_metadata:
2609 dm_put_device(ti, metadata_dev);
2610out_unlock:
2611 mutex_unlock(&dm_thin_pool_table.mutex);
2612
2613 return r;
2614}
2615
7de3ee57 2616static int pool_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2617{
2618 int r;
2619 struct pool_c *pt = ti->private;
2620 struct pool *pool = pt->pool;
2621 unsigned long flags;
2622
2623 /*
2624 * As this is a singleton target, ti->begin is always zero.
2625 */
2626 spin_lock_irqsave(&pool->lock, flags);
2627 bio->bi_bdev = pt->data_dev->bdev;
2628 r = DM_MAPIO_REMAPPED;
2629 spin_unlock_irqrestore(&pool->lock, flags);
2630
2631 return r;
2632}
2633
b17446df 2634static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
991d9fa0
JT
2635{
2636 int r;
2637 struct pool_c *pt = ti->private;
2638 struct pool *pool = pt->pool;
55f2b8bd
MS
2639 sector_t data_size = ti->len;
2640 dm_block_t sb_data_size;
991d9fa0 2641
b17446df 2642 *need_commit = false;
991d9fa0 2643
55f2b8bd
MS
2644 (void) sector_div(data_size, pool->sectors_per_block);
2645
991d9fa0
JT
2646 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2647 if (r) {
4fa5971a
MS
2648 DMERR("%s: failed to retrieve data device size",
2649 dm_device_name(pool->pool_md));
991d9fa0
JT
2650 return r;
2651 }
2652
2653 if (data_size < sb_data_size) {
4fa5971a
MS
2654 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2655 dm_device_name(pool->pool_md),
55f2b8bd 2656 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
2657 return -EINVAL;
2658
2659 } else if (data_size > sb_data_size) {
07f2b6e0
MS
2660 if (dm_pool_metadata_needs_check(pool->pmd)) {
2661 DMERR("%s: unable to grow the data device until repaired.",
2662 dm_device_name(pool->pool_md));
2663 return 0;
2664 }
2665
6f7f51d4
MS
2666 if (sb_data_size)
2667 DMINFO("%s: growing the data device from %llu to %llu blocks",
2668 dm_device_name(pool->pool_md),
2669 sb_data_size, (unsigned long long)data_size);
991d9fa0
JT
2670 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2671 if (r) {
b5330655 2672 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
991d9fa0
JT
2673 return r;
2674 }
2675
b17446df 2676 *need_commit = true;
991d9fa0
JT
2677 }
2678
2679 return 0;
2680}
2681
24347e95
JT
2682static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2683{
2684 int r;
2685 struct pool_c *pt = ti->private;
2686 struct pool *pool = pt->pool;
2687 dm_block_t metadata_dev_size, sb_metadata_dev_size;
2688
2689 *need_commit = false;
2690
610bba8b 2691 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
24347e95
JT
2692
2693 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2694 if (r) {
4fa5971a
MS
2695 DMERR("%s: failed to retrieve metadata device size",
2696 dm_device_name(pool->pool_md));
24347e95
JT
2697 return r;
2698 }
2699
2700 if (metadata_dev_size < sb_metadata_dev_size) {
4fa5971a
MS
2701 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2702 dm_device_name(pool->pool_md),
24347e95
JT
2703 metadata_dev_size, sb_metadata_dev_size);
2704 return -EINVAL;
2705
2706 } else if (metadata_dev_size > sb_metadata_dev_size) {
07f2b6e0
MS
2707 if (dm_pool_metadata_needs_check(pool->pmd)) {
2708 DMERR("%s: unable to grow the metadata device until repaired.",
2709 dm_device_name(pool->pool_md));
2710 return 0;
2711 }
2712
7d48935e 2713 warn_if_metadata_device_too_big(pool->md_dev);
6f7f51d4
MS
2714 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2715 dm_device_name(pool->pool_md),
2716 sb_metadata_dev_size, metadata_dev_size);
24347e95
JT
2717 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2718 if (r) {
b5330655 2719 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
24347e95
JT
2720 return r;
2721 }
2722
2723 *need_commit = true;
2724 }
2725
2726 return 0;
2727}
2728
b17446df
JT
2729/*
2730 * Retrieves the number of blocks of the data device from
2731 * the superblock and compares it to the actual device size,
2732 * thus resizing the data device in case it has grown.
2733 *
2734 * This both copes with opening preallocated data devices in the ctr
2735 * being followed by a resume
2736 * -and-
2737 * calling the resume method individually after userspace has
2738 * grown the data device in reaction to a table event.
2739 */
2740static int pool_preresume(struct dm_target *ti)
2741{
2742 int r;
24347e95 2743 bool need_commit1, need_commit2;
b17446df
JT
2744 struct pool_c *pt = ti->private;
2745 struct pool *pool = pt->pool;
2746
2747 /*
2748 * Take control of the pool object.
2749 */
2750 r = bind_control_target(pool, ti);
2751 if (r)
2752 return r;
2753
2754 r = maybe_resize_data_dev(ti, &need_commit1);
2755 if (r)
2756 return r;
2757
24347e95
JT
2758 r = maybe_resize_metadata_dev(ti, &need_commit2);
2759 if (r)
2760 return r;
2761
2762 if (need_commit1 || need_commit2)
020cc3b5 2763 (void) commit(pool);
b17446df
JT
2764
2765 return 0;
2766}
2767
991d9fa0
JT
2768static void pool_resume(struct dm_target *ti)
2769{
2770 struct pool_c *pt = ti->private;
2771 struct pool *pool = pt->pool;
2772 unsigned long flags;
2773
2774 spin_lock_irqsave(&pool->lock, flags);
88a6621b 2775 pool->low_water_triggered = false;
991d9fa0 2776 spin_unlock_irqrestore(&pool->lock, flags);
c140e1c4 2777 requeue_bios(pool);
991d9fa0 2778
905e51b3 2779 do_waker(&pool->waker.work);
991d9fa0
JT
2780}
2781
2782static void pool_postsuspend(struct dm_target *ti)
2783{
991d9fa0
JT
2784 struct pool_c *pt = ti->private;
2785 struct pool *pool = pt->pool;
2786
905e51b3 2787 cancel_delayed_work(&pool->waker);
85ad643b 2788 cancel_delayed_work(&pool->no_space_timeout);
991d9fa0 2789 flush_workqueue(pool->wq);
020cc3b5 2790 (void) commit(pool);
991d9fa0
JT
2791}
2792
2793static int check_arg_count(unsigned argc, unsigned args_required)
2794{
2795 if (argc != args_required) {
2796 DMWARN("Message received with %u arguments instead of %u.",
2797 argc, args_required);
2798 return -EINVAL;
2799 }
2800
2801 return 0;
2802}
2803
2804static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2805{
2806 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2807 *dev_id <= MAX_DEV_ID)
2808 return 0;
2809
2810 if (warning)
2811 DMWARN("Message received with invalid device id: %s", arg);
2812
2813 return -EINVAL;
2814}
2815
2816static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2817{
2818 dm_thin_id dev_id;
2819 int r;
2820
2821 r = check_arg_count(argc, 2);
2822 if (r)
2823 return r;
2824
2825 r = read_dev_id(argv[1], &dev_id, 1);
2826 if (r)
2827 return r;
2828
2829 r = dm_pool_create_thin(pool->pmd, dev_id);
2830 if (r) {
2831 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2832 argv[1]);
2833 return r;
2834 }
2835
2836 return 0;
2837}
2838
2839static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2840{
2841 dm_thin_id dev_id;
2842 dm_thin_id origin_dev_id;
2843 int r;
2844
2845 r = check_arg_count(argc, 3);
2846 if (r)
2847 return r;
2848
2849 r = read_dev_id(argv[1], &dev_id, 1);
2850 if (r)
2851 return r;
2852
2853 r = read_dev_id(argv[2], &origin_dev_id, 1);
2854 if (r)
2855 return r;
2856
2857 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2858 if (r) {
2859 DMWARN("Creation of new snapshot %s of device %s failed.",
2860 argv[1], argv[2]);
2861 return r;
2862 }
2863
2864 return 0;
2865}
2866
2867static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2868{
2869 dm_thin_id dev_id;
2870 int r;
2871
2872 r = check_arg_count(argc, 2);
2873 if (r)
2874 return r;
2875
2876 r = read_dev_id(argv[1], &dev_id, 1);
2877 if (r)
2878 return r;
2879
2880 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2881 if (r)
2882 DMWARN("Deletion of thin device %s failed.", argv[1]);
2883
2884 return r;
2885}
2886
2887static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2888{
2889 dm_thin_id old_id, new_id;
2890 int r;
2891
2892 r = check_arg_count(argc, 3);
2893 if (r)
2894 return r;
2895
2896 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2897 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2898 return -EINVAL;
2899 }
2900
2901 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2902 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2903 return -EINVAL;
2904 }
2905
2906 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2907 if (r) {
2908 DMWARN("Failed to change transaction id from %s to %s.",
2909 argv[1], argv[2]);
2910 return r;
2911 }
2912
2913 return 0;
2914}
2915
cc8394d8
JT
2916static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2917{
2918 int r;
2919
2920 r = check_arg_count(argc, 1);
2921 if (r)
2922 return r;
2923
020cc3b5 2924 (void) commit(pool);
0d200aef 2925
cc8394d8
JT
2926 r = dm_pool_reserve_metadata_snap(pool->pmd);
2927 if (r)
2928 DMWARN("reserve_metadata_snap message failed.");
2929
2930 return r;
2931}
2932
2933static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2934{
2935 int r;
2936
2937 r = check_arg_count(argc, 1);
2938 if (r)
2939 return r;
2940
2941 r = dm_pool_release_metadata_snap(pool->pmd);
2942 if (r)
2943 DMWARN("release_metadata_snap message failed.");
2944
2945 return r;
2946}
2947
991d9fa0
JT
2948/*
2949 * Messages supported:
2950 * create_thin <dev_id>
2951 * create_snap <dev_id> <origin_id>
2952 * delete <dev_id>
2954 * set_transaction_id <current_trans_id> <new_trans_id>
cc8394d8
JT
2955 * reserve_metadata_snap
2956 * release_metadata_snap
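 *
 * Messages are sent with "dmsetup message", e.g. (pool name illustrative):
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0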
991d9fa0
JT
2957 */
2958static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2959{
2960 int r = -EINVAL;
2961 struct pool_c *pt = ti->private;
2962 struct pool *pool = pt->pool;
2963
2964 if (!strcasecmp(argv[0], "create_thin"))
2965 r = process_create_thin_mesg(argc, argv, pool);
2966
2967 else if (!strcasecmp(argv[0], "create_snap"))
2968 r = process_create_snap_mesg(argc, argv, pool);
2969
2970 else if (!strcasecmp(argv[0], "delete"))
2971 r = process_delete_mesg(argc, argv, pool);
2972
2973 else if (!strcasecmp(argv[0], "set_transaction_id"))
2974 r = process_set_transaction_id_mesg(argc, argv, pool);
2975
cc8394d8
JT
2976 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2977 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2978
2979 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2980 r = process_release_metadata_snap_mesg(argc, argv, pool);
2981
991d9fa0
JT
2982 else
2983 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2984
e49e5829 2985 if (!r)
020cc3b5 2986 (void) commit(pool);
991d9fa0
JT
2987
2988 return r;
2989}
2990
e49e5829
JT
2991static void emit_flags(struct pool_features *pf, char *result,
2992 unsigned sz, unsigned maxlen)
2993{
2994 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
787a996c
MS
2995 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2996 pf->error_if_no_space;
e49e5829
JT
2997 DMEMIT("%u ", count);
2998
2999 if (!pf->zero_new_blocks)
3000 DMEMIT("skip_block_zeroing ");
3001
3002 if (!pf->discard_enabled)
3003 DMEMIT("ignore_discard ");
3004
3005 if (!pf->discard_passdown)
3006 DMEMIT("no_discard_passdown ");
3007
3008 if (pf->mode == PM_READ_ONLY)
3009 DMEMIT("read_only ");
787a996c
MS
3010
3011 if (pf->error_if_no_space)
3012 DMEMIT("error_if_no_space ");
e49e5829
JT
3013}
3014
991d9fa0
JT
3015/*
3016 * Status line is:
3017 * <transaction id> <used metadata blocks>/<total metadata blocks>
3018 * <used data blocks>/<total data blocks> <held metadata root>
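 *
 * followed by the pool mode and feature flags, e.g. (values illustrative):
 *   0 841/24576 206/1228800 - rw discard_passdown queue_if_no_space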
3019 */
fd7c092e
MP
3020static void pool_status(struct dm_target *ti, status_type_t type,
3021 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0 3022{
e49e5829 3023 int r;
991d9fa0
JT
3024 unsigned sz = 0;
3025 uint64_t transaction_id;
3026 dm_block_t nr_free_blocks_data;
3027 dm_block_t nr_free_blocks_metadata;
3028 dm_block_t nr_blocks_data;
3029 dm_block_t nr_blocks_metadata;
3030 dm_block_t held_root;
3031 char buf[BDEVNAME_SIZE];
3032 char buf2[BDEVNAME_SIZE];
3033 struct pool_c *pt = ti->private;
3034 struct pool *pool = pt->pool;
3035
3036 switch (type) {
3037 case STATUSTYPE_INFO:
e49e5829
JT
3038 if (get_pool_mode(pool) == PM_FAIL) {
3039 DMEMIT("Fail");
3040 break;
3041 }
3042
1f4e0ff0
AK
3043 /* Commit to ensure statistics aren't out-of-date */
3044 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
020cc3b5 3045 (void) commit(pool);
1f4e0ff0 3046
fd7c092e
MP
3047 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
3048 if (r) {
4fa5971a
MS
3049 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
3050 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3051 goto err;
3052 }
991d9fa0 3053
fd7c092e
MP
3054 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
3055 if (r) {
4fa5971a
MS
3056 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
3057 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3058 goto err;
3059 }
991d9fa0
JT
3060
3061 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
fd7c092e 3062 if (r) {
4fa5971a
MS
3063 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
3064 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3065 goto err;
3066 }
991d9fa0 3067
fd7c092e
MP
3068 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
3069 if (r) {
4fa5971a
MS
3070 DMERR("%s: dm_pool_get_free_block_count returned %d",
3071 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3072 goto err;
3073 }
991d9fa0
JT
3074
3075 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
fd7c092e 3076 if (r) {
4fa5971a
MS
3077 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3078 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3079 goto err;
3080 }
991d9fa0 3081
cc8394d8 3082 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
fd7c092e 3083 if (r) {
4fa5971a
MS
3084 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3085 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3086 goto err;
3087 }
991d9fa0
JT
3088
3089 DMEMIT("%llu %llu/%llu %llu/%llu ",
3090 (unsigned long long)transaction_id,
3091 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3092 (unsigned long long)nr_blocks_metadata,
3093 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3094 (unsigned long long)nr_blocks_data);
3095
3096 if (held_root)
e49e5829
JT
3097 DMEMIT("%llu ", held_root);
3098 else
3099 DMEMIT("- ");
3100
3e1a0699
JT
3101 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3102 DMEMIT("out_of_data_space ");
3103 else if (pool->pf.mode == PM_READ_ONLY)
e49e5829 3104 DMEMIT("ro ");
991d9fa0 3105 else
e49e5829
JT
3106 DMEMIT("rw ");
3107
018debea 3108 if (!pool->pf.discard_enabled)
787a996c 3109 DMEMIT("ignore_discard ");
018debea 3110 else if (pool->pf.discard_passdown)
787a996c
MS
3111 DMEMIT("discard_passdown ");
3112 else
3113 DMEMIT("no_discard_passdown ");
3114
3115 if (pool->pf.error_if_no_space)
3116 DMEMIT("error_if_no_space ");
e49e5829 3117 else
787a996c 3118 DMEMIT("queue_if_no_space ");
991d9fa0
JT
3119
3120 break;
3121
3122 case STATUSTYPE_TABLE:
3123 DMEMIT("%s %s %lu %llu ",
3124 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3125 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3126 (unsigned long)pool->sectors_per_block,
3127 (unsigned long long)pt->low_water_blocks);
0424caa1 3128 emit_flags(&pt->requested_pf, result, sz, maxlen);
991d9fa0
JT
3129 break;
3130 }
fd7c092e 3131 return;
991d9fa0 3132
fd7c092e
MP
3133err:
3134 DMEMIT("Error");
991d9fa0
JT
3135}
3136
3137static int pool_iterate_devices(struct dm_target *ti,
3138 iterate_devices_callout_fn fn, void *data)
3139{
3140 struct pool_c *pt = ti->private;
3141
3142 return fn(ti, pt->data_dev, 0, ti->len, data);
3143}
3144
3145static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3146 struct bio_vec *biovec, int max_size)
3147{
3148 struct pool_c *pt = ti->private;
3149 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3150
3151 if (!q->merge_bvec_fn)
3152 return max_size;
3153
3154 bvm->bi_bdev = pt->data_dev->bdev;
3155
3156 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3157}
3158
0424caa1 3159static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
104655fd 3160{
0424caa1
MS
3161 struct pool *pool = pt->pool;
3162 struct queue_limits *data_limits;
3163
104655fd
JT
3164 limits->max_discard_sectors = pool->sectors_per_block;
3165
3166 /*
0424caa1 3167 * discard_granularity is just a hint, and not enforced.
104655fd 3168 */
0424caa1
MS
3169 if (pt->adjusted_pf.discard_passdown) {
3170 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
09869de5
LC
3171 limits->discard_granularity = max(data_limits->discard_granularity,
3172 pool->sectors_per_block << SECTOR_SHIFT);
f13945d7 3173 } else
0424caa1 3174 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
104655fd
JT
3175}
3176
991d9fa0
JT
3177static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3178{
3179 struct pool_c *pt = ti->private;
3180 struct pool *pool = pt->pool;
0cc67cd9 3181 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
991d9fa0 3182
0cc67cd9
MS
3183 /*
3184 * If the system-determined stacked limits are compatible with the
3185 * pool's blocksize (io_opt is a factor) do not override them.
3186 */
3187 if (io_opt_sectors < pool->sectors_per_block ||
3188 do_div(io_opt_sectors, pool->sectors_per_block)) {
fdfb4c8c 3189 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT);
0cc67cd9
MS
3190 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3191 }
0424caa1
MS
3192
3193 /*
3194 * pt->adjusted_pf is a staging area for the actual features to use.
3195 * They get transferred to the live pool in bind_control_target()
3196 * called from pool_preresume().
3197 */
b60ab990
MS
3198 if (!pt->adjusted_pf.discard_enabled) {
3199 /*
3200 * Must explicitly disallow stacking discard limits otherwise the
3201 * block layer will stack them if pool's data device has support.
3202 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3203 * user to see that, so make sure to set all discard limits to 0.
3204 */
3205 limits->discard_granularity = 0;
0424caa1 3206 return;
b60ab990 3207 }
0424caa1
MS
3208
3209 disable_passdown_if_not_supported(pt);
3210
3211 set_discard_limits(pt, limits);
991d9fa0
JT
3212}
3213
3214static struct target_type pool_target = {
3215 .name = "thin-pool",
3216 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3217 DM_TARGET_IMMUTABLE,
e5aea7b4 3218 .version = {1, 13, 0},
991d9fa0
JT
3219 .module = THIS_MODULE,
3220 .ctr = pool_ctr,
3221 .dtr = pool_dtr,
3222 .map = pool_map,
3223 .postsuspend = pool_postsuspend,
3224 .preresume = pool_preresume,
3225 .resume = pool_resume,
3226 .message = pool_message,
3227 .status = pool_status,
3228 .merge = pool_merge,
3229 .iterate_devices = pool_iterate_devices,
3230 .io_hints = pool_io_hints,
3231};
3232
3233/*----------------------------------------------------------------
3234 * Thin target methods
3235 *--------------------------------------------------------------*/
b10ebd34
JT
3236static void thin_get(struct thin_c *tc)
3237{
3238 atomic_inc(&tc->refcount);
3239}
3240
3241static void thin_put(struct thin_c *tc)
3242{
3243 if (atomic_dec_and_test(&tc->refcount))
3244 complete(&tc->can_destroy);
3245}
3246
991d9fa0
JT
3247static void thin_dtr(struct dm_target *ti)
3248{
3249 struct thin_c *tc = ti->private;
c140e1c4
MS
3250 unsigned long flags;
3251
b10ebd34
JT
3252 thin_put(tc);
3253 wait_for_completion(&tc->can_destroy);
3254
c140e1c4
MS
3255 spin_lock_irqsave(&tc->pool->lock, flags);
3256 list_del_rcu(&tc->list);
3257 spin_unlock_irqrestore(&tc->pool->lock, flags);
3258 synchronize_rcu();
991d9fa0
JT
3259
3260 mutex_lock(&dm_thin_pool_table.mutex);
3261
3262 __pool_dec(tc->pool);
3263 dm_pool_close_thin_device(tc->td);
3264 dm_put_device(ti, tc->pool_dev);
2dd9c257
JT
3265 if (tc->origin_dev)
3266 dm_put_device(ti, tc->origin_dev);
991d9fa0
JT
3267 kfree(tc);
3268
3269 mutex_unlock(&dm_thin_pool_table.mutex);
3270}
3271
3272/*
3273 * Thin target parameters:
3274 *
2dd9c257 3275 * <pool_dev> <dev_id> [origin_dev]
991d9fa0
JT
3276 *
3277 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3278 * dev_id: the internal device identifier
2dd9c257 3279 * origin_dev: a device external to the pool that should act as the origin
67e2e2b2
JT
3280 *
3281 * If the pool device has discards disabled, they get disabled for the thin
3282 * device as well.
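 *
 * An illustrative table line for a 1GiB thin volume (paths hypothetical):
 *   0 2097152 thin /dev/mapper/pool 0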
991d9fa0
JT
3283 */
3284static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3285{
3286 int r;
3287 struct thin_c *tc;
2dd9c257 3288 struct dm_dev *pool_dev, *origin_dev;
991d9fa0 3289 struct mapped_device *pool_md;
5e3283e2 3290 unsigned long flags;
991d9fa0
JT
3291
3292 mutex_lock(&dm_thin_pool_table.mutex);
3293
2dd9c257 3294 if (argc != 2 && argc != 3) {
991d9fa0
JT
3295 ti->error = "Invalid argument count";
3296 r = -EINVAL;
3297 goto out_unlock;
3298 }
3299
3300 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3301 if (!tc) {
3302 ti->error = "Out of memory";
3303 r = -ENOMEM;
3304 goto out_unlock;
3305 }
c140e1c4
MS
3306 spin_lock_init(&tc->lock);
3307 bio_list_init(&tc->deferred_bio_list);
3308 bio_list_init(&tc->retry_on_resume_list);
67324ea1 3309 tc->sort_bio_list = RB_ROOT;
991d9fa0 3310
2dd9c257
JT
3311 if (argc == 3) {
3312 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3313 if (r) {
3314 ti->error = "Error opening origin device";
3315 goto bad_origin_dev;
3316 }
3317 tc->origin_dev = origin_dev;
3318 }
3319
991d9fa0
JT
3320 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3321 if (r) {
3322 ti->error = "Error opening pool device";
3323 goto bad_pool_dev;
3324 }
3325 tc->pool_dev = pool_dev;
3326
3327 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3328 ti->error = "Invalid device id";
3329 r = -EINVAL;
3330 goto bad_common;
3331 }
3332
3333 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3334 if (!pool_md) {
3335 ti->error = "Couldn't get pool mapped device";
3336 r = -EINVAL;
3337 goto bad_common;
3338 }
3339
3340 tc->pool = __pool_table_lookup(pool_md);
3341 if (!tc->pool) {
3342 ti->error = "Couldn't find pool object";
3343 r = -EINVAL;
3344 goto bad_pool_lookup;
3345 }
3346 __pool_inc(tc->pool);
3347
e49e5829
JT
3348 if (get_pool_mode(tc->pool) == PM_FAIL) {
3349 ti->error = "Couldn't open thin device, Pool is in fail mode";
1acacc07 3350 r = -EINVAL;
e49e5829
JT
3351 goto bad_thin_open;
3352 }
3353
991d9fa0
JT
3354 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3355 if (r) {
3356 ti->error = "Couldn't open thin internal device";
3357 goto bad_thin_open;
3358 }
3359
542f9038
MS
3360 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3361 if (r)
1acacc07 3362 goto bad_target_max_io_len;
542f9038 3363
55a62eef 3364 ti->num_flush_bios = 1;
16ad3d10 3365 ti->flush_supported = true;
59c3d2c6 3366 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
67e2e2b2
JT
3367
3368 /* In case the pool supports discards, pass them on. */
b60ab990 3369 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 3370 if (tc->pool->pf.discard_enabled) {
0ac55489 3371 ti->discards_supported = true;
55a62eef 3372 ti->num_discard_bios = 1;
55a62eef
AK
3373 /* Discard bios must be split on a block boundary */
3374 ti->split_discard_bios = true;
67e2e2b2 3375 }
991d9fa0
JT
3376
3377 dm_put(pool_md);
3378
3379 mutex_unlock(&dm_thin_pool_table.mutex);
3380
b10ebd34
JT
3381 atomic_set(&tc->refcount, 1);
3382 init_completion(&tc->can_destroy);
3383
5e3283e2 3384 spin_lock_irqsave(&tc->pool->lock, flags);
c140e1c4 3385 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
5e3283e2 3386 spin_unlock_irqrestore(&tc->pool->lock, flags);
c140e1c4
MS
3387 /*
3388 * This synchronize_rcu() call is needed here otherwise we risk a
3389 * wake_worker() call finding no bios to process (because the newly
3390 * added tc isn't yet visible). So this reduces latency since we
3391 * aren't then dependent on the periodic commit to wake_worker().
3392 */
3393 synchronize_rcu();
3394
991d9fa0
JT
3395 return 0;
3396
1acacc07
MS
3397bad_target_max_io_len:
3398 dm_pool_close_thin_device(tc->td);
991d9fa0
JT
3399bad_thin_open:
3400 __pool_dec(tc->pool);
3401bad_pool_lookup:
3402 dm_put(pool_md);
3403bad_common:
3404 dm_put_device(ti, tc->pool_dev);
3405bad_pool_dev:
2dd9c257
JT
3406 if (tc->origin_dev)
3407 dm_put_device(ti, tc->origin_dev);
3408bad_origin_dev:
991d9fa0
JT
3409 kfree(tc);
3410out_unlock:
3411 mutex_unlock(&dm_thin_pool_table.mutex);
3412
3413 return r;
3414}
3415
7de3ee57 3416static int thin_map(struct dm_target *ti, struct bio *bio)
991d9fa0 3417{
4f024f37 3418 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
991d9fa0 3419
7de3ee57 3420 return thin_bio_map(ti, bio);
991d9fa0
JT
3421}
3422
7de3ee57 3423static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
eb2aa48d
JT
3424{
3425 unsigned long flags;
59c3d2c6 3426 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 3427 struct list_head work;
a24c2569 3428 struct dm_thin_new_mapping *m, *tmp;
eb2aa48d
JT
3429 struct pool *pool = h->tc->pool;
3430
3431 if (h->shared_read_entry) {
3432 INIT_LIST_HEAD(&work);
44feb387 3433 dm_deferred_entry_dec(h->shared_read_entry, &work);
eb2aa48d
JT
3434
3435 spin_lock_irqsave(&pool->lock, flags);
3436 list_for_each_entry_safe(m, tmp, &work, list) {
3437 list_del(&m->list);
50f3c3ef 3438 __complete_mapping_preparation(m);
eb2aa48d
JT
3439 }
3440 spin_unlock_irqrestore(&pool->lock, flags);
3441 }
3442
104655fd
JT
3443 if (h->all_io_entry) {
3444 INIT_LIST_HEAD(&work);
44feb387 3445 dm_deferred_entry_dec(h->all_io_entry, &work);
563af186
JT
3446 if (!list_empty(&work)) {
3447 spin_lock_irqsave(&pool->lock, flags);
3448 list_for_each_entry_safe(m, tmp, &work, list)
daec338b 3449 list_add_tail(&m->list, &pool->prepared_discards);
563af186
JT
3450 spin_unlock_irqrestore(&pool->lock, flags);
3451 wake_worker(pool);
3452 }
104655fd
JT
3453 }
3454
eb2aa48d
JT
3455 return 0;
3456}
3457
738211f7 3458static void thin_presuspend(struct dm_target *ti)
991d9fa0 3459{
738211f7
JT
3460 struct thin_c *tc = ti->private;
3461
991d9fa0 3462 if (dm_noflush_suspending(ti))
738211f7
JT
3463 noflush_work(tc, do_noflush_start);
3464}
3465
3466static void thin_postsuspend(struct dm_target *ti)
3467{
3468 struct thin_c *tc = ti->private;
3469
3470 /*
3471 * The dm_noflush_suspending flag has been cleared by now, so
3472 * unfortunately we must always run this.
3473 */
3474 noflush_work(tc, do_noflush_stop);
991d9fa0
JT
3475}
3476
e5aea7b4
JT
3477static int thin_preresume(struct dm_target *ti)
3478{
3479 struct thin_c *tc = ti->private;
3480
3481 if (tc->origin_dev)
3482 tc->origin_size = get_dev_size(tc->origin_dev->bdev);
3483
3484 return 0;
3485}
3486
991d9fa0
JT
3487/*
3488 * <nr mapped sectors> <highest mapped sector>
3489 */
fd7c092e
MP
3490static void thin_status(struct dm_target *ti, status_type_t type,
3491 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0
JT
3492{
3493 int r;
3494 ssize_t sz = 0;
3495 dm_block_t mapped, highest;
3496 char buf[BDEVNAME_SIZE];
3497 struct thin_c *tc = ti->private;
3498
e49e5829
JT
3499 if (get_pool_mode(tc->pool) == PM_FAIL) {
3500 DMEMIT("Fail");
fd7c092e 3501 return;
e49e5829
JT
3502 }
3503
991d9fa0
JT
3504 if (!tc->td)
3505 DMEMIT("-");
3506 else {
3507 switch (type) {
3508 case STATUSTYPE_INFO:
3509 r = dm_thin_get_mapped_count(tc->td, &mapped);
fd7c092e
MP
3510 if (r) {
3511 DMERR("dm_thin_get_mapped_count returned %d", r);
3512 goto err;
3513 }
991d9fa0
JT
3514
3515 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
fd7c092e
MP
3516 if (r < 0) {
3517 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3518 goto err;
3519 }
991d9fa0
JT
3520
3521 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3522 if (r)
3523 DMEMIT("%llu", ((highest + 1) *
3524 tc->pool->sectors_per_block) - 1);
3525 else
3526 DMEMIT("-");
3527 break;
3528
3529 case STATUSTYPE_TABLE:
3530 DMEMIT("%s %lu",
3531 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3532 (unsigned long) tc->dev_id);
2dd9c257
JT
3533 if (tc->origin_dev)
3534 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
991d9fa0
JT
3535 break;
3536 }
3537 }
3538
fd7c092e
MP
3539 return;
3540
3541err:
3542 DMEMIT("Error");
991d9fa0
JT
3543}
3544
3545static int thin_iterate_devices(struct dm_target *ti,
3546 iterate_devices_callout_fn fn, void *data)
3547{
55f2b8bd 3548 sector_t blocks;
991d9fa0 3549 struct thin_c *tc = ti->private;
55f2b8bd 3550 struct pool *pool = tc->pool;
991d9fa0
JT
3551
3552 /*
3553 * We can't call dm_pool_get_data_dev_size() since that blocks. So
3554 * we follow a more convoluted path through to the pool's target.
3555 */
55f2b8bd 3556 if (!pool->ti)
991d9fa0
JT
3557 return 0; /* nothing is bound */
3558
55f2b8bd
MS
3559 blocks = pool->ti->len;
3560 (void) sector_div(blocks, pool->sectors_per_block);
991d9fa0 3561 if (blocks)
55f2b8bd 3562 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
991d9fa0
JT
3563
3564 return 0;
3565}
3566
991d9fa0
JT
3567static struct target_type thin_target = {
3568 .name = "thin",
e5aea7b4 3569 .version = {1, 13, 0},
991d9fa0
JT
3570 .module = THIS_MODULE,
3571 .ctr = thin_ctr,
3572 .dtr = thin_dtr,
3573 .map = thin_map,
eb2aa48d 3574 .end_io = thin_endio,
e5aea7b4 3575 .preresume = thin_preresume,
738211f7 3576 .presuspend = thin_presuspend,
991d9fa0
JT
3577 .postsuspend = thin_postsuspend,
3578 .status = thin_status,
3579 .iterate_devices = thin_iterate_devices,
991d9fa0
JT
3580};
3581
3582/*----------------------------------------------------------------*/
3583
3584static int __init dm_thin_init(void)
3585{
3586 int r;
3587
3588 pool_table_init();
3589
3590 r = dm_register_target(&thin_target);
3591 if (r)
3592 return r;
3593
3594 r = dm_register_target(&pool_target);
3595 if (r)
a24c2569
MS
3596 goto bad_pool_target;
3597
3598 r = -ENOMEM;
3599
a24c2569
MS
3600 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3601 if (!_new_mapping_cache)
3602 goto bad_new_mapping_cache;
3603
a24c2569
MS
3604 return 0;
3605
a24c2569 3606bad_new_mapping_cache:
a24c2569
MS
3607 dm_unregister_target(&pool_target);
3608bad_pool_target:
3609 dm_unregister_target(&thin_target);
991d9fa0
JT
3610
3611 return r;
3612}
3613
3614static void dm_thin_exit(void)
3615{
3616 dm_unregister_target(&thin_target);
3617 dm_unregister_target(&pool_target);
a24c2569 3618
a24c2569 3619 kmem_cache_destroy(_new_mapping_cache);
991d9fa0
JT
3620}
3621
3622module_init(dm_thin_init);
3623module_exit(dm_thin_exit);
3624
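/*
 * no_space_timeout may also be changed at runtime, e.g. (module name as
 * typically built):
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */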
80c57893
MS
3625module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
3626MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
3627
7cab8bf1 3628MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
991d9fa0
JT
3629MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3630MODULE_LICENSE("GPL");