drivers/md/dm-thin.c
991d9fa0 1/*
e49e5829 2 * Copyright (C) 2011-2012 Red Hat UK.
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm-thin-metadata.h"
4f81a417 8#include "dm-bio-prison.h"
1f4e0ff0 9#include "dm.h"
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/dm-kcopyd.h>
14#include <linux/list.h>
c140e1c4 15#include <linux/rculist.h>
16#include <linux/init.h>
17#include <linux/module.h>
18#include <linux/slab.h>
67324ea1 19#include <linux/rbtree.h>
20
21#define DM_MSG_PREFIX "thin"
22
23/*
24 * Tunable constants
25 */
7768ed33 26#define ENDIO_HOOK_POOL_SIZE 1024
27#define MAPPING_POOL_SIZE 1024
28#define PRISON_CELLS 1024
905e51b3 29#define COMMIT_PERIOD HZ
30#define NO_SPACE_TIMEOUT_SECS 60
31
32static unsigned no_space_timeout_secs = NO_SPACE_TIMEOUT_SECS;
991d9fa0 33
34DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(snapshot_copy_throttle,
35 "A percentage of time allocated for copy on write");
36
37/*
38 * The block size of the device holding pool data must be
39 * between 64KB and 1GB.
40 */
41#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
42#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
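/*
 * Note added for clarity: with 512-byte sectors (SECTOR_SHIFT == 9) these
 * limits correspond to 128 and 2097152 sectors respectively.
 */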
43
44/*
45 * Device id is restricted to 24 bits.
46 */
47#define MAX_DEV_ID ((1 << 24) - 1)
48
49/*
50 * How do we handle breaking sharing of data blocks?
51 * =================================================
52 *
53 * We use a standard copy-on-write btree to store the mappings for the
54 * devices (note I'm talking about copy-on-write of the metadata here, not
55 * the data). When you take an internal snapshot you clone the root node
56 * of the origin btree. After this there is no concept of an origin or a
57 * snapshot. They are just two device trees that happen to point to the
58 * same data blocks.
59 *
60 * When we get a write in we decide if it's to a shared data block using
61 * some timestamp magic. If it is, we have to break sharing.
62 *
63 * Let's say we write to a shared block in what was the origin. The
64 * steps are:
65 *
66 * i) plug further io to this physical block. (see bio_prison code).
67 *
68 * ii) quiesce any read io to that shared data block. Obviously
44feb387 69 * including all devices that share this block. (see dm_deferred_set code)
70 *
71 * iii) copy the data block to a newly allocated block. This step can be
72 * skipped if the io covers the whole block. (schedule_copy).
73 *
74 * iv) insert the new mapping into the origin's btree
fe878f34 75 * (process_prepared_mapping). This act of inserting breaks some
76 * sharing of btree nodes between the two devices. Breaking sharing only
77 * affects the btree of that specific device. Btrees for the other
78 * devices that share the block never change. The btree for the origin
79 * device as it was after the last commit is untouched, i.e. we're using
80 * persistent data structures in the functional programming sense.
81 *
82 * v) unplug io to this physical block, including the io that triggered
83 * the breaking of sharing.
84 *
85 * Steps (ii) and (iii) occur in parallel.
86 *
87 * The metadata _doesn't_ need to be committed before the io continues. We
88 * get away with this because the io is always written to a _new_ block.
89 * If there's a crash, then:
90 *
91 * - The origin mapping will point to the old origin block (the shared
92 * one). This will contain the data as it was before the io that triggered
93 * the breaking of sharing came in.
94 *
95 * - The snap mapping still points to the old block, as it would after
96 * the commit.
97 *
98 * The downside of this scheme is that the timestamp magic isn't perfect, and
99 * will continue to think that the data block in the snapshot device is shared
100 * even after the write to the origin has broken sharing. I suspect data
101 * blocks will typically be shared by many different devices, so we're
102 * breaking sharing n + 1 times, rather than n, where n is the number of
103 * devices that reference this data block. At the moment I think the
104 * benefits far, far outweigh the disadvantages.
105 */
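/*
 * Worked example, added for illustration (not part of the original text):
 * suppose an origin O and a snapshot S share data block D.  A write to O
 * allocates a new block D', copies D to D' (unless the io covers the whole
 * block) and inserts the new mapping into O's btree only; S keeps pointing
 * at D.  Because the timestamp check is conservative, a later write to S
 * still treats D as shared and performs one more copy even though S is by
 * then the only user; this is the extra break of sharing described above.
 */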
106
107/*----------------------------------------------------------------*/
108
109/*
110 * Key building.
111 */
112static void build_data_key(struct dm_thin_device *td,
44feb387 113 dm_block_t b, struct dm_cell_key *key)
114{
115 key->virtual = 0;
116 key->dev = dm_thin_dev_id(td);
117 key->block = b;
118}
119
120static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
44feb387 121 struct dm_cell_key *key)
122{
123 key->virtual = 1;
124 key->dev = dm_thin_dev_id(td);
125 key->block = b;
126}
127
128/*----------------------------------------------------------------*/
129
130/*
131 * A pool device ties together a metadata device and a data device. It
132 * also provides the interface for creating and destroying internal
133 * devices.
134 */
a24c2569 135struct dm_thin_new_mapping;
67e2e2b2 136
e49e5829 137/*
3e1a0699 138 * The pool runs in 4 modes. They are ordered here from least to most degraded, so mode values can be compared.
139 */
140enum pool_mode {
141 PM_WRITE, /* metadata may be changed */
3e1a0699 142 PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
143 PM_READ_ONLY, /* metadata may not be changed */
144 PM_FAIL, /* all I/O fails */
145};
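/*
 * Note added for clarity: because the modes above are declared in order of
 * increasing degradation, code can compare them numerically; for example
 * commit() below refuses to run once get_pool_mode(pool) >= PM_READ_ONLY.
 */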
146
67e2e2b2 147struct pool_features {
148 enum pool_mode mode;
149
150 bool zero_new_blocks:1;
151 bool discard_enabled:1;
152 bool discard_passdown:1;
787a996c 153 bool error_if_no_space:1;
154};
155
156struct thin_c;
157typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
158typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
159
160struct pool {
161 struct list_head list;
162 struct dm_target *ti; /* Only set if a pool target is bound */
163
164 struct mapped_device *pool_md;
165 struct block_device *md_dev;
166 struct dm_pool_metadata *pmd;
167
991d9fa0 168 dm_block_t low_water_blocks;
55f2b8bd 169 uint32_t sectors_per_block;
f9a8e0cd 170 int sectors_per_block_shift;
991d9fa0 171
67e2e2b2 172 struct pool_features pf;
88a6621b 173 bool low_water_triggered:1; /* A dm event has been sent */
991d9fa0 174
44feb387 175 struct dm_bio_prison *prison;
176 struct dm_kcopyd_client *copier;
177
178 struct workqueue_struct *wq;
179 struct work_struct worker;
905e51b3 180 struct delayed_work waker;
85ad643b 181 struct delayed_work no_space_timeout;
991d9fa0 182
905e51b3 183 unsigned long last_commit_jiffies;
55f2b8bd 184 unsigned ref_count;
185
186 spinlock_t lock;
187 struct bio_list deferred_flush_bios;
188 struct list_head prepared_mappings;
104655fd 189 struct list_head prepared_discards;
c140e1c4 190 struct list_head active_thins;
991d9fa0 191
192 struct dm_deferred_set *shared_read_ds;
193 struct dm_deferred_set *all_io_ds;
991d9fa0 194
a24c2569 195 struct dm_thin_new_mapping *next_mapping;
991d9fa0 196 mempool_t *mapping_pool;
197
198 process_bio_fn process_bio;
199 process_bio_fn process_discard;
200
201 process_mapping_fn process_prepared_mapping;
202 process_mapping_fn process_prepared_discard;
203};
204
e49e5829 205static enum pool_mode get_pool_mode(struct pool *pool);
b5330655 206static void metadata_operation_failed(struct pool *pool, const char *op, int r);
e49e5829 207
208/*
209 * Target context for a pool.
210 */
211struct pool_c {
212 struct dm_target *ti;
213 struct pool *pool;
214 struct dm_dev *data_dev;
215 struct dm_dev *metadata_dev;
216 struct dm_target_callbacks callbacks;
217
218 dm_block_t low_water_blocks;
219 struct pool_features requested_pf; /* Features requested during table load */
220 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
221};
222
223/*
224 * Target context for a thin.
225 */
226struct thin_c {
c140e1c4 227 struct list_head list;
991d9fa0 228 struct dm_dev *pool_dev;
2dd9c257 229 struct dm_dev *origin_dev;
230 dm_thin_id dev_id;
231
232 struct pool *pool;
233 struct dm_thin_device *td;
738211f7 234 bool requeue_mode:1;
235 spinlock_t lock;
236 struct bio_list deferred_bio_list;
237 struct bio_list retry_on_resume_list;
67324ea1 238 struct rb_root sort_bio_list; /* sorted list of deferred bios */
239
240 /*
241 * Ensures the thin is not destroyed until the worker has finished
242 * iterating the active_thins list.
243 */
244 atomic_t refcount;
245 struct completion can_destroy;
246};
247
248/*----------------------------------------------------------------*/
249
250/*
251 * wake_worker() is used when new work is queued and when pool_resume is
252 * ready to continue deferred IO processing.
253 */
254static void wake_worker(struct pool *pool)
255{
256 queue_work(pool->wq, &pool->worker);
257}
258
259/*----------------------------------------------------------------*/
260
261static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio,
262 struct dm_bio_prison_cell **cell_result)
263{
264 int r;
265 struct dm_bio_prison_cell *cell_prealloc;
266
267 /*
268 * Allocate a cell from the prison's mempool.
269 * This might block but it can't fail.
270 */
271 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO);
272
273 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result);
274 if (r)
275 /*
276 * We reused an old cell; we can get rid of
277 * the new one.
278 */
279 dm_bio_prison_free_cell(pool->prison, cell_prealloc);
280
281 return r;
282}
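/*
 * Usage note, added for clarity: callers build a key for the block and then
 * typically do
 *
 *	build_virtual_key(tc->td, block, &key);
 *	if (bio_detain(pool, &key, bio, &cell))
 *		return;
 *
 * A non-zero return means another bio already holds a cell for this key and
 * the new bio has been queued inside it, so the caller simply backs off.
 */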
283
284static void cell_release(struct pool *pool,
285 struct dm_bio_prison_cell *cell,
286 struct bio_list *bios)
287{
288 dm_cell_release(pool->prison, cell, bios);
289 dm_bio_prison_free_cell(pool->prison, cell);
290}
291
292static void cell_release_no_holder(struct pool *pool,
293 struct dm_bio_prison_cell *cell,
294 struct bio_list *bios)
295{
296 dm_cell_release_no_holder(pool->prison, cell, bios);
297 dm_bio_prison_free_cell(pool->prison, cell);
298}
299
300static void cell_defer_no_holder_no_free(struct thin_c *tc,
301 struct dm_bio_prison_cell *cell)
302{
303 struct pool *pool = tc->pool;
304 unsigned long flags;
305
306 spin_lock_irqsave(&tc->lock, flags);
307 dm_cell_release_no_holder(pool->prison, cell, &tc->deferred_bio_list);
308 spin_unlock_irqrestore(&tc->lock, flags);
309
310 wake_worker(pool);
311}
312
313static void cell_error_with_code(struct pool *pool,
314 struct dm_bio_prison_cell *cell, int error_code)
6beca5eb 315{
af91805a 316 dm_cell_error(pool->prison, cell, error_code);
317 dm_bio_prison_free_cell(pool->prison, cell);
318}
319
320static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell)
321{
322 cell_error_with_code(pool, cell, -EIO);
323}
324
325/*----------------------------------------------------------------*/
326
327/*
328 * A global list of pools that uses a struct mapped_device as a key.
329 */
330static struct dm_thin_pool_table {
331 struct mutex mutex;
332 struct list_head pools;
333} dm_thin_pool_table;
334
335static void pool_table_init(void)
336{
337 mutex_init(&dm_thin_pool_table.mutex);
338 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
339}
340
341static void __pool_table_insert(struct pool *pool)
342{
343 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
344 list_add(&pool->list, &dm_thin_pool_table.pools);
345}
346
347static void __pool_table_remove(struct pool *pool)
348{
349 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
350 list_del(&pool->list);
351}
352
353static struct pool *__pool_table_lookup(struct mapped_device *md)
354{
355 struct pool *pool = NULL, *tmp;
356
357 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
358
359 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
360 if (tmp->pool_md == md) {
361 pool = tmp;
362 break;
363 }
364 }
365
366 return pool;
367}
368
369static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
370{
371 struct pool *pool = NULL, *tmp;
372
373 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
374
375 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
376 if (tmp->md_dev == md_dev) {
377 pool = tmp;
378 break;
379 }
380 }
381
382 return pool;
383}
384
385/*----------------------------------------------------------------*/
386
a24c2569 387struct dm_thin_endio_hook {
eb2aa48d 388 struct thin_c *tc;
389 struct dm_deferred_entry *shared_read_entry;
390 struct dm_deferred_entry *all_io_entry;
a24c2569 391 struct dm_thin_new_mapping *overwrite_mapping;
67324ea1 392 struct rb_node rb_node;
393};
394
18adc577 395static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
396{
397 struct bio *bio;
398 struct bio_list bios;
18adc577 399 unsigned long flags;
400
401 bio_list_init(&bios);
18adc577 402
c140e1c4 403 spin_lock_irqsave(&tc->lock, flags);
404 bio_list_merge(&bios, master);
405 bio_list_init(master);
c140e1c4 406 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0 407
408 while ((bio = bio_list_pop(&bios)))
409 bio_endio(bio, DM_ENDIO_REQUEUE);
410}
411
412static void requeue_io(struct thin_c *tc)
413{
414 requeue_bio_list(tc, &tc->deferred_bio_list);
415 requeue_bio_list(tc, &tc->retry_on_resume_list);
416}
417
c140e1c4 418static void error_thin_retry_list(struct thin_c *tc)
419{
420 struct bio *bio;
421 unsigned long flags;
422 struct bio_list bios;
423
424 bio_list_init(&bios);
425
426 spin_lock_irqsave(&tc->lock, flags);
427 bio_list_merge(&bios, &tc->retry_on_resume_list);
428 bio_list_init(&tc->retry_on_resume_list);
429 spin_unlock_irqrestore(&tc->lock, flags);
430
431 while ((bio = bio_list_pop(&bios)))
432 bio_io_error(bio);
433}
434
435static void error_retry_list(struct pool *pool)
436{
437 struct thin_c *tc;
438
439 rcu_read_lock();
440 list_for_each_entry_rcu(tc, &pool->active_thins, list)
441 error_thin_retry_list(tc);
442 rcu_read_unlock();
443}
444
445/*
446 * This section of code contains the logic for processing a thin device's IO.
447 * Much of the code depends on pool object resources (lists, workqueues, etc)
448 * but most is exclusively called from the thin target rather than the thin-pool
449 * target.
450 */
451
452static bool block_size_is_power_of_two(struct pool *pool)
453{
454 return pool->sectors_per_block_shift >= 0;
455}
456
457static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
458{
58f77a21 459 struct pool *pool = tc->pool;
4f024f37 460 sector_t block_nr = bio->bi_iter.bi_sector;
55f2b8bd 461
462 if (block_size_is_power_of_two(pool))
463 block_nr >>= pool->sectors_per_block_shift;
f9a8e0cd 464 else
58f77a21 465 (void) sector_div(block_nr, pool->sectors_per_block);
466
467 return block_nr;
468}
469
470static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
471{
472 struct pool *pool = tc->pool;
4f024f37 473 sector_t bi_sector = bio->bi_iter.bi_sector;
474
475 bio->bi_bdev = tc->pool_dev->bdev;
58f77a21 476 if (block_size_is_power_of_two(pool))
477 bio->bi_iter.bi_sector =
478 (block << pool->sectors_per_block_shift) |
479 (bi_sector & (pool->sectors_per_block - 1));
58f77a21 480 else
4f024f37 481 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
58f77a21 482 sector_div(bi_sector, pool->sectors_per_block);
483}
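/*
 * Illustrative example, added for clarity: with 64KB blocks,
 * sectors_per_block is 128 and sectors_per_block_shift is 7.  A bio at
 * sector 1000 belongs to virtual block 1000 >> 7 = 7; remapping it to data
 * block 20 gives sector (20 << 7) | (1000 & 127) = 2664 on the data device.
 */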
484
485static void remap_to_origin(struct thin_c *tc, struct bio *bio)
486{
487 bio->bi_bdev = tc->origin_dev->bdev;
488}
489
490static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
491{
492 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
493 dm_thin_changed_this_transaction(tc->td);
494}
495
496static void inc_all_io_entry(struct pool *pool, struct bio *bio)
497{
498 struct dm_thin_endio_hook *h;
499
500 if (bio->bi_rw & REQ_DISCARD)
501 return;
502
59c3d2c6 503 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
504 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
505}
506
2dd9c257 507static void issue(struct thin_c *tc, struct bio *bio)
508{
509 struct pool *pool = tc->pool;
510 unsigned long flags;
511
512 if (!bio_triggers_commit(tc, bio)) {
513 generic_make_request(bio);
514 return;
515 }
516
991d9fa0 517 /*
518 * Complete bio with an error if earlier I/O caused changes to
519 * the metadata that can't be committed, e.g. due to I/O errors
520 * on the metadata device.
991d9fa0 521 */
522 if (dm_thin_aborted_changes(tc->td)) {
523 bio_io_error(bio);
524 return;
525 }
526
527 /*
528 * Batch together any bios that trigger commits and then issue a
529 * single commit for them in process_deferred_bios().
530 */
531 spin_lock_irqsave(&pool->lock, flags);
532 bio_list_add(&pool->deferred_flush_bios, bio);
533 spin_unlock_irqrestore(&pool->lock, flags);
534}
535
536static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
537{
538 remap_to_origin(tc, bio);
539 issue(tc, bio);
540}
541
542static void remap_and_issue(struct thin_c *tc, struct bio *bio,
543 dm_block_t block)
544{
545 remap(tc, bio, block);
546 issue(tc, bio);
547}
548
549/*----------------------------------------------------------------*/
550
551/*
552 * Bio endio functions.
553 */
a24c2569 554struct dm_thin_new_mapping {
555 struct list_head list;
556
557 bool quiesced:1;
558 bool prepared:1;
559 bool pass_discard:1;
560 bool definitely_not_shared:1;
991d9fa0 561
7f214665 562 int err;
563 struct thin_c *tc;
564 dm_block_t virt_block;
565 dm_block_t data_block;
a24c2569 566 struct dm_bio_prison_cell *cell, *cell2;
567
568 /*
569 * If the bio covers the whole area of a block then we can avoid
570 * zeroing or copying. Instead this bio is hooked. The bio will
571 * still be in the cell, so care has to be taken to avoid issuing
572 * the bio twice.
573 */
574 struct bio *bio;
575 bio_end_io_t *saved_bi_end_io;
576};
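/*
 * Note added for clarity: a mapping is only handed to the worker once both
 * 'quiesced' and 'prepared' are set; see __maybe_add_mapping() below.
 */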
577
a24c2569 578static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
991d9fa0
JT
579{
580 struct pool *pool = m->tc->pool;
581
eb2aa48d 582 if (m->quiesced && m->prepared) {
daec338b 583 list_add_tail(&m->list, &pool->prepared_mappings);
991d9fa0
JT
584 wake_worker(pool);
585 }
586}
587
588static void copy_complete(int read_err, unsigned long write_err, void *context)
589{
590 unsigned long flags;
a24c2569 591 struct dm_thin_new_mapping *m = context;
991d9fa0
JT
592 struct pool *pool = m->tc->pool;
593
594 m->err = read_err || write_err ? -EIO : 0;
595
596 spin_lock_irqsave(&pool->lock, flags);
7f214665 597 m->prepared = true;
991d9fa0
JT
598 __maybe_add_mapping(m);
599 spin_unlock_irqrestore(&pool->lock, flags);
600}
601
602static void overwrite_endio(struct bio *bio, int err)
603{
604 unsigned long flags;
59c3d2c6 605 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 606 struct dm_thin_new_mapping *m = h->overwrite_mapping;
991d9fa0
JT
607 struct pool *pool = m->tc->pool;
608
609 m->err = err;
610
611 spin_lock_irqsave(&pool->lock, flags);
7f214665 612 m->prepared = true;
991d9fa0
JT
613 __maybe_add_mapping(m);
614 spin_unlock_irqrestore(&pool->lock, flags);
615}
616
991d9fa0
JT
617/*----------------------------------------------------------------*/
618
619/*
620 * Workqueue.
621 */
622
623/*
624 * Prepared mapping jobs.
625 */
626
627/*
628 * This sends the bios in the cell back to the deferred_bios list.
629 */
2aab3850 630static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
991d9fa0
JT
631{
632 struct pool *pool = tc->pool;
633 unsigned long flags;
634
c140e1c4
MS
635 spin_lock_irqsave(&tc->lock, flags);
636 cell_release(pool, cell, &tc->deferred_bio_list);
637 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
638
639 wake_worker(pool);
640}
641
642/*
6beca5eb 643 * Same as cell_defer above, except it omits the original holder of the cell.
991d9fa0 644 */
f286ba0e 645static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
991d9fa0 646{
991d9fa0
JT
647 struct pool *pool = tc->pool;
648 unsigned long flags;
649
c140e1c4
MS
650 spin_lock_irqsave(&tc->lock, flags);
651 cell_release_no_holder(pool, cell, &tc->deferred_bio_list);
652 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
653
654 wake_worker(pool);
655}
656
e49e5829
JT
657static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
658{
196d38bc 659 if (m->bio) {
e49e5829 660 m->bio->bi_end_io = m->saved_bi_end_io;
196d38bc
KO
661 atomic_inc(&m->bio->bi_remaining);
662 }
6beca5eb 663 cell_error(m->tc->pool, m->cell);
e49e5829
JT
664 list_del(&m->list);
665 mempool_free(m, m->tc->pool->mapping_pool);
666}
025b9685 667
a24c2569 668static void process_prepared_mapping(struct dm_thin_new_mapping *m)
991d9fa0
JT
669{
670 struct thin_c *tc = m->tc;
6beca5eb 671 struct pool *pool = tc->pool;
991d9fa0
JT
672 struct bio *bio;
673 int r;
674
675 bio = m->bio;
196d38bc 676 if (bio) {
991d9fa0 677 bio->bi_end_io = m->saved_bi_end_io;
196d38bc
KO
678 atomic_inc(&bio->bi_remaining);
679 }
991d9fa0
JT
680
681 if (m->err) {
6beca5eb 682 cell_error(pool, m->cell);
905386f8 683 goto out;
991d9fa0
JT
684 }
685
686 /*
687 * Commit the prepared block into the mapping btree.
688 * Any I/O for this block arriving after this point will get
689 * remapped to it directly.
690 */
691 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
692 if (r) {
b5330655 693 metadata_operation_failed(pool, "dm_thin_insert_block", r);
6beca5eb 694 cell_error(pool, m->cell);
905386f8 695 goto out;
991d9fa0
JT
696 }
697
698 /*
699 * Release any bios held while the block was being provisioned.
700 * If we are processing a write bio that completely covers the block,
701 * we already processed it so can ignore it now when processing
702 * the bios in the cell.
703 */
704 if (bio) {
f286ba0e 705 cell_defer_no_holder(tc, m->cell);
991d9fa0
JT
706 bio_endio(bio, 0);
707 } else
2aab3850 708 cell_defer(tc, m->cell);
991d9fa0 709
905386f8 710out:
991d9fa0 711 list_del(&m->list);
6beca5eb 712 mempool_free(m, pool->mapping_pool);
991d9fa0
JT
713}
714
e49e5829 715static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
104655fd 716{
104655fd
JT
717 struct thin_c *tc = m->tc;
718
e49e5829 719 bio_io_error(m->bio);
f286ba0e
JT
720 cell_defer_no_holder(tc, m->cell);
721 cell_defer_no_holder(tc, m->cell2);
e49e5829
JT
722 mempool_free(m, tc->pool->mapping_pool);
723}
724
725static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
726{
727 struct thin_c *tc = m->tc;
104655fd 728
e8088073 729 inc_all_io_entry(tc->pool, m->bio);
f286ba0e
JT
730 cell_defer_no_holder(tc, m->cell);
731 cell_defer_no_holder(tc, m->cell2);
e8088073 732
104655fd 733 if (m->pass_discard)
19fa1a67
JT
734 if (m->definitely_not_shared)
735 remap_and_issue(tc, m->bio, m->data_block);
736 else {
737 bool used = false;
738 if (dm_pool_block_is_used(tc->pool->pmd, m->data_block, &used) || used)
739 bio_endio(m->bio, 0);
740 else
741 remap_and_issue(tc, m->bio, m->data_block);
742 }
104655fd
JT
743 else
744 bio_endio(m->bio, 0);
745
104655fd
JT
746 mempool_free(m, tc->pool->mapping_pool);
747}
748
e49e5829
JT
749static void process_prepared_discard(struct dm_thin_new_mapping *m)
750{
751 int r;
752 struct thin_c *tc = m->tc;
753
754 r = dm_thin_remove_block(tc->td, m->virt_block);
755 if (r)
c397741c 756 DMERR_LIMIT("dm_thin_remove_block() failed");
e49e5829
JT
757
758 process_prepared_discard_passdown(m);
759}
760
104655fd 761static void process_prepared(struct pool *pool, struct list_head *head,
e49e5829 762 process_mapping_fn *fn)
991d9fa0
JT
763{
764 unsigned long flags;
765 struct list_head maps;
a24c2569 766 struct dm_thin_new_mapping *m, *tmp;
991d9fa0
JT
767
768 INIT_LIST_HEAD(&maps);
769 spin_lock_irqsave(&pool->lock, flags);
104655fd 770 list_splice_init(head, &maps);
991d9fa0
JT
771 spin_unlock_irqrestore(&pool->lock, flags);
772
773 list_for_each_entry_safe(m, tmp, &maps, list)
e49e5829 774 (*fn)(m);
991d9fa0
JT
775}
776
777/*
778 * Deferred bio jobs.
779 */
104655fd 780static int io_overlaps_block(struct pool *pool, struct bio *bio)
991d9fa0 781{
4f024f37
KO
782 return bio->bi_iter.bi_size ==
783 (pool->sectors_per_block << SECTOR_SHIFT);
104655fd
JT
784}
785
786static int io_overwrites_block(struct pool *pool, struct bio *bio)
787{
788 return (bio_data_dir(bio) == WRITE) &&
789 io_overlaps_block(pool, bio);
991d9fa0
JT
790}
791
792static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
793 bio_end_io_t *fn)
794{
795 *save = bio->bi_end_io;
796 bio->bi_end_io = fn;
797}
798
799static int ensure_next_mapping(struct pool *pool)
800{
801 if (pool->next_mapping)
802 return 0;
803
804 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
805
806 return pool->next_mapping ? 0 : -ENOMEM;
807}
808
a24c2569 809static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
991d9fa0 810{
16961b04 811 struct dm_thin_new_mapping *m = pool->next_mapping;
991d9fa0
JT
812
813 BUG_ON(!pool->next_mapping);
814
16961b04
MS
815 memset(m, 0, sizeof(struct dm_thin_new_mapping));
816 INIT_LIST_HEAD(&m->list);
817 m->bio = NULL;
818
991d9fa0
JT
819 pool->next_mapping = NULL;
820
16961b04 821 return m;
991d9fa0
JT
822}
823
824static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
2dd9c257
JT
825 struct dm_dev *origin, dm_block_t data_origin,
826 dm_block_t data_dest,
a24c2569 827 struct dm_bio_prison_cell *cell, struct bio *bio)
991d9fa0
JT
828{
829 int r;
830 struct pool *pool = tc->pool;
a24c2569 831 struct dm_thin_new_mapping *m = get_next_mapping(pool);
991d9fa0 832
991d9fa0
JT
833 m->tc = tc;
834 m->virt_block = virt_block;
835 m->data_block = data_dest;
836 m->cell = cell;
991d9fa0 837
44feb387 838 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
7f214665 839 m->quiesced = true;
991d9fa0
JT
840
841 /*
842 * IO to pool_dev remaps to the pool target's data_dev.
843 *
844 * If the whole block of data is being overwritten, we can issue the
845 * bio immediately. Otherwise we use kcopyd to clone the data first.
846 */
847 if (io_overwrites_block(pool, bio)) {
59c3d2c6 848 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 849
eb2aa48d 850 h->overwrite_mapping = m;
991d9fa0
JT
851 m->bio = bio;
852 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
e8088073 853 inc_all_io_entry(pool, bio);
991d9fa0
JT
854 remap_and_issue(tc, bio, data_dest);
855 } else {
856 struct dm_io_region from, to;
857
2dd9c257 858 from.bdev = origin->bdev;
991d9fa0
JT
859 from.sector = data_origin * pool->sectors_per_block;
860 from.count = pool->sectors_per_block;
861
862 to.bdev = tc->pool_dev->bdev;
863 to.sector = data_dest * pool->sectors_per_block;
864 to.count = pool->sectors_per_block;
865
866 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
867 0, copy_complete, m);
868 if (r < 0) {
869 mempool_free(m, pool->mapping_pool);
c397741c 870 DMERR_LIMIT("dm_kcopyd_copy() failed");
6beca5eb 871 cell_error(pool, cell);
991d9fa0
JT
872 }
873 }
874}
875
2dd9c257
JT
876static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
877 dm_block_t data_origin, dm_block_t data_dest,
a24c2569 878 struct dm_bio_prison_cell *cell, struct bio *bio)
2dd9c257
JT
879{
880 schedule_copy(tc, virt_block, tc->pool_dev,
881 data_origin, data_dest, cell, bio);
882}
883
884static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
885 dm_block_t data_dest,
a24c2569 886 struct dm_bio_prison_cell *cell, struct bio *bio)
2dd9c257
JT
887{
888 schedule_copy(tc, virt_block, tc->origin_dev,
889 virt_block, data_dest, cell, bio);
890}
891
991d9fa0 892static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
a24c2569 893 dm_block_t data_block, struct dm_bio_prison_cell *cell,
991d9fa0
JT
894 struct bio *bio)
895{
896 struct pool *pool = tc->pool;
a24c2569 897 struct dm_thin_new_mapping *m = get_next_mapping(pool);
991d9fa0 898
7f214665
MS
899 m->quiesced = true;
900 m->prepared = false;
991d9fa0
JT
901 m->tc = tc;
902 m->virt_block = virt_block;
903 m->data_block = data_block;
904 m->cell = cell;
991d9fa0
JT
905
906 /*
907 * If the whole block of data is being overwritten or we are not
908 * zeroing pre-existing data, we can issue the bio immediately.
909 * Otherwise we use kcopyd to zero the data first.
910 */
67e2e2b2 911 if (!pool->pf.zero_new_blocks)
991d9fa0
JT
912 process_prepared_mapping(m);
913
914 else if (io_overwrites_block(pool, bio)) {
59c3d2c6 915 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
a24c2569 916
eb2aa48d 917 h->overwrite_mapping = m;
991d9fa0
JT
918 m->bio = bio;
919 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
e8088073 920 inc_all_io_entry(pool, bio);
991d9fa0 921 remap_and_issue(tc, bio, data_block);
991d9fa0
JT
922 } else {
923 int r;
924 struct dm_io_region to;
925
926 to.bdev = tc->pool_dev->bdev;
927 to.sector = data_block * pool->sectors_per_block;
928 to.count = pool->sectors_per_block;
929
930 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
931 if (r < 0) {
932 mempool_free(m, pool->mapping_pool);
c397741c 933 DMERR_LIMIT("dm_kcopyd_zero() failed");
6beca5eb 934 cell_error(pool, cell);
991d9fa0
JT
935 }
936 }
937}
938
e49e5829
JT
939/*
940 * A non-zero return indicates read_only or fail_io mode.
941 * Many callers don't care about the return value.
942 */
020cc3b5 943static int commit(struct pool *pool)
e49e5829
JT
944{
945 int r;
946
8d07e8a5 947 if (get_pool_mode(pool) >= PM_READ_ONLY)
e49e5829
JT
948 return -EINVAL;
949
020cc3b5 950 r = dm_pool_commit_metadata(pool->pmd);
b5330655
JT
951 if (r)
952 metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
e49e5829
JT
953
954 return r;
955}
956
88a6621b
JT
957static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
958{
959 unsigned long flags;
960
961 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
962 DMWARN("%s: reached low water mark for data device: sending event.",
963 dm_device_name(pool->pool_md));
964 spin_lock_irqsave(&pool->lock, flags);
965 pool->low_water_triggered = true;
966 spin_unlock_irqrestore(&pool->lock, flags);
967 dm_table_event(pool->ti->table);
968 }
969}
970
3e1a0699
JT
971static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
972
991d9fa0
JT
973static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
974{
975 int r;
976 dm_block_t free_blocks;
991d9fa0
JT
977 struct pool *pool = tc->pool;
978
3e1a0699 979 if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
8d30abff
JT
980 return -EINVAL;
981
991d9fa0 982 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
b5330655
JT
983 if (r) {
984 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
991d9fa0 985 return r;
b5330655 986 }
991d9fa0 987
88a6621b 988 check_low_water_mark(pool, free_blocks);
991d9fa0
JT
989
990 if (!free_blocks) {
94563bad
MS
991 /*
992 * Try to commit to see if that will free up some
993 * more space.
994 */
020cc3b5
JT
995 r = commit(pool);
996 if (r)
997 return r;
991d9fa0 998
94563bad 999 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
b5330655
JT
1000 if (r) {
1001 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r);
94563bad 1002 return r;
b5330655 1003 }
991d9fa0 1004
94563bad 1005 if (!free_blocks) {
3e1a0699 1006 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
94563bad 1007 return -ENOSPC;
991d9fa0
JT
1008 }
1009 }
1010
1011 r = dm_pool_alloc_data_block(pool->pmd, result);
4a02b34e 1012 if (r) {
b5330655 1013 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
991d9fa0 1014 return r;
4a02b34e 1015 }
991d9fa0
JT
1016
1017 return 0;
1018}
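/*
 * Note added for clarity: a -ENOSPC return from alloc_data_block() means the
 * pool has already been switched to PM_OUT_OF_DATA_SPACE; callers react by
 * calling retry_bios_on_resume(), which either requeues or errors the bios
 * depending on pool->pf.error_if_no_space.
 */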
1019
1020/*
1021 * If we have run out of space, queue bios until the device is
1022 * resumed, presumably after having been reloaded with more space.
1023 */
1024static void retry_on_resume(struct bio *bio)
1025{
59c3d2c6 1026 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 1027 struct thin_c *tc = h->tc;
991d9fa0
JT
1028 unsigned long flags;
1029
c140e1c4
MS
1030 spin_lock_irqsave(&tc->lock, flags);
1031 bio_list_add(&tc->retry_on_resume_list, bio);
1032 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1033}
1034
af91805a 1035static int should_error_unserviceable_bio(struct pool *pool)
8c0f0e8c 1036{
3e1a0699
JT
1037 enum pool_mode m = get_pool_mode(pool);
1038
1039 switch (m) {
1040 case PM_WRITE:
1041 /* Shouldn't get here */
1042 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
af91805a 1043 return -EIO;
3e1a0699
JT
1044
1045 case PM_OUT_OF_DATA_SPACE:
af91805a 1046 return pool->pf.error_if_no_space ? -ENOSPC : 0;
3e1a0699
JT
1047
1048 case PM_READ_ONLY:
1049 case PM_FAIL:
af91805a 1050 return -EIO;
3e1a0699
JT
1051 default:
1052 /* Shouldn't get here */
1053 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
af91805a 1054 return -EIO;
3e1a0699
JT
1055 }
1056}
8c0f0e8c 1057
3e1a0699
JT
1058static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
1059{
af91805a
MS
1060 int error = should_error_unserviceable_bio(pool);
1061
1062 if (error)
1063 bio_endio(bio, error);
6d16202b
MS
1064 else
1065 retry_on_resume(bio);
8c0f0e8c
MS
1066}
1067
399caddf 1068static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell)
991d9fa0
JT
1069{
1070 struct bio *bio;
1071 struct bio_list bios;
af91805a 1072 int error;
991d9fa0 1073
af91805a
MS
1074 error = should_error_unserviceable_bio(pool);
1075 if (error) {
1076 cell_error_with_code(pool, cell, error);
3e1a0699
JT
1077 return;
1078 }
1079
991d9fa0 1080 bio_list_init(&bios);
6beca5eb 1081 cell_release(pool, cell, &bios);
991d9fa0 1082
af91805a
MS
1083 error = should_error_unserviceable_bio(pool);
1084 if (error)
3e1a0699 1085 while ((bio = bio_list_pop(&bios)))
af91805a 1086 bio_endio(bio, error);
3e1a0699
JT
1087 else
1088 while ((bio = bio_list_pop(&bios)))
1089 retry_on_resume(bio);
991d9fa0
JT
1090}
1091
104655fd
JT
1092static void process_discard(struct thin_c *tc, struct bio *bio)
1093{
1094 int r;
c3a0ce2e 1095 unsigned long flags;
104655fd 1096 struct pool *pool = tc->pool;
a24c2569 1097 struct dm_bio_prison_cell *cell, *cell2;
44feb387 1098 struct dm_cell_key key, key2;
104655fd
JT
1099 dm_block_t block = get_bio_block(tc, bio);
1100 struct dm_thin_lookup_result lookup_result;
a24c2569 1101 struct dm_thin_new_mapping *m;
104655fd
JT
1102
1103 build_virtual_key(tc->td, block, &key);
6beca5eb 1104 if (bio_detain(tc->pool, &key, bio, &cell))
104655fd
JT
1105 return;
1106
1107 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1108 switch (r) {
1109 case 0:
1110 /*
1111 * Check nobody is fiddling with this pool block. This can
1112 * happen if someone's in the process of breaking sharing
1113 * on this block.
1114 */
1115 build_data_key(tc->td, lookup_result.block, &key2);
6beca5eb 1116 if (bio_detain(tc->pool, &key2, bio, &cell2)) {
f286ba0e 1117 cell_defer_no_holder(tc, cell);
104655fd
JT
1118 break;
1119 }
1120
1121 if (io_overlaps_block(pool, bio)) {
1122 /*
1123 * IO may still be going to the destination block. We must
1124 * quiesce before we can do the removal.
1125 */
1126 m = get_next_mapping(pool);
1127 m->tc = tc;
19fa1a67
JT
1128 m->pass_discard = pool->pf.discard_passdown;
1129 m->definitely_not_shared = !lookup_result.shared;
104655fd
JT
1130 m->virt_block = block;
1131 m->data_block = lookup_result.block;
1132 m->cell = cell;
1133 m->cell2 = cell2;
104655fd
JT
1134 m->bio = bio;
1135
44feb387 1136 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
c3a0ce2e 1137 spin_lock_irqsave(&pool->lock, flags);
daec338b 1138 list_add_tail(&m->list, &pool->prepared_discards);
c3a0ce2e 1139 spin_unlock_irqrestore(&pool->lock, flags);
104655fd
JT
1140 wake_worker(pool);
1141 }
1142 } else {
e8088073 1143 inc_all_io_entry(pool, bio);
f286ba0e
JT
1144 cell_defer_no_holder(tc, cell);
1145 cell_defer_no_holder(tc, cell2);
e8088073 1146
104655fd 1147 /*
49296309
MP
1148 * The DM core makes sure that the discard doesn't span
1149 * a block boundary. So we submit the discard of a
1150 * partial block appropriately.
104655fd 1151 */
650d2a06
MP
1152 if ((!lookup_result.shared) && pool->pf.discard_passdown)
1153 remap_and_issue(tc, bio, lookup_result.block);
1154 else
1155 bio_endio(bio, 0);
104655fd
JT
1156 }
1157 break;
1158
1159 case -ENODATA:
1160 /*
1161 * It isn't provisioned, just forget it.
1162 */
f286ba0e 1163 cell_defer_no_holder(tc, cell);
104655fd
JT
1164 bio_endio(bio, 0);
1165 break;
1166
1167 default:
c397741c
MS
1168 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1169 __func__, r);
f286ba0e 1170 cell_defer_no_holder(tc, cell);
104655fd
JT
1171 bio_io_error(bio);
1172 break;
1173 }
1174}
1175
991d9fa0 1176static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
44feb387 1177 struct dm_cell_key *key,
991d9fa0 1178 struct dm_thin_lookup_result *lookup_result,
a24c2569 1179 struct dm_bio_prison_cell *cell)
991d9fa0
JT
1180{
1181 int r;
1182 dm_block_t data_block;
d6fc2042 1183 struct pool *pool = tc->pool;
991d9fa0
JT
1184
1185 r = alloc_data_block(tc, &data_block);
1186 switch (r) {
1187 case 0:
2dd9c257
JT
1188 schedule_internal_copy(tc, block, lookup_result->block,
1189 data_block, cell, bio);
991d9fa0
JT
1190 break;
1191
1192 case -ENOSPC:
399caddf 1193 retry_bios_on_resume(pool, cell);
991d9fa0
JT
1194 break;
1195
1196 default:
c397741c
MS
1197 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1198 __func__, r);
d6fc2042 1199 cell_error(pool, cell);
991d9fa0
JT
1200 break;
1201 }
1202}
1203
1204static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1205 dm_block_t block,
1206 struct dm_thin_lookup_result *lookup_result)
1207{
a24c2569 1208 struct dm_bio_prison_cell *cell;
991d9fa0 1209 struct pool *pool = tc->pool;
44feb387 1210 struct dm_cell_key key;
991d9fa0
JT
1211
1212 /*
1213 * If cell is already occupied, then sharing is already in the process
1214 * of being broken so we have nothing further to do here.
1215 */
1216 build_data_key(tc->td, lookup_result->block, &key);
6beca5eb 1217 if (bio_detain(pool, &key, bio, &cell))
991d9fa0
JT
1218 return;
1219
4f024f37 1220 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
991d9fa0
JT
1221 break_sharing(tc, bio, block, &key, lookup_result, cell);
1222 else {
59c3d2c6 1223 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
991d9fa0 1224
44feb387 1225 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
e8088073 1226 inc_all_io_entry(pool, bio);
f286ba0e 1227 cell_defer_no_holder(tc, cell);
e8088073 1228
991d9fa0
JT
1229 remap_and_issue(tc, bio, lookup_result->block);
1230 }
1231}
1232
1233static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
a24c2569 1234 struct dm_bio_prison_cell *cell)
991d9fa0
JT
1235{
1236 int r;
1237 dm_block_t data_block;
6beca5eb 1238 struct pool *pool = tc->pool;
991d9fa0
JT
1239
1240 /*
1241 * Remap empty bios (flushes) immediately, without provisioning.
1242 */
4f024f37 1243 if (!bio->bi_iter.bi_size) {
6beca5eb 1244 inc_all_io_entry(pool, bio);
f286ba0e 1245 cell_defer_no_holder(tc, cell);
e8088073 1246
991d9fa0
JT
1247 remap_and_issue(tc, bio, 0);
1248 return;
1249 }
1250
1251 /*
1252 * Fill read bios with zeroes and complete them immediately.
1253 */
1254 if (bio_data_dir(bio) == READ) {
1255 zero_fill_bio(bio);
f286ba0e 1256 cell_defer_no_holder(tc, cell);
991d9fa0
JT
1257 bio_endio(bio, 0);
1258 return;
1259 }
1260
1261 r = alloc_data_block(tc, &data_block);
1262 switch (r) {
1263 case 0:
2dd9c257
JT
1264 if (tc->origin_dev)
1265 schedule_external_copy(tc, block, data_block, cell, bio);
1266 else
1267 schedule_zero(tc, block, data_block, cell, bio);
991d9fa0
JT
1268 break;
1269
1270 case -ENOSPC:
399caddf 1271 retry_bios_on_resume(pool, cell);
991d9fa0
JT
1272 break;
1273
1274 default:
c397741c
MS
1275 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1276 __func__, r);
6beca5eb 1277 cell_error(pool, cell);
991d9fa0
JT
1278 break;
1279 }
1280}
1281
1282static void process_bio(struct thin_c *tc, struct bio *bio)
1283{
1284 int r;
6beca5eb 1285 struct pool *pool = tc->pool;
991d9fa0 1286 dm_block_t block = get_bio_block(tc, bio);
a24c2569 1287 struct dm_bio_prison_cell *cell;
44feb387 1288 struct dm_cell_key key;
991d9fa0
JT
1289 struct dm_thin_lookup_result lookup_result;
1290
1291 /*
1292 * If cell is already occupied, then the block is already
1293 * being provisioned so we have nothing further to do here.
1294 */
1295 build_virtual_key(tc->td, block, &key);
6beca5eb 1296 if (bio_detain(pool, &key, bio, &cell))
991d9fa0
JT
1297 return;
1298
1299 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1300 switch (r) {
1301 case 0:
e8088073 1302 if (lookup_result.shared) {
991d9fa0 1303 process_shared_bio(tc, bio, block, &lookup_result);
6beca5eb 1304 cell_defer_no_holder(tc, cell); /* FIXME: pass this cell into process_shared? */
e8088073 1305 } else {
6beca5eb 1306 inc_all_io_entry(pool, bio);
f286ba0e 1307 cell_defer_no_holder(tc, cell);
e8088073 1308
991d9fa0 1309 remap_and_issue(tc, bio, lookup_result.block);
e8088073 1310 }
991d9fa0
JT
1311 break;
1312
1313 case -ENODATA:
2dd9c257 1314 if (bio_data_dir(bio) == READ && tc->origin_dev) {
6beca5eb 1315 inc_all_io_entry(pool, bio);
f286ba0e 1316 cell_defer_no_holder(tc, cell);
e8088073 1317
2dd9c257
JT
1318 remap_to_origin_and_issue(tc, bio);
1319 } else
1320 provision_block(tc, bio, block, cell);
991d9fa0
JT
1321 break;
1322
1323 default:
c397741c
MS
1324 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1325 __func__, r);
f286ba0e 1326 cell_defer_no_holder(tc, cell);
991d9fa0
JT
1327 bio_io_error(bio);
1328 break;
1329 }
1330}
1331
e49e5829
JT
1332static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1333{
1334 int r;
1335 int rw = bio_data_dir(bio);
1336 dm_block_t block = get_bio_block(tc, bio);
1337 struct dm_thin_lookup_result lookup_result;
1338
1339 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1340 switch (r) {
1341 case 0:
4f024f37 1342 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
8c0f0e8c 1343 handle_unserviceable_bio(tc->pool, bio);
e8088073
JT
1344 else {
1345 inc_all_io_entry(tc->pool, bio);
e49e5829 1346 remap_and_issue(tc, bio, lookup_result.block);
e8088073 1347 }
e49e5829
JT
1348 break;
1349
1350 case -ENODATA:
1351 if (rw != READ) {
8c0f0e8c 1352 handle_unserviceable_bio(tc->pool, bio);
e49e5829
JT
1353 break;
1354 }
1355
1356 if (tc->origin_dev) {
e8088073 1357 inc_all_io_entry(tc->pool, bio);
e49e5829
JT
1358 remap_to_origin_and_issue(tc, bio);
1359 break;
1360 }
1361
1362 zero_fill_bio(bio);
1363 bio_endio(bio, 0);
1364 break;
1365
1366 default:
c397741c
MS
1367 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1368 __func__, r);
e49e5829
JT
1369 bio_io_error(bio);
1370 break;
1371 }
1372}
1373
3e1a0699
JT
1374static void process_bio_success(struct thin_c *tc, struct bio *bio)
1375{
1376 bio_endio(bio, 0);
1377}
1378
e49e5829
JT
1379static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1380{
1381 bio_io_error(bio);
1382}
1383
ac8c3f3d
JT
1384/*
1385 * FIXME: should we also commit due to size of transaction, measured in
1386 * metadata blocks?
1387 */
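/*
 * Note added for clarity: the first comparison below also catches jiffies
 * wrap-around; if the counter has wrapped since last_commit_jiffies we
 * commit straight away rather than wait a full COMMIT_PERIOD.
 */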
905e51b3
JT
1388static int need_commit_due_to_time(struct pool *pool)
1389{
1390 return jiffies < pool->last_commit_jiffies ||
1391 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1392}
1393
67324ea1
MS
1394#define thin_pbd(node) rb_entry((node), struct dm_thin_endio_hook, rb_node)
1395#define thin_bio(pbd) dm_bio_from_per_bio_data((pbd), sizeof(struct dm_thin_endio_hook))
1396
1397static void __thin_bio_rb_add(struct thin_c *tc, struct bio *bio)
1398{
1399 struct rb_node **rbp, *parent;
1400 struct dm_thin_endio_hook *pbd;
1401 sector_t bi_sector = bio->bi_iter.bi_sector;
1402
1403 rbp = &tc->sort_bio_list.rb_node;
1404 parent = NULL;
1405 while (*rbp) {
1406 parent = *rbp;
1407 pbd = thin_pbd(parent);
1408
1409 if (bi_sector < thin_bio(pbd)->bi_iter.bi_sector)
1410 rbp = &(*rbp)->rb_left;
1411 else
1412 rbp = &(*rbp)->rb_right;
1413 }
1414
1415 pbd = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1416 rb_link_node(&pbd->rb_node, parent, rbp);
1417 rb_insert_color(&pbd->rb_node, &tc->sort_bio_list);
1418}
1419
1420static void __extract_sorted_bios(struct thin_c *tc)
1421{
1422 struct rb_node *node;
1423 struct dm_thin_endio_hook *pbd;
1424 struct bio *bio;
1425
1426 for (node = rb_first(&tc->sort_bio_list); node; node = rb_next(node)) {
1427 pbd = thin_pbd(node);
1428 bio = thin_bio(pbd);
1429
1430 bio_list_add(&tc->deferred_bio_list, bio);
1431 rb_erase(&pbd->rb_node, &tc->sort_bio_list);
1432 }
1433
1434 WARN_ON(!RB_EMPTY_ROOT(&tc->sort_bio_list));
1435}
1436
1437static void __sort_thin_deferred_bios(struct thin_c *tc)
1438{
1439 struct bio *bio;
1440 struct bio_list bios;
1441
1442 bio_list_init(&bios);
1443 bio_list_merge(&bios, &tc->deferred_bio_list);
1444 bio_list_init(&tc->deferred_bio_list);
1445
1446 /* Sort deferred_bio_list using rb-tree */
1447 while ((bio = bio_list_pop(&bios)))
1448 __thin_bio_rb_add(tc, bio);
1449
1450 /*
1451 * Transfer the sorted bios in sort_bio_list back to
1452 * deferred_bio_list to allow lockless submission of
1453 * all bios.
1454 */
1455 __extract_sorted_bios(tc);
1456}
1457
c140e1c4 1458static void process_thin_deferred_bios(struct thin_c *tc)
991d9fa0 1459{
c140e1c4 1460 struct pool *pool = tc->pool;
991d9fa0
JT
1461 unsigned long flags;
1462 struct bio *bio;
1463 struct bio_list bios;
67324ea1 1464 struct blk_plug plug;
991d9fa0 1465
c140e1c4
MS
1466 if (tc->requeue_mode) {
1467 requeue_bio_list(tc, &tc->deferred_bio_list);
1468 return;
1469 }
1470
991d9fa0
JT
1471 bio_list_init(&bios);
1472
c140e1c4 1473 spin_lock_irqsave(&tc->lock, flags);
67324ea1
MS
1474
1475 if (bio_list_empty(&tc->deferred_bio_list)) {
1476 spin_unlock_irqrestore(&tc->lock, flags);
1477 return;
1478 }
1479
1480 __sort_thin_deferred_bios(tc);
1481
c140e1c4
MS
1482 bio_list_merge(&bios, &tc->deferred_bio_list);
1483 bio_list_init(&tc->deferred_bio_list);
67324ea1 1484
c140e1c4 1485 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0 1486
67324ea1 1487 blk_start_plug(&plug);
991d9fa0 1488 while ((bio = bio_list_pop(&bios))) {
991d9fa0
JT
1489 /*
1490 * If we've got no free new_mapping structs, and processing
1491 * this bio might require one, we pause until there are some
1492 * prepared mappings to process.
1493 */
1494 if (ensure_next_mapping(pool)) {
c140e1c4
MS
1495 spin_lock_irqsave(&tc->lock, flags);
1496 bio_list_add(&tc->deferred_bio_list, bio);
1497 bio_list_merge(&tc->deferred_bio_list, &bios);
1498 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1499 break;
1500 }
104655fd
JT
1501
1502 if (bio->bi_rw & REQ_DISCARD)
e49e5829 1503 pool->process_discard(tc, bio);
104655fd 1504 else
e49e5829 1505 pool->process_bio(tc, bio);
991d9fa0 1506 }
67324ea1 1507 blk_finish_plug(&plug);
c140e1c4
MS
1508}
1509
b10ebd34
JT
1510static void thin_get(struct thin_c *tc);
1511static void thin_put(struct thin_c *tc);
1512
1513/*
1514 * We can't hold rcu_read_lock() around code that can block. So we
1515 * find a thin with the rcu lock held; bump a refcount; then drop
1516 * the lock.
1517 */
1518static struct thin_c *get_first_thin(struct pool *pool)
1519{
1520 struct thin_c *tc = NULL;
1521
1522 rcu_read_lock();
1523 if (!list_empty(&pool->active_thins)) {
1524 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
1525 thin_get(tc);
1526 }
1527 rcu_read_unlock();
1528
1529 return tc;
1530}
1531
1532static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
1533{
1534 struct thin_c *old_tc = tc;
1535
1536 rcu_read_lock();
1537 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
1538 thin_get(tc);
1539 thin_put(old_tc);
1540 rcu_read_unlock();
1541 return tc;
1542 }
1543 thin_put(old_tc);
1544 rcu_read_unlock();
1545
1546 return NULL;
1547}
1548
c140e1c4
MS
1549static void process_deferred_bios(struct pool *pool)
1550{
1551 unsigned long flags;
1552 struct bio *bio;
1553 struct bio_list bios;
1554 struct thin_c *tc;
1555
b10ebd34
JT
1556 tc = get_first_thin(pool);
1557 while (tc) {
c140e1c4 1558 process_thin_deferred_bios(tc);
b10ebd34
JT
1559 tc = get_next_thin(pool, tc);
1560 }
991d9fa0
JT
1561
1562 /*
1563 * If there are any deferred flush bios, we must commit
1564 * the metadata before issuing them.
1565 */
1566 bio_list_init(&bios);
1567 spin_lock_irqsave(&pool->lock, flags);
1568 bio_list_merge(&bios, &pool->deferred_flush_bios);
1569 bio_list_init(&pool->deferred_flush_bios);
1570 spin_unlock_irqrestore(&pool->lock, flags);
1571
4d1662a3
MS
1572 if (bio_list_empty(&bios) &&
1573 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
991d9fa0
JT
1574 return;
1575
020cc3b5 1576 if (commit(pool)) {
991d9fa0
JT
1577 while ((bio = bio_list_pop(&bios)))
1578 bio_io_error(bio);
1579 return;
1580 }
905e51b3 1581 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
1582
1583 while ((bio = bio_list_pop(&bios)))
1584 generic_make_request(bio);
1585}
1586
1587static void do_worker(struct work_struct *ws)
1588{
1589 struct pool *pool = container_of(ws, struct pool, worker);
1590
e49e5829
JT
1591 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1592 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
991d9fa0
JT
1593 process_deferred_bios(pool);
1594}
1595
905e51b3
JT
1596/*
1597 * We want to commit periodically so that not too much
1598 * unwritten data builds up.
1599 */
1600static void do_waker(struct work_struct *ws)
1601{
1602 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1603 wake_worker(pool);
1604 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1605}
1606
85ad643b
JT
1607/*
1608 * We're holding onto IO to allow userland time to react. After the
1609 * timeout either the pool will have been resized (and thus back in
1610 * PM_WRITE mode), or we degrade to PM_READ_ONLY and start erroring IO.
1611 */
1612static void do_no_space_timeout(struct work_struct *ws)
1613{
1614 struct pool *pool = container_of(to_delayed_work(ws), struct pool,
1615 no_space_timeout);
1616
1617 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space)
1618 set_pool_mode(pool, PM_READ_ONLY);
1619}
1620
991d9fa0
JT
1621/*----------------------------------------------------------------*/
1622
e7a3e871 1623struct pool_work {
738211f7 1624 struct work_struct worker;
e7a3e871
JT
1625 struct completion complete;
1626};
1627
1628static struct pool_work *to_pool_work(struct work_struct *ws)
1629{
1630 return container_of(ws, struct pool_work, worker);
1631}
1632
1633static void pool_work_complete(struct pool_work *pw)
1634{
1635 complete(&pw->complete);
1636}
738211f7 1637
e7a3e871
JT
1638static void pool_work_wait(struct pool_work *pw, struct pool *pool,
1639 void (*fn)(struct work_struct *))
1640{
1641 INIT_WORK_ONSTACK(&pw->worker, fn);
1642 init_completion(&pw->complete);
1643 queue_work(pool->wq, &pw->worker);
1644 wait_for_completion(&pw->complete);
1645}
1646
1647/*----------------------------------------------------------------*/
1648
1649struct noflush_work {
1650 struct pool_work pw;
1651 struct thin_c *tc;
738211f7
JT
1652};
1653
e7a3e871 1654static struct noflush_work *to_noflush(struct work_struct *ws)
738211f7 1655{
e7a3e871 1656 return container_of(to_pool_work(ws), struct noflush_work, pw);
738211f7
JT
1657}
1658
1659static void do_noflush_start(struct work_struct *ws)
1660{
e7a3e871 1661 struct noflush_work *w = to_noflush(ws);
738211f7
JT
1662 w->tc->requeue_mode = true;
1663 requeue_io(w->tc);
e7a3e871 1664 pool_work_complete(&w->pw);
738211f7
JT
1665}
1666
1667static void do_noflush_stop(struct work_struct *ws)
1668{
e7a3e871 1669 struct noflush_work *w = to_noflush(ws);
738211f7 1670 w->tc->requeue_mode = false;
e7a3e871 1671 pool_work_complete(&w->pw);
738211f7
JT
1672}
1673
1674static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
1675{
1676 struct noflush_work w;
1677
738211f7 1678 w.tc = tc;
e7a3e871 1679 pool_work_wait(&w.pw, tc->pool, fn);
738211f7
JT
1680}
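/*
 * Note added for clarity: noflush_work() runs fn on the pool's workqueue via
 * pool_work_wait() and blocks until it completes, so requeue_mode is always
 * flipped from the workqueue's context rather than the caller's.
 */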
1681
1682/*----------------------------------------------------------------*/
1683
e49e5829
JT
1684static enum pool_mode get_pool_mode(struct pool *pool)
1685{
1686 return pool->pf.mode;
1687}
1688
3e1a0699
JT
1689static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
1690{
1691 dm_table_event(pool->ti->table);
1692 DMINFO("%s: switching pool to %s mode",
1693 dm_device_name(pool->pool_md), new_mode);
1694}
1695
8b64e881 1696static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
e49e5829 1697{
cdc2b415 1698 struct pool_c *pt = pool->ti->private;
07f2b6e0
MS
1699 bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
1700 enum pool_mode old_mode = get_pool_mode(pool);
80c57893 1701 unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ;
07f2b6e0
MS
1702
1703 /*
1704 * Never allow the pool to transition to PM_WRITE mode if user
1705 * intervention is required to verify metadata and data consistency.
1706 */
1707 if (new_mode == PM_WRITE && needs_check) {
1708 DMERR("%s: unable to switch pool to write mode until repaired.",
1709 dm_device_name(pool->pool_md));
1710 if (old_mode != new_mode)
1711 new_mode = old_mode;
1712 else
1713 new_mode = PM_READ_ONLY;
1714 }
1715 /*
1716 * If we were in PM_FAIL mode, rollback of metadata failed. We're
1717 * not going to recover without a thin_repair. So we never let the
1718 * pool move out of the old mode.
1719 */
1720 if (old_mode == PM_FAIL)
1721 new_mode = old_mode;
e49e5829 1722
8b64e881 1723 switch (new_mode) {
e49e5829 1724 case PM_FAIL:
8b64e881 1725 if (old_mode != new_mode)
3e1a0699 1726 notify_of_pool_mode_change(pool, "failure");
5383ef3a 1727 dm_pool_metadata_read_only(pool->pmd);
e49e5829
JT
1728 pool->process_bio = process_bio_fail;
1729 pool->process_discard = process_bio_fail;
1730 pool->process_prepared_mapping = process_prepared_mapping_fail;
1731 pool->process_prepared_discard = process_prepared_discard_fail;
3e1a0699
JT
1732
1733 error_retry_list(pool);
e49e5829
JT
1734 break;
1735
1736 case PM_READ_ONLY:
8b64e881 1737 if (old_mode != new_mode)
3e1a0699
JT
1738 notify_of_pool_mode_change(pool, "read-only");
1739 dm_pool_metadata_read_only(pool->pmd);
1740 pool->process_bio = process_bio_read_only;
1741 pool->process_discard = process_bio_success;
1742 pool->process_prepared_mapping = process_prepared_mapping_fail;
1743 pool->process_prepared_discard = process_prepared_discard_passdown;
1744
1745 error_retry_list(pool);
1746 break;
1747
1748 case PM_OUT_OF_DATA_SPACE:
1749 /*
1750 * Ideally we'd never hit this state; the low water mark
1751 * would trigger userland to extend the pool before we
1752 * completely run out of data space. However, many small
1753 * IOs to unprovisioned space can consume data space at an
1754 * alarming rate. Adjust your low water mark if you're
1755 * frequently seeing this mode.
1756 */
1757 if (old_mode != new_mode)
1758 notify_of_pool_mode_change(pool, "out-of-data-space");
1759 pool->process_bio = process_bio_read_only;
1760 pool->process_discard = process_discard;
1761 pool->process_prepared_mapping = process_prepared_mapping;
1762 pool->process_prepared_discard = process_prepared_discard_passdown;
85ad643b 1763
80c57893
MS
1764 if (!pool->pf.error_if_no_space && no_space_timeout)
1765 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
e49e5829
JT
1766 break;
1767
1768 case PM_WRITE:
8b64e881 1769 if (old_mode != new_mode)
3e1a0699 1770 notify_of_pool_mode_change(pool, "write");
9b7aaa64 1771 dm_pool_metadata_read_write(pool->pmd);
e49e5829
JT
1772 pool->process_bio = process_bio;
1773 pool->process_discard = process_discard;
1774 pool->process_prepared_mapping = process_prepared_mapping;
1775 pool->process_prepared_discard = process_prepared_discard;
1776 break;
1777 }
8b64e881
MS
1778
1779 pool->pf.mode = new_mode;
cdc2b415
MS
1780 /*
1781 * The pool mode may have changed, sync it so bind_control_target()
1782 * doesn't cause an unexpected mode transition on resume.
1783 */
1784 pt->adjusted_pf.mode = new_mode;
e49e5829
JT
1785}
1786
07f2b6e0 1787static void abort_transaction(struct pool *pool)
b5330655 1788{
07f2b6e0
MS
1789 const char *dev_name = dm_device_name(pool->pool_md);
1790
1791 DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
1792 if (dm_pool_abort_metadata(pool->pmd)) {
1793 DMERR("%s: failed to abort metadata transaction", dev_name);
1794 set_pool_mode(pool, PM_FAIL);
1795 }
1796
1797 if (dm_pool_metadata_set_needs_check(pool->pmd)) {
1798 DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
1799 set_pool_mode(pool, PM_FAIL);
1800 }
1801}
399caddf 1802
07f2b6e0
MS
1803static void metadata_operation_failed(struct pool *pool, const char *op, int r)
1804{
b5330655
JT
1805 DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
1806 dm_device_name(pool->pool_md), op, r);
1807
07f2b6e0 1808 abort_transaction(pool);
b5330655
JT
1809 set_pool_mode(pool, PM_READ_ONLY);
1810}
1811
e49e5829
JT
1812/*----------------------------------------------------------------*/
1813
991d9fa0
JT
1814/*
1815 * Mapping functions.
1816 */
1817
1818/*
1819 * Called only while mapping a thin bio to hand it over to the workqueue.
1820 */
1821static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1822{
1823 unsigned long flags;
1824 struct pool *pool = tc->pool;
1825
c140e1c4
MS
1826 spin_lock_irqsave(&tc->lock, flags);
1827 bio_list_add(&tc->deferred_bio_list, bio);
1828 spin_unlock_irqrestore(&tc->lock, flags);
991d9fa0
JT
1829
1830 wake_worker(pool);
1831}
1832
59c3d2c6 1833static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
eb2aa48d 1834{
59c3d2c6 1835 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d
JT
1836
1837 h->tc = tc;
1838 h->shared_read_entry = NULL;
e8088073 1839 h->all_io_entry = NULL;
eb2aa48d 1840 h->overwrite_mapping = NULL;
eb2aa48d
JT
1841}
1842
991d9fa0
JT
1843/*
1844 * Non-blocking function called from the thin target's map function.
1845 */
7de3ee57 1846static int thin_bio_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
1847{
1848 int r;
1849 struct thin_c *tc = ti->private;
1850 dm_block_t block = get_bio_block(tc, bio);
1851 struct dm_thin_device *td = tc->td;
1852 struct dm_thin_lookup_result result;
025b9685
JT
1853 struct dm_bio_prison_cell cell1, cell2;
1854 struct dm_bio_prison_cell *cell_result;
e8088073 1855 struct dm_cell_key key;
991d9fa0 1856
59c3d2c6 1857 thin_hook_bio(tc, bio);
e49e5829 1858
738211f7
JT
1859 if (tc->requeue_mode) {
1860 bio_endio(bio, DM_ENDIO_REQUEUE);
1861 return DM_MAPIO_SUBMITTED;
1862 }
1863
e49e5829
JT
1864 if (get_pool_mode(tc->pool) == PM_FAIL) {
1865 bio_io_error(bio);
1866 return DM_MAPIO_SUBMITTED;
1867 }
1868
104655fd 1869 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
991d9fa0
JT
1870 thin_defer_bio(tc, bio);
1871 return DM_MAPIO_SUBMITTED;
1872 }
1873
1874 r = dm_thin_find_block(td, block, 0, &result);
1875
1876 /*
1877 * Note that we defer readahead too.
1878 */
1879 switch (r) {
1880 case 0:
1881 if (unlikely(result.shared)) {
1882 /*
1883 * We have a race condition here between the
1884 * result.shared value returned by the lookup and
1885 * snapshot creation, which may cause new
1886 * sharing.
1887 *
 1888 * To avoid this, always quiesce the origin before
1889 * taking the snap. You want to do this anyway to
1890 * ensure a consistent application view
1891 * (i.e. lockfs).
1892 *
1893 * More distant ancestors are irrelevant. The
1894 * shared flag will be set in their case.
1895 */
1896 thin_defer_bio(tc, bio);
e8088073 1897 return DM_MAPIO_SUBMITTED;
991d9fa0 1898 }
e8088073
JT
1899
1900 build_virtual_key(tc->td, block, &key);
025b9685 1901 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
e8088073
JT
1902 return DM_MAPIO_SUBMITTED;
1903
1904 build_data_key(tc->td, result.block, &key);
025b9685
JT
1905 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1906 cell_defer_no_holder_no_free(tc, &cell1);
e8088073
JT
1907 return DM_MAPIO_SUBMITTED;
1908 }
1909
1910 inc_all_io_entry(tc->pool, bio);
025b9685
JT
1911 cell_defer_no_holder_no_free(tc, &cell2);
1912 cell_defer_no_holder_no_free(tc, &cell1);
e8088073
JT
1913
1914 remap(tc, bio, result.block);
1915 return DM_MAPIO_REMAPPED;
991d9fa0
JT
1916
1917 case -ENODATA:
e49e5829
JT
1918 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1919 /*
1920 * This block isn't provisioned, and we have no way
8c0f0e8c 1921 * of doing so.
e49e5829 1922 */
8c0f0e8c 1923 handle_unserviceable_bio(tc->pool, bio);
2aab3850 1924 return DM_MAPIO_SUBMITTED;
e49e5829
JT
1925 }
1926 /* fall through */
1927
1928 case -EWOULDBLOCK:
991d9fa0
JT
1929 /*
1930 * In future, the failed dm_thin_find_block above could
1931 * provide the hint to load the metadata into cache.
1932 */
991d9fa0 1933 thin_defer_bio(tc, bio);
2aab3850 1934 return DM_MAPIO_SUBMITTED;
e49e5829
JT
1935
1936 default:
1937 /*
1938 * Must always call bio_io_error on failure.
1939 * dm_thin_find_block can fail with -EINVAL if the
1940 * pool is switched to fail-io mode.
1941 */
1942 bio_io_error(bio);
2aab3850 1943 return DM_MAPIO_SUBMITTED;
991d9fa0 1944 }
991d9fa0
JT
1945}
1946
1947static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1948{
991d9fa0 1949 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
760fe67e 1950 struct request_queue *q;
991d9fa0 1951
760fe67e
MS
1952 if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
1953 return 1;
991d9fa0 1954
760fe67e
MS
1955 q = bdev_get_queue(pt->data_dev->bdev);
1956 return bdi_congested(&q->backing_dev_info, bdi_bits);
991d9fa0
JT
1957}
1958
c140e1c4 1959static void requeue_bios(struct pool *pool)
991d9fa0 1960{
c140e1c4
MS
1961 unsigned long flags;
1962 struct thin_c *tc;
1963
1964 rcu_read_lock();
1965 list_for_each_entry_rcu(tc, &pool->active_thins, list) {
1966 spin_lock_irqsave(&tc->lock, flags);
1967 bio_list_merge(&tc->deferred_bio_list, &tc->retry_on_resume_list);
1968 bio_list_init(&tc->retry_on_resume_list);
1969 spin_unlock_irqrestore(&tc->lock, flags);
1970 }
1971 rcu_read_unlock();
991d9fa0
JT
1972}
1973
1974/*----------------------------------------------------------------
1975 * Binding of control targets to a pool object
1976 *--------------------------------------------------------------*/
9bc142dd
MS
1977static bool data_dev_supports_discard(struct pool_c *pt)
1978{
1979 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1980
1981 return q && blk_queue_discard(q);
1982}
1983
58051b94
JT
1984static bool is_factor(sector_t block_size, uint32_t n)
1985{
1986 return !sector_div(block_size, n);
1987}
1988
9bc142dd
MS
1989/*
 1990 * If discard_passdown was enabled, verify that the data device
0424caa1 1991 * supports discards. Disable discard_passdown if not.
9bc142dd 1992 */
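/*
 * Illustrative numbers (hypothetical, not taken from any particular
 * device): with a 512KiB pool block size, a data device advertising a
 * 64KiB discard_granularity passes the checks below (64KiB is a factor
 * of 512KiB), whereas a 768KiB granularity is "larger than a block" and
 * a 192KiB granularity is "not a factor of block size", so either of
 * those would cause passdown to be disabled with a warning.
 */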
0424caa1 1993static void disable_passdown_if_not_supported(struct pool_c *pt)
9bc142dd 1994{
0424caa1
MS
1995 struct pool *pool = pt->pool;
1996 struct block_device *data_bdev = pt->data_dev->bdev;
1997 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1998 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1999 const char *reason = NULL;
9bc142dd
MS
2000 char buf[BDEVNAME_SIZE];
2001
0424caa1 2002 if (!pt->adjusted_pf.discard_passdown)
9bc142dd
MS
2003 return;
2004
0424caa1
MS
2005 if (!data_dev_supports_discard(pt))
2006 reason = "discard unsupported";
2007
2008 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
2009 reason = "max discard sectors smaller than a block";
9bc142dd 2010
0424caa1
MS
2011 else if (data_limits->discard_granularity > block_size)
2012 reason = "discard granularity larger than a block";
2013
58051b94 2014 else if (!is_factor(block_size, data_limits->discard_granularity))
0424caa1
MS
2015 reason = "discard granularity not a factor of block size";
2016
2017 if (reason) {
2018 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
2019 pt->adjusted_pf.discard_passdown = false;
2020 }
9bc142dd
MS
2021}
2022
991d9fa0
JT
2023static int bind_control_target(struct pool *pool, struct dm_target *ti)
2024{
2025 struct pool_c *pt = ti->private;
2026
e49e5829 2027 /*
9b7aaa64 2028 * We want to make sure that a pool in PM_FAIL mode is never upgraded.
e49e5829 2029 */
07f2b6e0 2030 enum pool_mode old_mode = get_pool_mode(pool);
0424caa1 2031 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829 2032
8b64e881
MS
2033 /*
2034 * Don't change the pool's mode until set_pool_mode() below.
2035 * Otherwise the pool's process_* function pointers may
2036 * not match the desired pool mode.
2037 */
2038 pt->adjusted_pf.mode = old_mode;
2039
2040 pool->ti = ti;
2041 pool->pf = pt->adjusted_pf;
2042 pool->low_water_blocks = pt->low_water_blocks;
2043
9bc142dd 2044 set_pool_mode(pool, new_mode);
f402693d 2045
991d9fa0
JT
2046 return 0;
2047}
2048
2049static void unbind_control_target(struct pool *pool, struct dm_target *ti)
2050{
2051 if (pool->ti == ti)
2052 pool->ti = NULL;
2053}
2054
2055/*----------------------------------------------------------------
2056 * Pool creation
2057 *--------------------------------------------------------------*/
67e2e2b2
JT
2058/* Initialize pool features. */
2059static void pool_features_init(struct pool_features *pf)
2060{
e49e5829 2061 pf->mode = PM_WRITE;
9bc142dd
MS
2062 pf->zero_new_blocks = true;
2063 pf->discard_enabled = true;
2064 pf->discard_passdown = true;
787a996c 2065 pf->error_if_no_space = false;
67e2e2b2
JT
2066}
2067
991d9fa0
JT
2068static void __pool_destroy(struct pool *pool)
2069{
2070 __pool_table_remove(pool);
2071
2072 if (dm_pool_metadata_close(pool->pmd) < 0)
2073 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2074
44feb387 2075 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2076 dm_kcopyd_client_destroy(pool->copier);
2077
2078 if (pool->wq)
2079 destroy_workqueue(pool->wq);
2080
2081 if (pool->next_mapping)
2082 mempool_free(pool->next_mapping, pool->mapping_pool);
2083 mempool_destroy(pool->mapping_pool);
44feb387
MS
2084 dm_deferred_set_destroy(pool->shared_read_ds);
2085 dm_deferred_set_destroy(pool->all_io_ds);
991d9fa0
JT
2086 kfree(pool);
2087}
2088
a24c2569 2089static struct kmem_cache *_new_mapping_cache;
a24c2569 2090
991d9fa0
JT
2091static struct pool *pool_create(struct mapped_device *pool_md,
2092 struct block_device *metadata_dev,
e49e5829
JT
2093 unsigned long block_size,
2094 int read_only, char **error)
991d9fa0
JT
2095{
2096 int r;
2097 void *err_p;
2098 struct pool *pool;
2099 struct dm_pool_metadata *pmd;
e49e5829 2100 bool format_device = read_only ? false : true;
991d9fa0 2101
e49e5829 2102 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
991d9fa0
JT
2103 if (IS_ERR(pmd)) {
2104 *error = "Error creating metadata object";
2105 return (struct pool *)pmd;
2106 }
2107
2108 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
2109 if (!pool) {
2110 *error = "Error allocating memory for pool";
2111 err_p = ERR_PTR(-ENOMEM);
2112 goto bad_pool;
2113 }
2114
2115 pool->pmd = pmd;
2116 pool->sectors_per_block = block_size;
f9a8e0cd
MP
2117 if (block_size & (block_size - 1))
2118 pool->sectors_per_block_shift = -1;
2119 else
2120 pool->sectors_per_block_shift = __ffs(block_size);
991d9fa0 2121 pool->low_water_blocks = 0;
67e2e2b2 2122 pool_features_init(&pool->pf);
44feb387 2123 pool->prison = dm_bio_prison_create(PRISON_CELLS);
991d9fa0
JT
2124 if (!pool->prison) {
2125 *error = "Error creating pool's bio prison";
2126 err_p = ERR_PTR(-ENOMEM);
2127 goto bad_prison;
2128 }
2129
df5d2e90 2130 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
991d9fa0
JT
2131 if (IS_ERR(pool->copier)) {
2132 r = PTR_ERR(pool->copier);
2133 *error = "Error creating pool's kcopyd client";
2134 err_p = ERR_PTR(r);
2135 goto bad_kcopyd_client;
2136 }
2137
2138 /*
2139 * Create singlethreaded workqueue that will service all devices
2140 * that use this metadata.
2141 */
2142 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2143 if (!pool->wq) {
2144 *error = "Error creating pool's workqueue";
2145 err_p = ERR_PTR(-ENOMEM);
2146 goto bad_wq;
2147 }
2148
2149 INIT_WORK(&pool->worker, do_worker);
905e51b3 2150 INIT_DELAYED_WORK(&pool->waker, do_waker);
85ad643b 2151 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout);
991d9fa0 2152 spin_lock_init(&pool->lock);
991d9fa0
JT
2153 bio_list_init(&pool->deferred_flush_bios);
2154 INIT_LIST_HEAD(&pool->prepared_mappings);
104655fd 2155 INIT_LIST_HEAD(&pool->prepared_discards);
c140e1c4 2156 INIT_LIST_HEAD(&pool->active_thins);
88a6621b 2157 pool->low_water_triggered = false;
44feb387
MS
2158
2159 pool->shared_read_ds = dm_deferred_set_create();
2160 if (!pool->shared_read_ds) {
2161 *error = "Error creating pool's shared read deferred set";
2162 err_p = ERR_PTR(-ENOMEM);
2163 goto bad_shared_read_ds;
2164 }
2165
2166 pool->all_io_ds = dm_deferred_set_create();
2167 if (!pool->all_io_ds) {
2168 *error = "Error creating pool's all io deferred set";
2169 err_p = ERR_PTR(-ENOMEM);
2170 goto bad_all_io_ds;
2171 }
991d9fa0
JT
2172
2173 pool->next_mapping = NULL;
a24c2569
MS
2174 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
2175 _new_mapping_cache);
991d9fa0
JT
2176 if (!pool->mapping_pool) {
2177 *error = "Error creating pool's mapping mempool";
2178 err_p = ERR_PTR(-ENOMEM);
2179 goto bad_mapping_pool;
2180 }
2181
991d9fa0 2182 pool->ref_count = 1;
905e51b3 2183 pool->last_commit_jiffies = jiffies;
991d9fa0
JT
2184 pool->pool_md = pool_md;
2185 pool->md_dev = metadata_dev;
2186 __pool_table_insert(pool);
2187
2188 return pool;
2189
991d9fa0 2190bad_mapping_pool:
44feb387
MS
2191 dm_deferred_set_destroy(pool->all_io_ds);
2192bad_all_io_ds:
2193 dm_deferred_set_destroy(pool->shared_read_ds);
2194bad_shared_read_ds:
991d9fa0
JT
2195 destroy_workqueue(pool->wq);
2196bad_wq:
2197 dm_kcopyd_client_destroy(pool->copier);
2198bad_kcopyd_client:
44feb387 2199 dm_bio_prison_destroy(pool->prison);
991d9fa0
JT
2200bad_prison:
2201 kfree(pool);
2202bad_pool:
2203 if (dm_pool_metadata_close(pmd))
2204 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
2205
2206 return err_p;
2207}
2208
2209static void __pool_inc(struct pool *pool)
2210{
2211 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2212 pool->ref_count++;
2213}
2214
2215static void __pool_dec(struct pool *pool)
2216{
2217 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
2218 BUG_ON(!pool->ref_count);
2219 if (!--pool->ref_count)
2220 __pool_destroy(pool);
2221}
2222
2223static struct pool *__pool_find(struct mapped_device *pool_md,
2224 struct block_device *metadata_dev,
e49e5829
JT
2225 unsigned long block_size, int read_only,
2226 char **error, int *created)
991d9fa0
JT
2227{
2228 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
2229
2230 if (pool) {
f09996c9
MS
2231 if (pool->pool_md != pool_md) {
2232 *error = "metadata device already in use by a pool";
991d9fa0 2233 return ERR_PTR(-EBUSY);
f09996c9 2234 }
991d9fa0
JT
2235 __pool_inc(pool);
2236
2237 } else {
2238 pool = __pool_table_lookup(pool_md);
2239 if (pool) {
f09996c9
MS
2240 if (pool->md_dev != metadata_dev) {
2241 *error = "different pool cannot replace a pool";
991d9fa0 2242 return ERR_PTR(-EINVAL);
f09996c9 2243 }
991d9fa0
JT
2244 __pool_inc(pool);
2245
67e2e2b2 2246 } else {
e49e5829 2247 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
67e2e2b2
JT
2248 *created = 1;
2249 }
991d9fa0
JT
2250 }
2251
2252 return pool;
2253}
2254
2255/*----------------------------------------------------------------
2256 * Pool target methods
2257 *--------------------------------------------------------------*/
2258static void pool_dtr(struct dm_target *ti)
2259{
2260 struct pool_c *pt = ti->private;
2261
2262 mutex_lock(&dm_thin_pool_table.mutex);
2263
2264 unbind_control_target(pt->pool, ti);
2265 __pool_dec(pt->pool);
2266 dm_put_device(ti, pt->metadata_dev);
2267 dm_put_device(ti, pt->data_dev);
2268 kfree(pt);
2269
2270 mutex_unlock(&dm_thin_pool_table.mutex);
2271}
2272
991d9fa0
JT
2273static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
2274 struct dm_target *ti)
2275{
2276 int r;
2277 unsigned argc;
2278 const char *arg_name;
2279
2280 static struct dm_arg _args[] = {
74aa45c3 2281 {0, 4, "Invalid number of pool feature arguments"},
991d9fa0
JT
2282 };
2283
2284 /*
2285 * No feature arguments supplied.
2286 */
2287 if (!as->argc)
2288 return 0;
2289
2290 r = dm_read_arg_group(_args, as, &argc, &ti->error);
2291 if (r)
2292 return -EINVAL;
2293
2294 while (argc && !r) {
2295 arg_name = dm_shift_arg(as);
2296 argc--;
2297
e49e5829 2298 if (!strcasecmp(arg_name, "skip_block_zeroing"))
9bc142dd 2299 pf->zero_new_blocks = false;
e49e5829
JT
2300
2301 else if (!strcasecmp(arg_name, "ignore_discard"))
9bc142dd 2302 pf->discard_enabled = false;
e49e5829
JT
2303
2304 else if (!strcasecmp(arg_name, "no_discard_passdown"))
9bc142dd 2305 pf->discard_passdown = false;
991d9fa0 2306
e49e5829
JT
2307 else if (!strcasecmp(arg_name, "read_only"))
2308 pf->mode = PM_READ_ONLY;
2309
787a996c
MS
2310 else if (!strcasecmp(arg_name, "error_if_no_space"))
2311 pf->error_if_no_space = true;
2312
e49e5829
JT
2313 else {
2314 ti->error = "Unrecognised pool feature requested";
2315 r = -EINVAL;
2316 break;
2317 }
991d9fa0
JT
2318 }
2319
2320 return r;
2321}
2322
ac8c3f3d
JT
2323static void metadata_low_callback(void *context)
2324{
2325 struct pool *pool = context;
2326
2327 DMWARN("%s: reached low water mark for metadata device: sending event.",
2328 dm_device_name(pool->pool_md));
2329
2330 dm_table_event(pool->ti->table);
2331}
2332
7d48935e
MS
2333static sector_t get_dev_size(struct block_device *bdev)
2334{
2335 return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
2336}
2337
2338static void warn_if_metadata_device_too_big(struct block_device *bdev)
b17446df 2339{
7d48935e 2340 sector_t metadata_dev_size = get_dev_size(bdev);
b17446df
JT
2341 char buffer[BDEVNAME_SIZE];
2342
7d48935e 2343 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
b17446df
JT
2344 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
2345 bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
7d48935e
MS
2346}
2347
2348static sector_t get_metadata_dev_size(struct block_device *bdev)
2349{
2350 sector_t metadata_dev_size = get_dev_size(bdev);
2351
2352 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
2353 metadata_dev_size = THIN_METADATA_MAX_SECTORS;
b17446df
JT
2354
2355 return metadata_dev_size;
2356}
2357
24347e95
JT
2358static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
2359{
2360 sector_t metadata_dev_size = get_metadata_dev_size(bdev);
2361
7d48935e 2362 sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
24347e95
JT
2363
2364 return metadata_dev_size;
2365}
2366
ac8c3f3d
JT
2367/*
2368 * When a metadata threshold is crossed a dm event is triggered, and
2369 * userland should respond by growing the metadata device. We could let
2370 * userland set the threshold, like we do with the data threshold, but I'm
2371 * not sure they know enough to do this well.
2372 */
2373static dm_block_t calc_metadata_threshold(struct pool_c *pt)
2374{
2375 /*
2376 * 4M is ample for all ops with the possible exception of thin
2377 * device deletion which is harmless if it fails (just retry the
2378 * delete after you've grown the device).
2379 */
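	/*
	 * Worked example with hypothetical sizes: a metadata device of
	 * 16384 metadata blocks gives quarter = 4096, which is clamped to
	 * the 1024-block (4M) ceiling below; a small 2048-block device
	 * gives quarter = 512, and that lower value is used instead.
	 */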
2380 dm_block_t quarter = get_metadata_dev_size_in_blocks(pt->metadata_dev->bdev) / 4;
2381 return min((dm_block_t)1024ULL /* 4M */, quarter);
2382}
2383
991d9fa0
JT
2384/*
2385 * thin-pool <metadata dev> <data dev>
2386 * <data block size (sectors)>
2387 * <low water mark (blocks)>
2388 * [<#feature args> [<arg>]*]
2389 *
2390 * Optional feature arguments are:
2391 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
67e2e2b2
JT
2392 * ignore_discard: disable discard
2393 * no_discard_passdown: don't pass discards down to the data device
787a996c
MS
2394 * read_only: Don't allow any changes to be made to the pool metadata.
2395 * error_if_no_space: error IOs, instead of queueing, if no space.
991d9fa0
JT
2396 */
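/*
 * Example table line (hypothetical device names and sizes, shown purely
 * for illustration): a 20GiB data device with 64KiB (128-sector) blocks,
 * a low water mark of 1024 blocks and discard passdown disabled could be
 * activated with:
 *
 *   dmsetup create pool --table \
 *     "0 41943040 thin-pool /dev/mapper/meta /dev/mapper/data 128 1024 1 no_discard_passdown"
 */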
2397static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
2398{
67e2e2b2 2399 int r, pool_created = 0;
991d9fa0
JT
2400 struct pool_c *pt;
2401 struct pool *pool;
2402 struct pool_features pf;
2403 struct dm_arg_set as;
2404 struct dm_dev *data_dev;
2405 unsigned long block_size;
2406 dm_block_t low_water_blocks;
2407 struct dm_dev *metadata_dev;
5d0db96d 2408 fmode_t metadata_mode;
991d9fa0
JT
2409
2410 /*
2411 * FIXME Remove validation from scope of lock.
2412 */
2413 mutex_lock(&dm_thin_pool_table.mutex);
2414
2415 if (argc < 4) {
2416 ti->error = "Invalid argument count";
2417 r = -EINVAL;
2418 goto out_unlock;
2419 }
5d0db96d 2420
991d9fa0
JT
2421 as.argc = argc;
2422 as.argv = argv;
2423
5d0db96d
JT
2424 /*
2425 * Set default pool features.
2426 */
2427 pool_features_init(&pf);
2428
2429 dm_consume_args(&as, 4);
2430 r = parse_pool_features(&as, &pf, ti);
2431 if (r)
2432 goto out_unlock;
2433
2434 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE);
2435 r = dm_get_device(ti, argv[0], metadata_mode, &metadata_dev);
991d9fa0
JT
2436 if (r) {
2437 ti->error = "Error opening metadata block device";
2438 goto out_unlock;
2439 }
7d48935e 2440 warn_if_metadata_device_too_big(metadata_dev->bdev);
991d9fa0
JT
2441
2442 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
2443 if (r) {
2444 ti->error = "Error getting data device";
2445 goto out_metadata;
2446 }
2447
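	/*
	 * The data block size is given in 512-byte sectors; it must fall
	 * within [DATA_DEV_BLOCK_SIZE_MIN_SECTORS, DATA_DEV_BLOCK_SIZE_MAX_SECTORS]
	 * and be a multiple of DATA_DEV_BLOCK_SIZE_MIN_SECTORS (the alignment
	 * check below masks against DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1).
	 */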
2448 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
2449 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
2450 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 2451 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
2452 ti->error = "Invalid block size";
2453 r = -EINVAL;
2454 goto out;
2455 }
2456
2457 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
2458 ti->error = "Invalid low water mark";
2459 r = -EINVAL;
2460 goto out;
2461 }
2462
991d9fa0
JT
2463 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
2464 if (!pt) {
2465 r = -ENOMEM;
2466 goto out;
2467 }
2468
2469 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 2470 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
2471 if (IS_ERR(pool)) {
2472 r = PTR_ERR(pool);
2473 goto out_free_pt;
2474 }
2475
67e2e2b2
JT
2476 /*
2477 * 'pool_created' reflects whether this is the first table load.
2478 * Top level discard support is not allowed to be changed after
2479 * initial load. This would require a pool reload to trigger thin
2480 * device changes.
2481 */
2482 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
2483 ti->error = "Discard support cannot be disabled once enabled";
2484 r = -EINVAL;
2485 goto out_flags_changed;
2486 }
2487
991d9fa0
JT
2488 pt->pool = pool;
2489 pt->ti = ti;
2490 pt->metadata_dev = metadata_dev;
2491 pt->data_dev = data_dev;
2492 pt->low_water_blocks = low_water_blocks;
0424caa1 2493 pt->adjusted_pf = pt->requested_pf = pf;
55a62eef 2494 ti->num_flush_bios = 1;
9bc142dd 2495
67e2e2b2
JT
2496 /*
2497 * Only need to enable discards if the pool should pass
2498 * them down to the data device. The thin device's discard
2499 * processing will cause mappings to be removed from the btree.
2500 */
b60ab990 2501 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 2502 if (pf.discard_enabled && pf.discard_passdown) {
55a62eef 2503 ti->num_discard_bios = 1;
9bc142dd 2504
67e2e2b2
JT
2505 /*
2506 * Setting 'discards_supported' circumvents the normal
2507 * stacking of discard limits (this keeps the pool and
2508 * thin devices' discard limits consistent).
2509 */
0ac55489 2510 ti->discards_supported = true;
67e2e2b2 2511 }
991d9fa0
JT
2512 ti->private = pt;
2513
ac8c3f3d
JT
2514 r = dm_pool_register_metadata_threshold(pt->pool->pmd,
2515 calc_metadata_threshold(pt),
2516 metadata_low_callback,
2517 pool);
2518 if (r)
2519 goto out_free_pt;
2520
991d9fa0
JT
2521 pt->callbacks.congested_fn = pool_is_congested;
2522 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
2523
2524 mutex_unlock(&dm_thin_pool_table.mutex);
2525
2526 return 0;
2527
67e2e2b2
JT
2528out_flags_changed:
2529 __pool_dec(pool);
991d9fa0
JT
2530out_free_pt:
2531 kfree(pt);
2532out:
2533 dm_put_device(ti, data_dev);
2534out_metadata:
2535 dm_put_device(ti, metadata_dev);
2536out_unlock:
2537 mutex_unlock(&dm_thin_pool_table.mutex);
2538
2539 return r;
2540}
2541
7de3ee57 2542static int pool_map(struct dm_target *ti, struct bio *bio)
991d9fa0
JT
2543{
2544 int r;
2545 struct pool_c *pt = ti->private;
2546 struct pool *pool = pt->pool;
2547 unsigned long flags;
2548
2549 /*
2550 * As this is a singleton target, ti->begin is always zero.
2551 */
2552 spin_lock_irqsave(&pool->lock, flags);
2553 bio->bi_bdev = pt->data_dev->bdev;
2554 r = DM_MAPIO_REMAPPED;
2555 spin_unlock_irqrestore(&pool->lock, flags);
2556
2557 return r;
2558}
2559
b17446df 2560static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
991d9fa0
JT
2561{
2562 int r;
2563 struct pool_c *pt = ti->private;
2564 struct pool *pool = pt->pool;
55f2b8bd
MS
2565 sector_t data_size = ti->len;
2566 dm_block_t sb_data_size;
991d9fa0 2567
b17446df 2568 *need_commit = false;
991d9fa0 2569
55f2b8bd
MS
2570 (void) sector_div(data_size, pool->sectors_per_block);
2571
991d9fa0
JT
2572 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2573 if (r) {
4fa5971a
MS
2574 DMERR("%s: failed to retrieve data device size",
2575 dm_device_name(pool->pool_md));
991d9fa0
JT
2576 return r;
2577 }
2578
2579 if (data_size < sb_data_size) {
4fa5971a
MS
2580 DMERR("%s: pool target (%llu blocks) too small: expected %llu",
2581 dm_device_name(pool->pool_md),
55f2b8bd 2582 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
2583 return -EINVAL;
2584
2585 } else if (data_size > sb_data_size) {
07f2b6e0
MS
2586 if (dm_pool_metadata_needs_check(pool->pmd)) {
2587 DMERR("%s: unable to grow the data device until repaired.",
2588 dm_device_name(pool->pool_md));
2589 return 0;
2590 }
2591
6f7f51d4
MS
2592 if (sb_data_size)
2593 DMINFO("%s: growing the data device from %llu to %llu blocks",
2594 dm_device_name(pool->pool_md),
2595 sb_data_size, (unsigned long long)data_size);
991d9fa0
JT
2596 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2597 if (r) {
b5330655 2598 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r);
991d9fa0
JT
2599 return r;
2600 }
2601
b17446df 2602 *need_commit = true;
991d9fa0
JT
2603 }
2604
2605 return 0;
2606}
2607
24347e95
JT
2608static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
2609{
2610 int r;
2611 struct pool_c *pt = ti->private;
2612 struct pool *pool = pt->pool;
2613 dm_block_t metadata_dev_size, sb_metadata_dev_size;
2614
2615 *need_commit = false;
2616
610bba8b 2617 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev);
24347e95
JT
2618
2619 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size);
2620 if (r) {
4fa5971a
MS
2621 DMERR("%s: failed to retrieve metadata device size",
2622 dm_device_name(pool->pool_md));
24347e95
JT
2623 return r;
2624 }
2625
2626 if (metadata_dev_size < sb_metadata_dev_size) {
4fa5971a
MS
2627 DMERR("%s: metadata device (%llu blocks) too small: expected %llu",
2628 dm_device_name(pool->pool_md),
24347e95
JT
2629 metadata_dev_size, sb_metadata_dev_size);
2630 return -EINVAL;
2631
2632 } else if (metadata_dev_size > sb_metadata_dev_size) {
07f2b6e0
MS
2633 if (dm_pool_metadata_needs_check(pool->pmd)) {
2634 DMERR("%s: unable to grow the metadata device until repaired.",
2635 dm_device_name(pool->pool_md));
2636 return 0;
2637 }
2638
7d48935e 2639 warn_if_metadata_device_too_big(pool->md_dev);
6f7f51d4
MS
2640 DMINFO("%s: growing the metadata device from %llu to %llu blocks",
2641 dm_device_name(pool->pool_md),
2642 sb_metadata_dev_size, metadata_dev_size);
24347e95
JT
2643 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
2644 if (r) {
b5330655 2645 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
24347e95
JT
2646 return r;
2647 }
2648
2649 *need_commit = true;
2650 }
2651
2652 return 0;
2653}
2654
b17446df
JT
2655/*
2656 * Retrieves the number of blocks of the data device from
2657 * the superblock and compares it to the actual device size,
2658 * thus resizing the data device in case it has grown.
2659 *
2660 * This both copes with opening preallocated data devices in the ctr
2661 * being followed by a resume
2662 * -and-
2663 * calling the resume method individually after userspace has
2664 * grown the data device in reaction to a table event.
2665 */
2666static int pool_preresume(struct dm_target *ti)
2667{
2668 int r;
24347e95 2669 bool need_commit1, need_commit2;
b17446df
JT
2670 struct pool_c *pt = ti->private;
2671 struct pool *pool = pt->pool;
2672
2673 /*
2674 * Take control of the pool object.
2675 */
2676 r = bind_control_target(pool, ti);
2677 if (r)
2678 return r;
2679
2680 r = maybe_resize_data_dev(ti, &need_commit1);
2681 if (r)
2682 return r;
2683
24347e95
JT
2684 r = maybe_resize_metadata_dev(ti, &need_commit2);
2685 if (r)
2686 return r;
2687
2688 if (need_commit1 || need_commit2)
020cc3b5 2689 (void) commit(pool);
b17446df
JT
2690
2691 return 0;
2692}
2693
991d9fa0
JT
2694static void pool_resume(struct dm_target *ti)
2695{
2696 struct pool_c *pt = ti->private;
2697 struct pool *pool = pt->pool;
2698 unsigned long flags;
2699
2700 spin_lock_irqsave(&pool->lock, flags);
88a6621b 2701 pool->low_water_triggered = false;
991d9fa0 2702 spin_unlock_irqrestore(&pool->lock, flags);
c140e1c4 2703 requeue_bios(pool);
991d9fa0 2704
905e51b3 2705 do_waker(&pool->waker.work);
991d9fa0
JT
2706}
2707
2708static void pool_postsuspend(struct dm_target *ti)
2709{
991d9fa0
JT
2710 struct pool_c *pt = ti->private;
2711 struct pool *pool = pt->pool;
2712
905e51b3 2713 cancel_delayed_work(&pool->waker);
85ad643b 2714 cancel_delayed_work(&pool->no_space_timeout);
991d9fa0 2715 flush_workqueue(pool->wq);
020cc3b5 2716 (void) commit(pool);
991d9fa0
JT
2717}
2718
2719static int check_arg_count(unsigned argc, unsigned args_required)
2720{
2721 if (argc != args_required) {
2722 DMWARN("Message received with %u arguments instead of %u.",
2723 argc, args_required);
2724 return -EINVAL;
2725 }
2726
2727 return 0;
2728}
2729
2730static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2731{
2732 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2733 *dev_id <= MAX_DEV_ID)
2734 return 0;
2735
2736 if (warning)
2737 DMWARN("Message received with invalid device id: %s", arg);
2738
2739 return -EINVAL;
2740}
2741
2742static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2743{
2744 dm_thin_id dev_id;
2745 int r;
2746
2747 r = check_arg_count(argc, 2);
2748 if (r)
2749 return r;
2750
2751 r = read_dev_id(argv[1], &dev_id, 1);
2752 if (r)
2753 return r;
2754
2755 r = dm_pool_create_thin(pool->pmd, dev_id);
2756 if (r) {
2757 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2758 argv[1]);
2759 return r;
2760 }
2761
2762 return 0;
2763}
2764
2765static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2766{
2767 dm_thin_id dev_id;
2768 dm_thin_id origin_dev_id;
2769 int r;
2770
2771 r = check_arg_count(argc, 3);
2772 if (r)
2773 return r;
2774
2775 r = read_dev_id(argv[1], &dev_id, 1);
2776 if (r)
2777 return r;
2778
2779 r = read_dev_id(argv[2], &origin_dev_id, 1);
2780 if (r)
2781 return r;
2782
2783 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2784 if (r) {
2785 DMWARN("Creation of new snapshot %s of device %s failed.",
2786 argv[1], argv[2]);
2787 return r;
2788 }
2789
2790 return 0;
2791}
2792
2793static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2794{
2795 dm_thin_id dev_id;
2796 int r;
2797
2798 r = check_arg_count(argc, 2);
2799 if (r)
2800 return r;
2801
2802 r = read_dev_id(argv[1], &dev_id, 1);
2803 if (r)
2804 return r;
2805
2806 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2807 if (r)
2808 DMWARN("Deletion of thin device %s failed.", argv[1]);
2809
2810 return r;
2811}
2812
2813static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2814{
2815 dm_thin_id old_id, new_id;
2816 int r;
2817
2818 r = check_arg_count(argc, 3);
2819 if (r)
2820 return r;
2821
2822 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2823 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2824 return -EINVAL;
2825 }
2826
2827 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2828 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2829 return -EINVAL;
2830 }
2831
2832 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2833 if (r) {
2834 DMWARN("Failed to change transaction id from %s to %s.",
2835 argv[1], argv[2]);
2836 return r;
2837 }
2838
2839 return 0;
2840}
2841
cc8394d8
JT
2842static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2843{
2844 int r;
2845
2846 r = check_arg_count(argc, 1);
2847 if (r)
2848 return r;
2849
020cc3b5 2850 (void) commit(pool);
0d200aef 2851
cc8394d8
JT
2852 r = dm_pool_reserve_metadata_snap(pool->pmd);
2853 if (r)
2854 DMWARN("reserve_metadata_snap message failed.");
2855
2856 return r;
2857}
2858
2859static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2860{
2861 int r;
2862
2863 r = check_arg_count(argc, 1);
2864 if (r)
2865 return r;
2866
2867 r = dm_pool_release_metadata_snap(pool->pmd);
2868 if (r)
2869 DMWARN("release_metadata_snap message failed.");
2870
2871 return r;
2872}
2873
991d9fa0
JT
2874/*
2875 * Messages supported:
2876 * create_thin <dev_id>
2877 * create_snap <dev_id> <origin_id>
2878 * delete <dev_id>
 2879 * trim <dev_id> <new_size_in_sectors> (listed here but not currently handled by pool_message() below)
2880 * set_transaction_id <current_trans_id> <new_trans_id>
cc8394d8
JT
2881 * reserve_metadata_snap
2882 * release_metadata_snap
991d9fa0
JT
2883 */
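/*
 * Example usage (hypothetical pool name and device ids, for illustration
 * only):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 *
 * The sector argument to dmsetup message is always 0 here because the
 * pool is a singleton target.
 */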
2884static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2885{
2886 int r = -EINVAL;
2887 struct pool_c *pt = ti->private;
2888 struct pool *pool = pt->pool;
2889
2890 if (!strcasecmp(argv[0], "create_thin"))
2891 r = process_create_thin_mesg(argc, argv, pool);
2892
2893 else if (!strcasecmp(argv[0], "create_snap"))
2894 r = process_create_snap_mesg(argc, argv, pool);
2895
2896 else if (!strcasecmp(argv[0], "delete"))
2897 r = process_delete_mesg(argc, argv, pool);
2898
2899 else if (!strcasecmp(argv[0], "set_transaction_id"))
2900 r = process_set_transaction_id_mesg(argc, argv, pool);
2901
cc8394d8
JT
2902 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2903 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2904
2905 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2906 r = process_release_metadata_snap_mesg(argc, argv, pool);
2907
991d9fa0
JT
2908 else
2909 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2910
e49e5829 2911 if (!r)
020cc3b5 2912 (void) commit(pool);
991d9fa0
JT
2913
2914 return r;
2915}
2916
e49e5829
JT
2917static void emit_flags(struct pool_features *pf, char *result,
2918 unsigned sz, unsigned maxlen)
2919{
2920 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
787a996c
MS
2921 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) +
2922 pf->error_if_no_space;
e49e5829
JT
2923 DMEMIT("%u ", count);
2924
2925 if (!pf->zero_new_blocks)
2926 DMEMIT("skip_block_zeroing ");
2927
2928 if (!pf->discard_enabled)
2929 DMEMIT("ignore_discard ");
2930
2931 if (!pf->discard_passdown)
2932 DMEMIT("no_discard_passdown ");
2933
2934 if (pf->mode == PM_READ_ONLY)
2935 DMEMIT("read_only ");
787a996c
MS
2936
2937 if (pf->error_if_no_space)
2938 DMEMIT("error_if_no_space ");
e49e5829
JT
2939}
2940
991d9fa0
JT
2941/*
2942 * Status line is:
 2943 * <transaction id> <used metadata blocks>/<total metadata blocks>
 2944 * <used data blocks>/<total data blocks> <held metadata root>
2945 */
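/*
 * An illustrative STATUSTYPE_INFO line (all numbers hypothetical):
 *
 *   0 141/4096 5005/2048000 - rw discard_passdown queue_if_no_space
 *
 * i.e. transaction 0, 141 of 4096 metadata blocks used, 5005 of 2048000
 * data blocks used, no held metadata root, pool writable, discards passed
 * down, and IO queued rather than errored when data space runs out.
 */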
fd7c092e
MP
2946static void pool_status(struct dm_target *ti, status_type_t type,
2947 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0 2948{
e49e5829 2949 int r;
991d9fa0
JT
2950 unsigned sz = 0;
2951 uint64_t transaction_id;
2952 dm_block_t nr_free_blocks_data;
2953 dm_block_t nr_free_blocks_metadata;
2954 dm_block_t nr_blocks_data;
2955 dm_block_t nr_blocks_metadata;
2956 dm_block_t held_root;
2957 char buf[BDEVNAME_SIZE];
2958 char buf2[BDEVNAME_SIZE];
2959 struct pool_c *pt = ti->private;
2960 struct pool *pool = pt->pool;
2961
2962 switch (type) {
2963 case STATUSTYPE_INFO:
e49e5829
JT
2964 if (get_pool_mode(pool) == PM_FAIL) {
2965 DMEMIT("Fail");
2966 break;
2967 }
2968
1f4e0ff0
AK
2969 /* Commit to ensure statistics aren't out-of-date */
2970 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
020cc3b5 2971 (void) commit(pool);
1f4e0ff0 2972
fd7c092e
MP
2973 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2974 if (r) {
4fa5971a
MS
2975 DMERR("%s: dm_pool_get_metadata_transaction_id returned %d",
2976 dm_device_name(pool->pool_md), r);
fd7c092e
MP
2977 goto err;
2978 }
991d9fa0 2979
fd7c092e
MP
2980 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2981 if (r) {
4fa5971a
MS
2982 DMERR("%s: dm_pool_get_free_metadata_block_count returned %d",
2983 dm_device_name(pool->pool_md), r);
fd7c092e
MP
2984 goto err;
2985 }
991d9fa0
JT
2986
2987 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
fd7c092e 2988 if (r) {
4fa5971a
MS
2989 DMERR("%s: dm_pool_get_metadata_dev_size returned %d",
2990 dm_device_name(pool->pool_md), r);
fd7c092e
MP
2991 goto err;
2992 }
991d9fa0 2993
fd7c092e
MP
2994 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2995 if (r) {
4fa5971a
MS
2996 DMERR("%s: dm_pool_get_free_block_count returned %d",
2997 dm_device_name(pool->pool_md), r);
fd7c092e
MP
2998 goto err;
2999 }
991d9fa0
JT
3000
3001 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
fd7c092e 3002 if (r) {
4fa5971a
MS
3003 DMERR("%s: dm_pool_get_data_dev_size returned %d",
3004 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3005 goto err;
3006 }
991d9fa0 3007
cc8394d8 3008 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
fd7c092e 3009 if (r) {
4fa5971a
MS
3010 DMERR("%s: dm_pool_get_metadata_snap returned %d",
3011 dm_device_name(pool->pool_md), r);
fd7c092e
MP
3012 goto err;
3013 }
991d9fa0
JT
3014
3015 DMEMIT("%llu %llu/%llu %llu/%llu ",
3016 (unsigned long long)transaction_id,
3017 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
3018 (unsigned long long)nr_blocks_metadata,
3019 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
3020 (unsigned long long)nr_blocks_data);
3021
3022 if (held_root)
e49e5829
JT
3023 DMEMIT("%llu ", held_root);
3024 else
3025 DMEMIT("- ");
3026
3e1a0699
JT
3027 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
3028 DMEMIT("out_of_data_space ");
3029 else if (pool->pf.mode == PM_READ_ONLY)
e49e5829 3030 DMEMIT("ro ");
991d9fa0 3031 else
e49e5829
JT
3032 DMEMIT("rw ");
3033
018debea 3034 if (!pool->pf.discard_enabled)
787a996c 3035 DMEMIT("ignore_discard ");
018debea 3036 else if (pool->pf.discard_passdown)
787a996c
MS
3037 DMEMIT("discard_passdown ");
3038 else
3039 DMEMIT("no_discard_passdown ");
3040
3041 if (pool->pf.error_if_no_space)
3042 DMEMIT("error_if_no_space ");
e49e5829 3043 else
787a996c 3044 DMEMIT("queue_if_no_space ");
991d9fa0
JT
3045
3046 break;
3047
3048 case STATUSTYPE_TABLE:
3049 DMEMIT("%s %s %lu %llu ",
3050 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
3051 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
3052 (unsigned long)pool->sectors_per_block,
3053 (unsigned long long)pt->low_water_blocks);
0424caa1 3054 emit_flags(&pt->requested_pf, result, sz, maxlen);
991d9fa0
JT
3055 break;
3056 }
fd7c092e 3057 return;
991d9fa0 3058
fd7c092e
MP
3059err:
3060 DMEMIT("Error");
991d9fa0
JT
3061}
3062
3063static int pool_iterate_devices(struct dm_target *ti,
3064 iterate_devices_callout_fn fn, void *data)
3065{
3066 struct pool_c *pt = ti->private;
3067
3068 return fn(ti, pt->data_dev, 0, ti->len, data);
3069}
3070
3071static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
3072 struct bio_vec *biovec, int max_size)
3073{
3074 struct pool_c *pt = ti->private;
3075 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
3076
3077 if (!q->merge_bvec_fn)
3078 return max_size;
3079
3080 bvm->bi_bdev = pt->data_dev->bdev;
3081
3082 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
3083}
3084
0424caa1 3085static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
104655fd 3086{
0424caa1
MS
3087 struct pool *pool = pt->pool;
3088 struct queue_limits *data_limits;
3089
104655fd
JT
3090 limits->max_discard_sectors = pool->sectors_per_block;
3091
3092 /*
0424caa1 3093 * discard_granularity is just a hint, and not enforced.
104655fd 3094 */
0424caa1
MS
3095 if (pt->adjusted_pf.discard_passdown) {
3096 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
09869de5
LC
3097 limits->discard_granularity = max(data_limits->discard_granularity,
3098 pool->sectors_per_block << SECTOR_SHIFT);
f13945d7 3099 } else
0424caa1 3100 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
104655fd
JT
3101}
3102
991d9fa0
JT
3103static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
3104{
3105 struct pool_c *pt = ti->private;
3106 struct pool *pool = pt->pool;
0cc67cd9 3107 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
991d9fa0 3108
0cc67cd9
MS
3109 /*
3110 * If the system-determined stacked limits are compatible with the
3111 * pool's blocksize (io_opt is a factor) do not override them.
3112 */
3113 if (io_opt_sectors < pool->sectors_per_block ||
3114 do_div(io_opt_sectors, pool->sectors_per_block)) {
3115 blk_limits_io_min(limits, 0);
3116 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
3117 }
0424caa1
MS
3118
3119 /*
3120 * pt->adjusted_pf is a staging area for the actual features to use.
3121 * They get transferred to the live pool in bind_control_target()
3122 * called from pool_preresume().
3123 */
b60ab990
MS
3124 if (!pt->adjusted_pf.discard_enabled) {
3125 /*
3126 * Must explicitly disallow stacking discard limits otherwise the
3127 * block layer will stack them if pool's data device has support.
3128 * QUEUE_FLAG_DISCARD wouldn't be set but there is no way for the
3129 * user to see that, so make sure to set all discard limits to 0.
3130 */
3131 limits->discard_granularity = 0;
0424caa1 3132 return;
b60ab990 3133 }
0424caa1
MS
3134
3135 disable_passdown_if_not_supported(pt);
3136
3137 set_discard_limits(pt, limits);
991d9fa0
JT
3138}
3139
3140static struct target_type pool_target = {
3141 .name = "thin-pool",
3142 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
3143 DM_TARGET_IMMUTABLE,
67324ea1 3144 .version = {1, 12, 0},
991d9fa0
JT
3145 .module = THIS_MODULE,
3146 .ctr = pool_ctr,
3147 .dtr = pool_dtr,
3148 .map = pool_map,
3149 .postsuspend = pool_postsuspend,
3150 .preresume = pool_preresume,
3151 .resume = pool_resume,
3152 .message = pool_message,
3153 .status = pool_status,
3154 .merge = pool_merge,
3155 .iterate_devices = pool_iterate_devices,
3156 .io_hints = pool_io_hints,
3157};
3158
3159/*----------------------------------------------------------------
3160 * Thin target methods
3161 *--------------------------------------------------------------*/
b10ebd34
JT
3162static void thin_get(struct thin_c *tc)
3163{
3164 atomic_inc(&tc->refcount);
3165}
3166
3167static void thin_put(struct thin_c *tc)
3168{
3169 if (atomic_dec_and_test(&tc->refcount))
3170 complete(&tc->can_destroy);
3171}
3172
991d9fa0
JT
3173static void thin_dtr(struct dm_target *ti)
3174{
3175 struct thin_c *tc = ti->private;
c140e1c4
MS
3176 unsigned long flags;
3177
b10ebd34
JT
3178 thin_put(tc);
3179 wait_for_completion(&tc->can_destroy);
3180
c140e1c4
MS
3181 spin_lock_irqsave(&tc->pool->lock, flags);
3182 list_del_rcu(&tc->list);
3183 spin_unlock_irqrestore(&tc->pool->lock, flags);
3184 synchronize_rcu();
991d9fa0
JT
3185
3186 mutex_lock(&dm_thin_pool_table.mutex);
3187
3188 __pool_dec(tc->pool);
3189 dm_pool_close_thin_device(tc->td);
3190 dm_put_device(ti, tc->pool_dev);
2dd9c257
JT
3191 if (tc->origin_dev)
3192 dm_put_device(ti, tc->origin_dev);
991d9fa0
JT
3193 kfree(tc);
3194
3195 mutex_unlock(&dm_thin_pool_table.mutex);
3196}
3197
3198/*
3199 * Thin target parameters:
3200 *
2dd9c257 3201 * <pool_dev> <dev_id> [origin_dev]
991d9fa0
JT
3202 *
3203 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
3204 * dev_id: the internal device identifier
2dd9c257 3205 * origin_dev: a device external to the pool that should act as the origin
67e2e2b2
JT
3206 *
3207 * If the pool device has discards disabled, they get disabled for the thin
3208 * device as well.
991d9fa0
JT
3209 */
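/*
 * Example (hypothetical names and ids, for illustration): once the pool
 * has been sent "create_thin 0", a 1GiB thin volume can be activated
 * with:
 *
 *   dmsetup create thin0 --table "0 2097152 thin /dev/mapper/pool 0"
 *
 * Adding an origin device as the optional third argument creates an
 * external-origin thin device instead.
 */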
3210static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3211{
3212 int r;
3213 struct thin_c *tc;
2dd9c257 3214 struct dm_dev *pool_dev, *origin_dev;
991d9fa0 3215 struct mapped_device *pool_md;
5e3283e2 3216 unsigned long flags;
991d9fa0
JT
3217
3218 mutex_lock(&dm_thin_pool_table.mutex);
3219
2dd9c257 3220 if (argc != 2 && argc != 3) {
991d9fa0
JT
3221 ti->error = "Invalid argument count";
3222 r = -EINVAL;
3223 goto out_unlock;
3224 }
3225
3226 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
3227 if (!tc) {
3228 ti->error = "Out of memory";
3229 r = -ENOMEM;
3230 goto out_unlock;
3231 }
c140e1c4
MS
3232 spin_lock_init(&tc->lock);
3233 bio_list_init(&tc->deferred_bio_list);
3234 bio_list_init(&tc->retry_on_resume_list);
67324ea1 3235 tc->sort_bio_list = RB_ROOT;
991d9fa0 3236
2dd9c257
JT
3237 if (argc == 3) {
3238 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3239 if (r) {
3240 ti->error = "Error opening origin device";
3241 goto bad_origin_dev;
3242 }
3243 tc->origin_dev = origin_dev;
3244 }
3245
991d9fa0
JT
3246 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
3247 if (r) {
3248 ti->error = "Error opening pool device";
3249 goto bad_pool_dev;
3250 }
3251 tc->pool_dev = pool_dev;
3252
3253 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
3254 ti->error = "Invalid device id";
3255 r = -EINVAL;
3256 goto bad_common;
3257 }
3258
3259 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
3260 if (!pool_md) {
3261 ti->error = "Couldn't get pool mapped device";
3262 r = -EINVAL;
3263 goto bad_common;
3264 }
3265
3266 tc->pool = __pool_table_lookup(pool_md);
3267 if (!tc->pool) {
3268 ti->error = "Couldn't find pool object";
3269 r = -EINVAL;
3270 goto bad_pool_lookup;
3271 }
3272 __pool_inc(tc->pool);
3273
e49e5829
JT
3274 if (get_pool_mode(tc->pool) == PM_FAIL) {
3275 ti->error = "Couldn't open thin device, Pool is in fail mode";
1acacc07 3276 r = -EINVAL;
e49e5829
JT
3277 goto bad_thin_open;
3278 }
3279
991d9fa0
JT
3280 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
3281 if (r) {
3282 ti->error = "Couldn't open thin internal device";
3283 goto bad_thin_open;
3284 }
3285
542f9038
MS
3286 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
3287 if (r)
1acacc07 3288 goto bad_target_max_io_len;
542f9038 3289
55a62eef 3290 ti->num_flush_bios = 1;
16ad3d10 3291 ti->flush_supported = true;
59c3d2c6 3292 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
67e2e2b2
JT
3293
3294 /* In case the pool supports discards, pass them on. */
b60ab990 3295 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 3296 if (tc->pool->pf.discard_enabled) {
0ac55489 3297 ti->discards_supported = true;
55a62eef 3298 ti->num_discard_bios = 1;
55a62eef
AK
3299 /* Discard bios must be split on a block boundary */
3300 ti->split_discard_bios = true;
67e2e2b2 3301 }
991d9fa0
JT
3302
3303 dm_put(pool_md);
3304
3305 mutex_unlock(&dm_thin_pool_table.mutex);
3306
b10ebd34
JT
3307 atomic_set(&tc->refcount, 1);
3308 init_completion(&tc->can_destroy);
3309
5e3283e2 3310 spin_lock_irqsave(&tc->pool->lock, flags);
c140e1c4 3311 list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
5e3283e2 3312 spin_unlock_irqrestore(&tc->pool->lock, flags);
c140e1c4
MS
3313 /*
3314 * This synchronize_rcu() call is needed here otherwise we risk a
3315 * wake_worker() call finding no bios to process (because the newly
3316 * added tc isn't yet visible). So this reduces latency since we
3317 * aren't then dependent on the periodic commit to wake_worker().
3318 */
3319 synchronize_rcu();
3320
991d9fa0
JT
3321 return 0;
3322
1acacc07
MS
3323bad_target_max_io_len:
3324 dm_pool_close_thin_device(tc->td);
991d9fa0
JT
3325bad_thin_open:
3326 __pool_dec(tc->pool);
3327bad_pool_lookup:
3328 dm_put(pool_md);
3329bad_common:
3330 dm_put_device(ti, tc->pool_dev);
3331bad_pool_dev:
2dd9c257
JT
3332 if (tc->origin_dev)
3333 dm_put_device(ti, tc->origin_dev);
3334bad_origin_dev:
991d9fa0
JT
3335 kfree(tc);
3336out_unlock:
3337 mutex_unlock(&dm_thin_pool_table.mutex);
3338
3339 return r;
3340}
3341
7de3ee57 3342static int thin_map(struct dm_target *ti, struct bio *bio)
991d9fa0 3343{
4f024f37 3344 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
991d9fa0 3345
7de3ee57 3346 return thin_bio_map(ti, bio);
991d9fa0
JT
3347}
3348
7de3ee57 3349static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
eb2aa48d
JT
3350{
3351 unsigned long flags;
59c3d2c6 3352 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 3353 struct list_head work;
a24c2569 3354 struct dm_thin_new_mapping *m, *tmp;
eb2aa48d
JT
3355 struct pool *pool = h->tc->pool;
3356
3357 if (h->shared_read_entry) {
3358 INIT_LIST_HEAD(&work);
44feb387 3359 dm_deferred_entry_dec(h->shared_read_entry, &work);
eb2aa48d
JT
3360
3361 spin_lock_irqsave(&pool->lock, flags);
3362 list_for_each_entry_safe(m, tmp, &work, list) {
3363 list_del(&m->list);
7f214665 3364 m->quiesced = true;
eb2aa48d
JT
3365 __maybe_add_mapping(m);
3366 }
3367 spin_unlock_irqrestore(&pool->lock, flags);
3368 }
3369
104655fd
JT
3370 if (h->all_io_entry) {
3371 INIT_LIST_HEAD(&work);
44feb387 3372 dm_deferred_entry_dec(h->all_io_entry, &work);
563af186
JT
3373 if (!list_empty(&work)) {
3374 spin_lock_irqsave(&pool->lock, flags);
3375 list_for_each_entry_safe(m, tmp, &work, list)
daec338b 3376 list_add_tail(&m->list, &pool->prepared_discards);
563af186
JT
3377 spin_unlock_irqrestore(&pool->lock, flags);
3378 wake_worker(pool);
3379 }
104655fd
JT
3380 }
3381
eb2aa48d
JT
3382 return 0;
3383}
3384
738211f7 3385static void thin_presuspend(struct dm_target *ti)
991d9fa0 3386{
738211f7
JT
3387 struct thin_c *tc = ti->private;
3388
991d9fa0 3389 if (dm_noflush_suspending(ti))
738211f7
JT
3390 noflush_work(tc, do_noflush_start);
3391}
3392
3393static void thin_postsuspend(struct dm_target *ti)
3394{
3395 struct thin_c *tc = ti->private;
3396
3397 /*
3398 * The dm_noflush_suspending flag has been cleared by now, so
3399 * unfortunately we must always run this.
3400 */
3401 noflush_work(tc, do_noflush_stop);
991d9fa0
JT
3402}
3403
3404/*
3405 * <nr mapped sectors> <highest mapped sector>
3406 */
fd7c092e
MP
3407static void thin_status(struct dm_target *ti, status_type_t type,
3408 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0
JT
3409{
3410 int r;
3411 ssize_t sz = 0;
3412 dm_block_t mapped, highest;
3413 char buf[BDEVNAME_SIZE];
3414 struct thin_c *tc = ti->private;
3415
e49e5829
JT
3416 if (get_pool_mode(tc->pool) == PM_FAIL) {
3417 DMEMIT("Fail");
fd7c092e 3418 return;
e49e5829
JT
3419 }
3420
991d9fa0
JT
3421 if (!tc->td)
3422 DMEMIT("-");
3423 else {
3424 switch (type) {
3425 case STATUSTYPE_INFO:
3426 r = dm_thin_get_mapped_count(tc->td, &mapped);
fd7c092e
MP
3427 if (r) {
3428 DMERR("dm_thin_get_mapped_count returned %d", r);
3429 goto err;
3430 }
991d9fa0
JT
3431
3432 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
fd7c092e
MP
3433 if (r < 0) {
3434 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
3435 goto err;
3436 }
991d9fa0
JT
3437
3438 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
3439 if (r)
3440 DMEMIT("%llu", ((highest + 1) *
3441 tc->pool->sectors_per_block) - 1);
3442 else
3443 DMEMIT("-");
3444 break;
3445
3446 case STATUSTYPE_TABLE:
3447 DMEMIT("%s %lu",
3448 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
3449 (unsigned long) tc->dev_id);
2dd9c257
JT
3450 if (tc->origin_dev)
3451 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
991d9fa0
JT
3452 break;
3453 }
3454 }
3455
fd7c092e
MP
3456 return;
3457
3458err:
3459 DMEMIT("Error");
991d9fa0
JT
3460}
3461
3462static int thin_iterate_devices(struct dm_target *ti,
3463 iterate_devices_callout_fn fn, void *data)
3464{
55f2b8bd 3465 sector_t blocks;
991d9fa0 3466 struct thin_c *tc = ti->private;
55f2b8bd 3467 struct pool *pool = tc->pool;
991d9fa0
JT
3468
3469 /*
3470 * We can't call dm_pool_get_data_dev_size() since that blocks. So
3471 * we follow a more convoluted path through to the pool's target.
3472 */
55f2b8bd 3473 if (!pool->ti)
991d9fa0
JT
3474 return 0; /* nothing is bound */
3475
55f2b8bd
MS
3476 blocks = pool->ti->len;
3477 (void) sector_div(blocks, pool->sectors_per_block);
991d9fa0 3478 if (blocks)
55f2b8bd 3479 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
991d9fa0
JT
3480
3481 return 0;
3482}
3483
991d9fa0
JT
3484static struct target_type thin_target = {
3485 .name = "thin",
67324ea1 3486 .version = {1, 12, 0},
991d9fa0
JT
3487 .module = THIS_MODULE,
3488 .ctr = thin_ctr,
3489 .dtr = thin_dtr,
3490 .map = thin_map,
eb2aa48d 3491 .end_io = thin_endio,
738211f7 3492 .presuspend = thin_presuspend,
991d9fa0
JT
3493 .postsuspend = thin_postsuspend,
3494 .status = thin_status,
3495 .iterate_devices = thin_iterate_devices,
991d9fa0
JT
3496};
3497
3498/*----------------------------------------------------------------*/
3499
3500static int __init dm_thin_init(void)
3501{
3502 int r;
3503
3504 pool_table_init();
3505
3506 r = dm_register_target(&thin_target);
3507 if (r)
3508 return r;
3509
3510 r = dm_register_target(&pool_target);
3511 if (r)
a24c2569
MS
3512 goto bad_pool_target;
3513
3514 r = -ENOMEM;
3515
a24c2569
MS
3516 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
3517 if (!_new_mapping_cache)
3518 goto bad_new_mapping_cache;
3519
a24c2569
MS
3520 return 0;
3521
a24c2569 3522bad_new_mapping_cache:
a24c2569
MS
3523 dm_unregister_target(&pool_target);
3524bad_pool_target:
3525 dm_unregister_target(&thin_target);
991d9fa0
JT
3526
3527 return r;
3528}
3529
3530static void dm_thin_exit(void)
3531{
3532 dm_unregister_target(&thin_target);
3533 dm_unregister_target(&pool_target);
a24c2569 3534
a24c2569 3535 kmem_cache_destroy(_new_mapping_cache);
991d9fa0
JT
3536}
3537
3538module_init(dm_thin_init);
3539module_exit(dm_thin_exit);
3540
80c57893
MS
3541module_param_named(no_space_timeout, no_space_timeout_secs, uint, S_IRUGO | S_IWUSR);
3542MODULE_PARM_DESC(no_space_timeout, "Out of data space queue IO timeout in seconds");
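/*
 * Example (assuming the module is loaded as dm_thin_pool): the timeout
 * can be adjusted at runtime via
 *
 *   echo 120 > /sys/module/dm_thin_pool/parameters/no_space_timeout
 */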
3543
7cab8bf1 3544MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
991d9fa0
JT
3545MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
3546MODULE_LICENSE("GPL");