dm snapshot: do not use map_context
drivers/md/dm-thin.c
 1/*
 2 * Copyright (C) 2011-2012 Red Hat UK.
3 *
4 * This file is released under the GPL.
5 */
6
7#include "dm-thin-metadata.h"
 8#include "dm-bio-prison.h"
 9#include "dm.h"
10
11#include <linux/device-mapper.h>
12#include <linux/dm-io.h>
13#include <linux/dm-kcopyd.h>
14#include <linux/list.h>
15#include <linux/init.h>
16#include <linux/module.h>
17#include <linux/slab.h>
18
19#define DM_MSG_PREFIX "thin"
20
21/*
22 * Tunable constants
23 */
 24#define ENDIO_HOOK_POOL_SIZE 1024
25#define MAPPING_POOL_SIZE 1024
26#define PRISON_CELLS 1024
 27#define COMMIT_PERIOD HZ
28
29/*
30 * The block size of the device holding pool data must be
31 * between 64KB and 1GB.
32 */
33#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
34#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)
35
36/*
37 * Device id is restricted to 24 bits.
38 */
39#define MAX_DEV_ID ((1 << 24) - 1)
40
41/*
42 * How do we handle breaking sharing of data blocks?
43 * =================================================
44 *
45 * We use a standard copy-on-write btree to store the mappings for the
46 * devices (note I'm talking about copy-on-write of the metadata here, not
47 * the data). When you take an internal snapshot you clone the root node
48 * of the origin btree. After this there is no concept of an origin or a
49 * snapshot. They are just two device trees that happen to point to the
50 * same data blocks.
51 *
52 * When we get a write in we decide if it's to a shared data block using
53 * some timestamp magic. If it is, we have to break sharing.
54 *
55 * Let's say we write to a shared block in what was the origin. The
56 * steps are:
57 *
58 * i) plug io further to this physical block. (see bio_prison code).
59 *
60 * ii) quiesce any read io to that shared data block. Obviously
 61 * including all devices that share this block. (see dm_deferred_set code)
62 *
63 * iii) copy the data block to a newly allocate block. This step can be
64 * missed out if the io covers the block. (schedule_copy).
65 *
66 * iv) insert the new mapping into the origin's btree
 67 * (process_prepared_mapping). This act of inserting breaks some
68 * sharing of btree nodes between the two devices. Breaking sharing only
69 * effects the btree of that specific device. Btrees for the other
70 * devices that share the block never change. The btree for the origin
71 * device as it was after the last commit is untouched, ie. we're using
72 * persistent data structures in the functional programming sense.
73 *
74 * v) unplug io to this physical block, including the io that triggered
75 * the breaking of sharing.
76 *
77 * Steps (ii) and (iii) occur in parallel.
78 *
79 * The metadata _doesn't_ need to be committed before the io continues. We
80 * get away with this because the io is always written to a _new_ block.
81 * If there's a crash, then:
82 *
83 * - The origin mapping will point to the old origin block (the shared
84 * one). This will contain the data as it was before the io that triggered
85 * the breaking of sharing came in.
86 *
87 * - The snap mapping still points to the old block. As it would after
88 * the commit.
89 *
90 * The downside of this scheme is the timestamp magic isn't perfect, and
91 * will continue to think that data block in the snapshot device is shared
92 * even after the write to the origin has broken sharing. I suspect data
93 * blocks will typically be shared by many different devices, so we're
94 * breaking sharing n + 1 times, rather than n, where n is the number of
95 * devices that reference this data block. At the moment I think the
96 * benefits far, far outweigh the disadvantages.
97 */
98
99/*----------------------------------------------------------------*/
100
101/*
102 * Key building.
103 */
104static void build_data_key(struct dm_thin_device *td,
 105 dm_block_t b, struct dm_cell_key *key)
106{
107 key->virtual = 0;
108 key->dev = dm_thin_dev_id(td);
109 key->block = b;
110}
111
112static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
 113 struct dm_cell_key *key)
114{
115 key->virtual = 1;
116 key->dev = dm_thin_dev_id(td);
117 key->block = b;
118}
119
120/*----------------------------------------------------------------*/
121
122/*
123 * A pool device ties together a metadata device and a data device. It
124 * also provides the interface for creating and destroying internal
125 * devices.
126 */
 127struct dm_thin_new_mapping;
 128
129/*
130 * The pool runs in 3 modes. Ordered in degraded order for comparisons.
131 */
132enum pool_mode {
133 PM_WRITE, /* metadata may be changed */
134 PM_READ_ONLY, /* metadata may not be changed */
135 PM_FAIL, /* all I/O fails */
136};
137
 138struct pool_features {
139 enum pool_mode mode;
140
141 bool zero_new_blocks:1;
142 bool discard_enabled:1;
143 bool discard_passdown:1;
144};
145
146struct thin_c;
147typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
148typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);
149
150struct pool {
151 struct list_head list;
152 struct dm_target *ti; /* Only set if a pool target is bound */
153
154 struct mapped_device *pool_md;
155 struct block_device *md_dev;
156 struct dm_pool_metadata *pmd;
157
 158 dm_block_t low_water_blocks;
 159 uint32_t sectors_per_block;
 160 int sectors_per_block_shift;
 161
 162 struct pool_features pf;
163 unsigned low_water_triggered:1; /* A dm event has been sent */
164 unsigned no_free_space:1; /* A -ENOSPC warning has been issued */
165
 166 struct dm_bio_prison *prison;
167 struct dm_kcopyd_client *copier;
168
169 struct workqueue_struct *wq;
170 struct work_struct worker;
 171 struct delayed_work waker;
 172
 173 unsigned long last_commit_jiffies;
 174 unsigned ref_count;
175
176 spinlock_t lock;
177 struct bio_list deferred_bios;
178 struct bio_list deferred_flush_bios;
179 struct list_head prepared_mappings;
 180 struct list_head prepared_discards;
181
182 struct bio_list retry_on_resume_list;
183
184 struct dm_deferred_set *shared_read_ds;
185 struct dm_deferred_set *all_io_ds;
 186
 187 struct dm_thin_new_mapping *next_mapping;
 188 mempool_t *mapping_pool;
189
190 process_bio_fn process_bio;
191 process_bio_fn process_discard;
192
193 process_mapping_fn process_prepared_mapping;
194 process_mapping_fn process_prepared_discard;
195};
196
197static enum pool_mode get_pool_mode(struct pool *pool);
198static void set_pool_mode(struct pool *pool, enum pool_mode mode);
199
200/*
201 * Target context for a pool.
202 */
203struct pool_c {
204 struct dm_target *ti;
205 struct pool *pool;
206 struct dm_dev *data_dev;
207 struct dm_dev *metadata_dev;
208 struct dm_target_callbacks callbacks;
209
210 dm_block_t low_water_blocks;
211 struct pool_features requested_pf; /* Features requested during table load */
212 struct pool_features adjusted_pf; /* Features used after adjusting for constituent devices */
213};
214
215/*
216 * Target context for a thin.
217 */
218struct thin_c {
219 struct dm_dev *pool_dev;
 220 struct dm_dev *origin_dev;
221 dm_thin_id dev_id;
222
223 struct pool *pool;
224 struct dm_thin_device *td;
225};
226
227/*----------------------------------------------------------------*/
228
229/*
230 * A global list of pools that uses a struct mapped_device as a key.
231 */
232static struct dm_thin_pool_table {
233 struct mutex mutex;
234 struct list_head pools;
235} dm_thin_pool_table;
236
237static void pool_table_init(void)
238{
239 mutex_init(&dm_thin_pool_table.mutex);
240 INIT_LIST_HEAD(&dm_thin_pool_table.pools);
241}
242
243static void __pool_table_insert(struct pool *pool)
244{
245 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
246 list_add(&pool->list, &dm_thin_pool_table.pools);
247}
248
249static void __pool_table_remove(struct pool *pool)
250{
251 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
252 list_del(&pool->list);
253}
254
255static struct pool *__pool_table_lookup(struct mapped_device *md)
256{
257 struct pool *pool = NULL, *tmp;
258
259 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
260
261 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
262 if (tmp->pool_md == md) {
263 pool = tmp;
264 break;
265 }
266 }
267
268 return pool;
269}
270
271static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
272{
273 struct pool *pool = NULL, *tmp;
274
275 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
276
277 list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
278 if (tmp->md_dev == md_dev) {
279 pool = tmp;
280 break;
281 }
282 }
283
284 return pool;
285}
286
287/*----------------------------------------------------------------*/
288
 289struct dm_thin_endio_hook {
 290 struct thin_c *tc;
291 struct dm_deferred_entry *shared_read_entry;
292 struct dm_deferred_entry *all_io_entry;
 293 struct dm_thin_new_mapping *overwrite_mapping;
294};
295
296static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
297{
298 struct bio *bio;
299 struct bio_list bios;
300
301 bio_list_init(&bios);
302 bio_list_merge(&bios, master);
303 bio_list_init(master);
304
305 while ((bio = bio_list_pop(&bios))) {
 306 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 307
 308 if (h->tc == tc)
309 bio_endio(bio, DM_ENDIO_REQUEUE);
310 else
311 bio_list_add(master, bio);
312 }
313}
314
315static void requeue_io(struct thin_c *tc)
316{
317 struct pool *pool = tc->pool;
318 unsigned long flags;
319
320 spin_lock_irqsave(&pool->lock, flags);
321 __requeue_bio_list(tc, &pool->deferred_bios);
322 __requeue_bio_list(tc, &pool->retry_on_resume_list);
323 spin_unlock_irqrestore(&pool->lock, flags);
324}
325
326/*
327 * This section of code contains the logic for processing a thin device's IO.
328 * Much of the code depends on pool object resources (lists, workqueues, etc)
329 * but most is exclusively called from the thin target rather than the thin-pool
330 * target.
331 */
332
333static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
334{
335 sector_t block_nr = bio->bi_sector;
336
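	/*
	 * A negative sectors_per_block_shift means the pool's block size is
	 * not a power of two, so fall back to sector_div() instead of a shift.
	 */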
337 if (tc->pool->sectors_per_block_shift < 0)
338 (void) sector_div(block_nr, tc->pool->sectors_per_block);
339 else
340 block_nr >>= tc->pool->sectors_per_block_shift;
341
342 return block_nr;
343}
344
345static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
346{
347 struct pool *pool = tc->pool;
 348 sector_t bi_sector = bio->bi_sector;
349
350 bio->bi_bdev = tc->pool_dev->bdev;
351 if (tc->pool->sectors_per_block_shift < 0)
352 bio->bi_sector = (block * pool->sectors_per_block) +
353 sector_div(bi_sector, pool->sectors_per_block);
354 else
355 bio->bi_sector = (block << pool->sectors_per_block_shift) |
356 (bi_sector & (pool->sectors_per_block - 1));
357}
358
359static void remap_to_origin(struct thin_c *tc, struct bio *bio)
360{
361 bio->bi_bdev = tc->origin_dev->bdev;
362}
363
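/*
 * FLUSH/FUA bios only need to force a commit when the current transaction
 * holds uncommitted metadata changes; such bios are batched and issued
 * after a single commit in process_deferred_bios().
 */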
364static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
365{
366 return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
367 dm_thin_changed_this_transaction(tc->td);
368}
369
370static void inc_all_io_entry(struct pool *pool, struct bio *bio)
371{
372 struct dm_thin_endio_hook *h;
373
374 if (bio->bi_rw & REQ_DISCARD)
375 return;
376
 377 h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
378 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
379}
380
 381static void issue(struct thin_c *tc, struct bio *bio)
382{
383 struct pool *pool = tc->pool;
384 unsigned long flags;
385
386 if (!bio_triggers_commit(tc, bio)) {
387 generic_make_request(bio);
388 return;
389 }
390
 391 /*
392 * Complete bio with an error if earlier I/O caused changes to
393 * the metadata that can't be committed e.g, due to I/O errors
394 * on the metadata device.
 395 */
396 if (dm_thin_aborted_changes(tc->td)) {
397 bio_io_error(bio);
398 return;
399 }
400
401 /*
402 * Batch together any bios that trigger commits and then issue a
403 * single commit for them in process_deferred_bios().
404 */
405 spin_lock_irqsave(&pool->lock, flags);
406 bio_list_add(&pool->deferred_flush_bios, bio);
407 spin_unlock_irqrestore(&pool->lock, flags);
408}
409
410static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
411{
412 remap_to_origin(tc, bio);
413 issue(tc, bio);
414}
415
416static void remap_and_issue(struct thin_c *tc, struct bio *bio,
417 dm_block_t block)
418{
419 remap(tc, bio, block);
420 issue(tc, bio);
421}
422
423/*
424 * wake_worker() is used when new work is queued and when pool_resume is
425 * ready to continue deferred IO processing.
426 */
427static void wake_worker(struct pool *pool)
428{
429 queue_work(pool->wq, &pool->worker);
430}
431
432/*----------------------------------------------------------------*/
433
434/*
435 * Bio endio functions.
436 */
 437struct dm_thin_new_mapping {
438 struct list_head list;
439
440 unsigned quiesced:1;
441 unsigned prepared:1;
 442 unsigned pass_discard:1;
443
444 struct thin_c *tc;
445 dm_block_t virt_block;
446 dm_block_t data_block;
 447 struct dm_bio_prison_cell *cell, *cell2;
448 int err;
449
450 /*
451 * If the bio covers the whole area of a block then we can avoid
452 * zeroing or copying. Instead this bio is hooked. The bio will
453 * still be in the cell, so care has to be taken to avoid issuing
454 * the bio twice.
455 */
456 struct bio *bio;
457 bio_end_io_t *saved_bi_end_io;
458};
459
 460static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
461{
462 struct pool *pool = m->tc->pool;
463
 464 if (m->quiesced && m->prepared) {
465 list_add(&m->list, &pool->prepared_mappings);
466 wake_worker(pool);
467 }
468}
469
470static void copy_complete(int read_err, unsigned long write_err, void *context)
471{
472 unsigned long flags;
 473 struct dm_thin_new_mapping *m = context;
474 struct pool *pool = m->tc->pool;
475
476 m->err = read_err || write_err ? -EIO : 0;
477
478 spin_lock_irqsave(&pool->lock, flags);
479 m->prepared = 1;
480 __maybe_add_mapping(m);
481 spin_unlock_irqrestore(&pool->lock, flags);
482}
483
484static void overwrite_endio(struct bio *bio, int err)
485{
486 unsigned long flags;
 487 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 488 struct dm_thin_new_mapping *m = h->overwrite_mapping;
489 struct pool *pool = m->tc->pool;
490
491 m->err = err;
492
493 spin_lock_irqsave(&pool->lock, flags);
494 m->prepared = 1;
495 __maybe_add_mapping(m);
496 spin_unlock_irqrestore(&pool->lock, flags);
497}
498
499/*----------------------------------------------------------------*/
500
501/*
502 * Workqueue.
503 */
504
505/*
506 * Prepared mapping jobs.
507 */
508
509/*
510 * This sends the bios in the cell back to the deferred_bios list.
511 */
 512static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
513{
514 struct pool *pool = tc->pool;
515 unsigned long flags;
516
517 spin_lock_irqsave(&pool->lock, flags);
 518 dm_cell_release(cell, &pool->deferred_bios);
519 spin_unlock_irqrestore(&tc->pool->lock, flags);
520
521 wake_worker(pool);
522}
523
524/*
 525 * Same as cell_defer except it omits the original holder of the cell.
 526 */
 527static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
 528{
529 struct pool *pool = tc->pool;
530 unsigned long flags;
531
 532 spin_lock_irqsave(&pool->lock, flags);
 533 dm_cell_release_no_holder(cell, &pool->deferred_bios);
534 spin_unlock_irqrestore(&pool->lock, flags);
535
536 wake_worker(pool);
537}
538
539static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
540{
541 if (m->bio)
542 m->bio->bi_end_io = m->saved_bi_end_io;
 543 dm_cell_error(m->cell);
544 list_del(&m->list);
545 mempool_free(m, m->tc->pool->mapping_pool);
546}
 547static void process_prepared_mapping(struct dm_thin_new_mapping *m)
548{
549 struct thin_c *tc = m->tc;
550 struct bio *bio;
551 int r;
552
553 bio = m->bio;
554 if (bio)
555 bio->bi_end_io = m->saved_bi_end_io;
556
557 if (m->err) {
 558 dm_cell_error(m->cell);
 559 goto out;
560 }
561
562 /*
563 * Commit the prepared block into the mapping btree.
564 * Any I/O for this block arriving after this point will get
565 * remapped to it directly.
566 */
567 r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
568 if (r) {
 569 DMERR_LIMIT("dm_thin_insert_block() failed");
 570 dm_cell_error(m->cell);
 571 goto out;
572 }
573
574 /*
575 * Release any bios held while the block was being provisioned.
576 * If we are processing a write bio that completely covers the block,
577 * we already processed it so can ignore it now when processing
578 * the bios in the cell.
579 */
580 if (bio) {
 581 cell_defer_no_holder(tc, m->cell);
582 bio_endio(bio, 0);
583 } else
 584 cell_defer(tc, m->cell);
 585
 586out:
587 list_del(&m->list);
588 mempool_free(m, tc->pool->mapping_pool);
589}
590
 591static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
 592{
593 struct thin_c *tc = m->tc;
594
 595 bio_io_error(m->bio);
596 cell_defer_no_holder(tc, m->cell);
597 cell_defer_no_holder(tc, m->cell2);
598 mempool_free(m, tc->pool->mapping_pool);
599}
600
601static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
602{
603 struct thin_c *tc = m->tc;
 604
 605 inc_all_io_entry(tc->pool, m->bio);
606 cell_defer_no_holder(tc, m->cell);
607 cell_defer_no_holder(tc, m->cell2);
 608
609 if (m->pass_discard)
610 remap_and_issue(tc, m->bio, m->data_block);
611 else
612 bio_endio(m->bio, 0);
613
614 mempool_free(m, tc->pool->mapping_pool);
615}
616
617static void process_prepared_discard(struct dm_thin_new_mapping *m)
618{
619 int r;
620 struct thin_c *tc = m->tc;
621
622 r = dm_thin_remove_block(tc->td, m->virt_block);
623 if (r)
 624 DMERR_LIMIT("dm_thin_remove_block() failed");
625
626 process_prepared_discard_passdown(m);
627}
628
 629static void process_prepared(struct pool *pool, struct list_head *head,
 630 process_mapping_fn *fn)
631{
632 unsigned long flags;
633 struct list_head maps;
 634 struct dm_thin_new_mapping *m, *tmp;
635
636 INIT_LIST_HEAD(&maps);
637 spin_lock_irqsave(&pool->lock, flags);
 638 list_splice_init(head, &maps);
639 spin_unlock_irqrestore(&pool->lock, flags);
640
641 list_for_each_entry_safe(m, tmp, &maps, list)
 642 (*fn)(m);
643}
644
645/*
646 * Deferred bio jobs.
647 */
 648static int io_overlaps_block(struct pool *pool, struct bio *bio)
 649{
 650 return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
651}
652
653static int io_overwrites_block(struct pool *pool, struct bio *bio)
654{
655 return (bio_data_dir(bio) == WRITE) &&
656 io_overlaps_block(pool, bio);
657}
658
659static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
660 bio_end_io_t *fn)
661{
662 *save = bio->bi_end_io;
663 bio->bi_end_io = fn;
664}
665
666static int ensure_next_mapping(struct pool *pool)
667{
668 if (pool->next_mapping)
669 return 0;
670
671 pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);
672
673 return pool->next_mapping ? 0 : -ENOMEM;
674}
675
 676static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
 677{
 678 struct dm_thin_new_mapping *r = pool->next_mapping;
679
680 BUG_ON(!pool->next_mapping);
681
682 pool->next_mapping = NULL;
683
684 return r;
685}
686
687static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
688 struct dm_dev *origin, dm_block_t data_origin,
689 dm_block_t data_dest,
 690 struct dm_bio_prison_cell *cell, struct bio *bio)
691{
692 int r;
693 struct pool *pool = tc->pool;
 694 struct dm_thin_new_mapping *m = get_next_mapping(pool);
695
696 INIT_LIST_HEAD(&m->list);
 697 m->quiesced = 0;
698 m->prepared = 0;
699 m->tc = tc;
700 m->virt_block = virt_block;
701 m->data_block = data_dest;
702 m->cell = cell;
703 m->err = 0;
704 m->bio = NULL;
705
 706 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
 707 m->quiesced = 1;
708
709 /*
710 * IO to pool_dev remaps to the pool target's data_dev.
711 *
712 * If the whole block of data is being overwritten, we can issue the
713 * bio immediately. Otherwise we use kcopyd to clone the data first.
714 */
715 if (io_overwrites_block(pool, bio)) {
 716 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 717
 718 h->overwrite_mapping = m;
719 m->bio = bio;
720 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 721 inc_all_io_entry(pool, bio);
722 remap_and_issue(tc, bio, data_dest);
723 } else {
724 struct dm_io_region from, to;
725
 726 from.bdev = origin->bdev;
727 from.sector = data_origin * pool->sectors_per_block;
728 from.count = pool->sectors_per_block;
729
730 to.bdev = tc->pool_dev->bdev;
731 to.sector = data_dest * pool->sectors_per_block;
732 to.count = pool->sectors_per_block;
733
734 r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
735 0, copy_complete, m);
736 if (r < 0) {
737 mempool_free(m, pool->mapping_pool);
 738 DMERR_LIMIT("dm_kcopyd_copy() failed");
 739 dm_cell_error(cell);
740 }
741 }
742}
743
744static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
745 dm_block_t data_origin, dm_block_t data_dest,
 746 struct dm_bio_prison_cell *cell, struct bio *bio)
747{
748 schedule_copy(tc, virt_block, tc->pool_dev,
749 data_origin, data_dest, cell, bio);
750}
751
752static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
753 dm_block_t data_dest,
 754 struct dm_bio_prison_cell *cell, struct bio *bio)
755{
756 schedule_copy(tc, virt_block, tc->origin_dev,
757 virt_block, data_dest, cell, bio);
758}
759
 760static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
 761 dm_block_t data_block, struct dm_bio_prison_cell *cell,
762 struct bio *bio)
763{
764 struct pool *pool = tc->pool;
 765 struct dm_thin_new_mapping *m = get_next_mapping(pool);
766
767 INIT_LIST_HEAD(&m->list);
 768 m->quiesced = 1;
769 m->prepared = 0;
770 m->tc = tc;
771 m->virt_block = virt_block;
772 m->data_block = data_block;
773 m->cell = cell;
774 m->err = 0;
775 m->bio = NULL;
776
777 /*
778 * If the whole block of data is being overwritten or we are not
779 * zeroing pre-existing data, we can issue the bio immediately.
780 * Otherwise we use kcopyd to zero the data first.
781 */
 782 if (!pool->pf.zero_new_blocks)
783 process_prepared_mapping(m);
784
785 else if (io_overwrites_block(pool, bio)) {
 786 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 787
 788 h->overwrite_mapping = m;
789 m->bio = bio;
790 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
 791 inc_all_io_entry(pool, bio);
 792 remap_and_issue(tc, bio, data_block);
793 } else {
794 int r;
795 struct dm_io_region to;
796
797 to.bdev = tc->pool_dev->bdev;
798 to.sector = data_block * pool->sectors_per_block;
799 to.count = pool->sectors_per_block;
800
801 r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
802 if (r < 0) {
803 mempool_free(m, pool->mapping_pool);
 804 DMERR_LIMIT("dm_kcopyd_zero() failed");
 805 dm_cell_error(cell);
806 }
807 }
808}
809
810static int commit(struct pool *pool)
811{
812 int r;
813
814 r = dm_pool_commit_metadata(pool->pmd);
815 if (r)
 816 DMERR_LIMIT("commit failed: error = %d", r);
817
818 return r;
819}
820
821/*
822 * A non-zero return indicates read_only or fail_io mode.
823 * Many callers don't care about the return value.
824 */
825static int commit_or_fallback(struct pool *pool)
826{
827 int r;
828
829 if (get_pool_mode(pool) != PM_WRITE)
830 return -EINVAL;
831
832 r = commit(pool);
833 if (r)
834 set_pool_mode(pool, PM_READ_ONLY);
835
836 return r;
837}
838
839static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
840{
841 int r;
842 dm_block_t free_blocks;
843 unsigned long flags;
844 struct pool *pool = tc->pool;
845
846 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
847 if (r)
848 return r;
849
850 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
851 DMWARN("%s: reached low water mark, sending event.",
852 dm_device_name(pool->pool_md));
853 spin_lock_irqsave(&pool->lock, flags);
854 pool->low_water_triggered = 1;
855 spin_unlock_irqrestore(&pool->lock, flags);
856 dm_table_event(pool->ti->table);
857 }
858
859 if (!free_blocks) {
860 if (pool->no_free_space)
861 return -ENOSPC;
862 else {
863 /*
864 * Try to commit to see if that will free up some
865 * more space.
866 */
 867 (void) commit_or_fallback(pool);
868
869 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
870 if (r)
871 return r;
872
873 /*
874 * If we still have no space we set a flag to avoid
875 * doing all this checking and return -ENOSPC.
876 */
877 if (!free_blocks) {
878 DMWARN("%s: no free space available.",
879 dm_device_name(pool->pool_md));
880 spin_lock_irqsave(&pool->lock, flags);
881 pool->no_free_space = 1;
882 spin_unlock_irqrestore(&pool->lock, flags);
883 return -ENOSPC;
884 }
885 }
886 }
887
888 r = dm_pool_alloc_data_block(pool->pmd, result);
889 if (r)
890 return r;
891
892 return 0;
893}
894
895/*
896 * If we have run out of space, queue bios until the device is
897 * resumed, presumably after having been reloaded with more space.
898 */
899static void retry_on_resume(struct bio *bio)
900{
 901 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 902 struct thin_c *tc = h->tc;
903 struct pool *pool = tc->pool;
904 unsigned long flags;
905
906 spin_lock_irqsave(&pool->lock, flags);
907 bio_list_add(&pool->retry_on_resume_list, bio);
908 spin_unlock_irqrestore(&pool->lock, flags);
909}
910
 911static void no_space(struct dm_bio_prison_cell *cell)
912{
913 struct bio *bio;
914 struct bio_list bios;
915
916 bio_list_init(&bios);
 917 dm_cell_release(cell, &bios);
918
919 while ((bio = bio_list_pop(&bios)))
920 retry_on_resume(bio);
921}
922
923static void process_discard(struct thin_c *tc, struct bio *bio)
924{
925 int r;
 926 unsigned long flags;
 927 struct pool *pool = tc->pool;
 928 struct dm_bio_prison_cell *cell, *cell2;
 929 struct dm_cell_key key, key2;
930 dm_block_t block = get_bio_block(tc, bio);
931 struct dm_thin_lookup_result lookup_result;
 932 struct dm_thin_new_mapping *m;
933
934 build_virtual_key(tc->td, block, &key);
 935 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
936 return;
937
938 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
939 switch (r) {
940 case 0:
941 /*
942 * Check nobody is fiddling with this pool block. This can
943 * happen if someone's in the process of breaking sharing
944 * on this block.
945 */
946 build_data_key(tc->td, lookup_result.block, &key2);
 947 if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
 948 cell_defer_no_holder(tc, cell);
949 break;
950 }
951
952 if (io_overlaps_block(pool, bio)) {
953 /*
954 * IO may still be going to the destination block. We must
955 * quiesce before we can do the removal.
956 */
957 m = get_next_mapping(pool);
958 m->tc = tc;
 959 m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
960 m->virt_block = block;
961 m->data_block = lookup_result.block;
962 m->cell = cell;
963 m->cell2 = cell2;
964 m->err = 0;
965 m->bio = bio;
966
 967 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
 968 spin_lock_irqsave(&pool->lock, flags);
 969 list_add(&m->list, &pool->prepared_discards);
 970 spin_unlock_irqrestore(&pool->lock, flags);
971 wake_worker(pool);
972 }
973 } else {
 974 inc_all_io_entry(pool, bio);
975 cell_defer_no_holder(tc, cell);
976 cell_defer_no_holder(tc, cell2);
 977
 978 /*
979 * The DM core makes sure that the discard doesn't span
980 * a block boundary. So we submit the discard of a
981 * partial block appropriately.
 982 */
983 if ((!lookup_result.shared) && pool->pf.discard_passdown)
984 remap_and_issue(tc, bio, lookup_result.block);
985 else
986 bio_endio(bio, 0);
987 }
988 break;
989
990 case -ENODATA:
991 /*
992 * It isn't provisioned, just forget it.
993 */
 994 cell_defer_no_holder(tc, cell);
995 bio_endio(bio, 0);
996 break;
997
998 default:
999 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1000 __func__, r);
 1001 cell_defer_no_holder(tc, cell);
1002 bio_io_error(bio);
1003 break;
1004 }
1005}
1006
 1007static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
 1008 struct dm_cell_key *key,
 1009 struct dm_thin_lookup_result *lookup_result,
 1010 struct dm_bio_prison_cell *cell)
1011{
1012 int r;
1013 dm_block_t data_block;
1014
1015 r = alloc_data_block(tc, &data_block);
1016 switch (r) {
1017 case 0:
1018 schedule_internal_copy(tc, block, lookup_result->block,
1019 data_block, cell, bio);
1020 break;
1021
1022 case -ENOSPC:
1023 no_space(cell);
1024 break;
1025
1026 default:
1027 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1028 __func__, r);
 1029 dm_cell_error(cell);
1030 break;
1031 }
1032}
1033
1034static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1035 dm_block_t block,
1036 struct dm_thin_lookup_result *lookup_result)
1037{
 1038 struct dm_bio_prison_cell *cell;
 1039 struct pool *pool = tc->pool;
 1040 struct dm_cell_key key;
1041
1042 /*
1043 * If cell is already occupied, then sharing is already in the process
1044 * of being broken so we have nothing further to do here.
1045 */
1046 build_data_key(tc->td, lookup_result->block, &key);
 1047 if (dm_bio_detain(pool->prison, &key, bio, &cell))
1048 return;
1049
 1050 if (bio_data_dir(bio) == WRITE && bio->bi_size)
1051 break_sharing(tc, bio, block, &key, lookup_result, cell);
1052 else {
 1053 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 1054
 1055 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
 1056 inc_all_io_entry(pool, bio);
 1057 cell_defer_no_holder(tc, cell);
 1058
1059 remap_and_issue(tc, bio, lookup_result->block);
1060 }
1061}
1062
1063static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
 1064 struct dm_bio_prison_cell *cell)
1065{
1066 int r;
1067 dm_block_t data_block;
1068
1069 /*
1070 * Remap empty bios (flushes) immediately, without provisioning.
1071 */
1072 if (!bio->bi_size) {
 1073 inc_all_io_entry(tc->pool, bio);
 1074 cell_defer_no_holder(tc, cell);
 1075
1076 remap_and_issue(tc, bio, 0);
1077 return;
1078 }
1079
1080 /*
1081 * Fill read bios with zeroes and complete them immediately.
1082 */
1083 if (bio_data_dir(bio) == READ) {
1084 zero_fill_bio(bio);
 1085 cell_defer_no_holder(tc, cell);
1086 bio_endio(bio, 0);
1087 return;
1088 }
1089
1090 r = alloc_data_block(tc, &data_block);
1091 switch (r) {
1092 case 0:
1093 if (tc->origin_dev)
1094 schedule_external_copy(tc, block, data_block, cell, bio);
1095 else
1096 schedule_zero(tc, block, data_block, cell, bio);
1097 break;
1098
1099 case -ENOSPC:
1100 no_space(cell);
1101 break;
1102
1103 default:
1104 DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
1105 __func__, r);
 1106 set_pool_mode(tc->pool, PM_READ_ONLY);
 1107 dm_cell_error(cell);
1108 break;
1109 }
1110}
1111
1112static void process_bio(struct thin_c *tc, struct bio *bio)
1113{
1114 int r;
1115 dm_block_t block = get_bio_block(tc, bio);
 1116 struct dm_bio_prison_cell *cell;
 1117 struct dm_cell_key key;
1118 struct dm_thin_lookup_result lookup_result;
1119
1120 /*
1121 * If cell is already occupied, then the block is already
1122 * being provisioned so we have nothing further to do here.
1123 */
1124 build_virtual_key(tc->td, block, &key);
 1125 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
1126 return;
1127
1128 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1129 switch (r) {
1130 case 0:
 1131 if (lookup_result.shared) {
 1132 process_shared_bio(tc, bio, block, &lookup_result);
 1133 cell_defer_no_holder(tc, cell);
1134 } else {
1135 inc_all_io_entry(tc->pool, bio);
 1136 cell_defer_no_holder(tc, cell);
 1137
 1138 remap_and_issue(tc, bio, lookup_result.block);
 1139 }
1140 break;
1141
1142 case -ENODATA:
 1143 if (bio_data_dir(bio) == READ && tc->origin_dev) {
 1144 inc_all_io_entry(tc->pool, bio);
 1145 cell_defer_no_holder(tc, cell);
 1146
1147 remap_to_origin_and_issue(tc, bio);
1148 } else
1149 provision_block(tc, bio, block, cell);
1150 break;
1151
1152 default:
1153 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1154 __func__, r);
 1155 cell_defer_no_holder(tc, cell);
1156 bio_io_error(bio);
1157 break;
1158 }
1159}
1160
1161static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1162{
1163 int r;
1164 int rw = bio_data_dir(bio);
1165 dm_block_t block = get_bio_block(tc, bio);
1166 struct dm_thin_lookup_result lookup_result;
1167
1168 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1169 switch (r) {
1170 case 0:
1171 if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
1172 bio_io_error(bio);
1173 else {
1174 inc_all_io_entry(tc->pool, bio);
 1175 remap_and_issue(tc, bio, lookup_result.block);
 1176 }
1177 break;
1178
1179 case -ENODATA:
1180 if (rw != READ) {
1181 bio_io_error(bio);
1182 break;
1183 }
1184
1185 if (tc->origin_dev) {
 1186 inc_all_io_entry(tc->pool, bio);
1187 remap_to_origin_and_issue(tc, bio);
1188 break;
1189 }
1190
1191 zero_fill_bio(bio);
1192 bio_endio(bio, 0);
1193 break;
1194
1195 default:
1196 DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
1197 __func__, r);
1198 bio_io_error(bio);
1199 break;
1200 }
1201}
1202
1203static void process_bio_fail(struct thin_c *tc, struct bio *bio)
1204{
1205 bio_io_error(bio);
1206}
1207
1208static int need_commit_due_to_time(struct pool *pool)
1209{
1210 return jiffies < pool->last_commit_jiffies ||
1211 jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
1212}
1213
1214static void process_deferred_bios(struct pool *pool)
1215{
1216 unsigned long flags;
1217 struct bio *bio;
1218 struct bio_list bios;
1219
1220 bio_list_init(&bios);
1221
1222 spin_lock_irqsave(&pool->lock, flags);
1223 bio_list_merge(&bios, &pool->deferred_bios);
1224 bio_list_init(&pool->deferred_bios);
1225 spin_unlock_irqrestore(&pool->lock, flags);
1226
1227 while ((bio = bio_list_pop(&bios))) {
 1228 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1229 struct thin_c *tc = h->tc;
1230
1231 /*
1232 * If we've got no free new_mapping structs, and processing
1233 * this bio might require one, we pause until there are some
1234 * prepared mappings to process.
1235 */
1236 if (ensure_next_mapping(pool)) {
1237 spin_lock_irqsave(&pool->lock, flags);
1238 bio_list_merge(&pool->deferred_bios, &bios);
1239 spin_unlock_irqrestore(&pool->lock, flags);
1240
1241 break;
1242 }
1243
1244 if (bio->bi_rw & REQ_DISCARD)
 1245 pool->process_discard(tc, bio);
 1246 else
 1247 pool->process_bio(tc, bio);
1248 }
1249
1250 /*
1251 * If there are any deferred flush bios, we must commit
1252 * the metadata before issuing them.
1253 */
1254 bio_list_init(&bios);
1255 spin_lock_irqsave(&pool->lock, flags);
1256 bio_list_merge(&bios, &pool->deferred_flush_bios);
1257 bio_list_init(&pool->deferred_flush_bios);
1258 spin_unlock_irqrestore(&pool->lock, flags);
1259
 1260 if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
1261 return;
1262
 1263 if (commit_or_fallback(pool)) {
1264 while ((bio = bio_list_pop(&bios)))
1265 bio_io_error(bio);
1266 return;
1267 }
 1268 pool->last_commit_jiffies = jiffies;
1269
1270 while ((bio = bio_list_pop(&bios)))
1271 generic_make_request(bio);
1272}
1273
1274static void do_worker(struct work_struct *ws)
1275{
1276 struct pool *pool = container_of(ws, struct pool, worker);
1277
e49e5829
JT
1278 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
1279 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
1280 process_deferred_bios(pool);
1281}
1282
1283/*
1284 * We want to commit periodically so that not too much
1285 * unwritten data builds up.
1286 */
1287static void do_waker(struct work_struct *ws)
1288{
1289 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
1290 wake_worker(pool);
1291 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
1292}
1293
1294/*----------------------------------------------------------------*/
1295
1296static enum pool_mode get_pool_mode(struct pool *pool)
1297{
1298 return pool->pf.mode;
1299}
1300
1301static void set_pool_mode(struct pool *pool, enum pool_mode mode)
1302{
1303 int r;
1304
1305 pool->pf.mode = mode;
1306
1307 switch (mode) {
1308 case PM_FAIL:
1309 DMERR("switching pool to failure mode");
1310 pool->process_bio = process_bio_fail;
1311 pool->process_discard = process_bio_fail;
1312 pool->process_prepared_mapping = process_prepared_mapping_fail;
1313 pool->process_prepared_discard = process_prepared_discard_fail;
1314 break;
1315
1316 case PM_READ_ONLY:
1317 DMERR("switching pool to read-only mode");
1318 r = dm_pool_abort_metadata(pool->pmd);
1319 if (r) {
1320 DMERR("aborting transaction failed");
1321 set_pool_mode(pool, PM_FAIL);
1322 } else {
1323 dm_pool_metadata_read_only(pool->pmd);
1324 pool->process_bio = process_bio_read_only;
1325 pool->process_discard = process_discard;
1326 pool->process_prepared_mapping = process_prepared_mapping_fail;
1327 pool->process_prepared_discard = process_prepared_discard_passdown;
1328 }
1329 break;
1330
1331 case PM_WRITE:
1332 pool->process_bio = process_bio;
1333 pool->process_discard = process_discard;
1334 pool->process_prepared_mapping = process_prepared_mapping;
1335 pool->process_prepared_discard = process_prepared_discard;
1336 break;
1337 }
1338}
1339
1340/*----------------------------------------------------------------*/
1341
1342/*
1343 * Mapping functions.
1344 */
1345
1346/*
1347 * Called only while mapping a thin bio to hand it over to the workqueue.
1348 */
1349static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1350{
1351 unsigned long flags;
1352 struct pool *pool = tc->pool;
1353
1354 spin_lock_irqsave(&pool->lock, flags);
1355 bio_list_add(&pool->deferred_bios, bio);
1356 spin_unlock_irqrestore(&pool->lock, flags);
1357
1358 wake_worker(pool);
1359}
1360
 1361static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
 1362 {
 1363 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
1364
1365 h->tc = tc;
1366 h->shared_read_entry = NULL;
 1367 h->all_io_entry = NULL;
 1368 h->overwrite_mapping = NULL;
1369}
1370
1371/*
1372 * Non-blocking function called from the thin target's map function.
1373 */
1374static int thin_bio_map(struct dm_target *ti, struct bio *bio,
1375 union map_info *map_context)
1376{
1377 int r;
1378 struct thin_c *tc = ti->private;
1379 dm_block_t block = get_bio_block(tc, bio);
1380 struct dm_thin_device *td = tc->td;
1381 struct dm_thin_lookup_result result;
1382 struct dm_bio_prison_cell *cell1, *cell2;
1383 struct dm_cell_key key;
 1384
 1385 thin_hook_bio(tc, bio);
1386
1387 if (get_pool_mode(tc->pool) == PM_FAIL) {
1388 bio_io_error(bio);
1389 return DM_MAPIO_SUBMITTED;
1390 }
1391
 1392 if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
1393 thin_defer_bio(tc, bio);
1394 return DM_MAPIO_SUBMITTED;
1395 }
1396
1397 r = dm_thin_find_block(td, block, 0, &result);
1398
1399 /*
1400 * Note that we defer readahead too.
1401 */
1402 switch (r) {
1403 case 0:
1404 if (unlikely(result.shared)) {
1405 /*
1406 * We have a race condition here between the
1407 * result.shared value returned by the lookup and
1408 * snapshot creation, which may cause new
1409 * sharing.
1410 *
1411 * To avoid this always quiesce the origin before
1412 * taking the snap. You want to do this anyway to
1413 * ensure a consistent application view
1414 * (i.e. lockfs).
1415 *
1416 * More distant ancestors are irrelevant. The
1417 * shared flag will be set in their case.
1418 */
1419 thin_defer_bio(tc, bio);
 1420 return DM_MAPIO_SUBMITTED;
 1421 }
1422
1423 build_virtual_key(tc->td, block, &key);
1424 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
1425 return DM_MAPIO_SUBMITTED;
1426
1427 build_data_key(tc->td, result.block, &key);
1428 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
 1429 cell_defer_no_holder(tc, cell1);
1430 return DM_MAPIO_SUBMITTED;
1431 }
1432
1433 inc_all_io_entry(tc->pool, bio);
1434 cell_defer_no_holder(tc, cell2);
1435 cell_defer_no_holder(tc, cell1);
1436
1437 remap(tc, bio, result.block);
1438 return DM_MAPIO_REMAPPED;
1439
1440 case -ENODATA:
1441 if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
1442 /*
1443 * This block isn't provisioned, and we have no way
1444 * of doing so. Just error it.
1445 */
1446 bio_io_error(bio);
 1447 return DM_MAPIO_SUBMITTED;
1448 }
1449 /* fall through */
1450
1451 case -EWOULDBLOCK:
991d9fa0
JT
1452 /*
1453 * In future, the failed dm_thin_find_block above could
1454 * provide the hint to load the metadata into cache.
1455 */
 1456 thin_defer_bio(tc, bio);
 1457 return DM_MAPIO_SUBMITTED;
1458
1459 default:
1460 /*
1461 * Must always call bio_io_error on failure.
1462 * dm_thin_find_block can fail with -EINVAL if the
1463 * pool is switched to fail-io mode.
1464 */
1465 bio_io_error(bio);
 1466 return DM_MAPIO_SUBMITTED;
 1467 }
1468}
1469
1470static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
1471{
1472 int r;
1473 unsigned long flags;
1474 struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
1475
1476 spin_lock_irqsave(&pt->pool->lock, flags);
1477 r = !bio_list_empty(&pt->pool->retry_on_resume_list);
1478 spin_unlock_irqrestore(&pt->pool->lock, flags);
1479
1480 if (!r) {
1481 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1482 r = bdi_congested(&q->backing_dev_info, bdi_bits);
1483 }
1484
1485 return r;
1486}
1487
1488static void __requeue_bios(struct pool *pool)
1489{
1490 bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
1491 bio_list_init(&pool->retry_on_resume_list);
1492}
1493
1494/*----------------------------------------------------------------
1495 * Binding of control targets to a pool object
1496 *--------------------------------------------------------------*/
1497static bool data_dev_supports_discard(struct pool_c *pt)
1498{
1499 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
1500
1501 return q && blk_queue_discard(q);
1502}
1503
1504/*
1505 * If discard_passdown was enabled verify that the data device
 1506 * supports discards. Disable discard_passdown if not.
 1507 */
 1508static void disable_passdown_if_not_supported(struct pool_c *pt)
 1509{
1510 struct pool *pool = pt->pool;
1511 struct block_device *data_bdev = pt->data_dev->bdev;
1512 struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
1513 sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
1514 const char *reason = NULL;
1515 char buf[BDEVNAME_SIZE];
1516
 1517 if (!pt->adjusted_pf.discard_passdown)
1518 return;
1519
1520 if (!data_dev_supports_discard(pt))
1521 reason = "discard unsupported";
1522
1523 else if (data_limits->max_discard_sectors < pool->sectors_per_block)
1524 reason = "max discard sectors smaller than a block";
 1525
1526 else if (data_limits->discard_granularity > block_size)
1527 reason = "discard granularity larger than a block";
1528
1529 else if (block_size & (data_limits->discard_granularity - 1))
1530 reason = "discard granularity not a factor of block size";
1531
1532 if (reason) {
1533 DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
1534 pt->adjusted_pf.discard_passdown = false;
1535 }
1536}
1537
1538static int bind_control_target(struct pool *pool, struct dm_target *ti)
1539{
1540 struct pool_c *pt = ti->private;
1541
1542 /*
1543 * We want to make sure that degraded pools are never upgraded.
1544 */
1545 enum pool_mode old_mode = pool->pf.mode;
0424caa1 1546 enum pool_mode new_mode = pt->adjusted_pf.mode;
e49e5829
JT
1547
1548 if (old_mode > new_mode)
1549 new_mode = old_mode;
1550
1551 pool->ti = ti;
1552 pool->low_water_blocks = pt->low_water_blocks;
 1553 pool->pf = pt->adjusted_pf;
 1554
 1555 set_pool_mode(pool, new_mode);
 1556
1557 return 0;
1558}
1559
1560static void unbind_control_target(struct pool *pool, struct dm_target *ti)
1561{
1562 if (pool->ti == ti)
1563 pool->ti = NULL;
1564}
1565
1566/*----------------------------------------------------------------
1567 * Pool creation
1568 *--------------------------------------------------------------*/
1569/* Initialize pool features. */
1570static void pool_features_init(struct pool_features *pf)
1571{
 1572 pf->mode = PM_WRITE;
1573 pf->zero_new_blocks = true;
1574 pf->discard_enabled = true;
1575 pf->discard_passdown = true;
1576}
1577
1578static void __pool_destroy(struct pool *pool)
1579{
1580 __pool_table_remove(pool);
1581
1582 if (dm_pool_metadata_close(pool->pmd) < 0)
1583 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1584
 1585 dm_bio_prison_destroy(pool->prison);
1586 dm_kcopyd_client_destroy(pool->copier);
1587
1588 if (pool->wq)
1589 destroy_workqueue(pool->wq);
1590
1591 if (pool->next_mapping)
1592 mempool_free(pool->next_mapping, pool->mapping_pool);
1593 mempool_destroy(pool->mapping_pool);
1594 dm_deferred_set_destroy(pool->shared_read_ds);
1595 dm_deferred_set_destroy(pool->all_io_ds);
1596 kfree(pool);
1597}
1598
 1599static struct kmem_cache *_new_mapping_cache;
 1600
1601static struct pool *pool_create(struct mapped_device *pool_md,
1602 struct block_device *metadata_dev,
1603 unsigned long block_size,
1604 int read_only, char **error)
1605{
1606 int r;
1607 void *err_p;
1608 struct pool *pool;
1609 struct dm_pool_metadata *pmd;
 1610 bool format_device = read_only ? false : true;
 1611
 1612 pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
1613 if (IS_ERR(pmd)) {
1614 *error = "Error creating metadata object";
1615 return (struct pool *)pmd;
1616 }
1617
1618 pool = kmalloc(sizeof(*pool), GFP_KERNEL);
1619 if (!pool) {
1620 *error = "Error allocating memory for pool";
1621 err_p = ERR_PTR(-ENOMEM);
1622 goto bad_pool;
1623 }
1624
1625 pool->pmd = pmd;
1626 pool->sectors_per_block = block_size;
1627 if (block_size & (block_size - 1))
1628 pool->sectors_per_block_shift = -1;
1629 else
1630 pool->sectors_per_block_shift = __ffs(block_size);
 1631 pool->low_water_blocks = 0;
 1632 pool_features_init(&pool->pf);
 1633 pool->prison = dm_bio_prison_create(PRISON_CELLS);
1634 if (!pool->prison) {
1635 *error = "Error creating pool's bio prison";
1636 err_p = ERR_PTR(-ENOMEM);
1637 goto bad_prison;
1638 }
1639
1640 pool->copier = dm_kcopyd_client_create();
1641 if (IS_ERR(pool->copier)) {
1642 r = PTR_ERR(pool->copier);
1643 *error = "Error creating pool's kcopyd client";
1644 err_p = ERR_PTR(r);
1645 goto bad_kcopyd_client;
1646 }
1647
1648 /*
1649 * Create singlethreaded workqueue that will service all devices
1650 * that use this metadata.
1651 */
1652 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
1653 if (!pool->wq) {
1654 *error = "Error creating pool's workqueue";
1655 err_p = ERR_PTR(-ENOMEM);
1656 goto bad_wq;
1657 }
1658
1659 INIT_WORK(&pool->worker, do_worker);
 1660 INIT_DELAYED_WORK(&pool->waker, do_waker);
1661 spin_lock_init(&pool->lock);
1662 bio_list_init(&pool->deferred_bios);
1663 bio_list_init(&pool->deferred_flush_bios);
1664 INIT_LIST_HEAD(&pool->prepared_mappings);
 1665 INIT_LIST_HEAD(&pool->prepared_discards);
1666 pool->low_water_triggered = 0;
1667 pool->no_free_space = 0;
1668 bio_list_init(&pool->retry_on_resume_list);
1669
1670 pool->shared_read_ds = dm_deferred_set_create();
1671 if (!pool->shared_read_ds) {
1672 *error = "Error creating pool's shared read deferred set";
1673 err_p = ERR_PTR(-ENOMEM);
1674 goto bad_shared_read_ds;
1675 }
1676
1677 pool->all_io_ds = dm_deferred_set_create();
1678 if (!pool->all_io_ds) {
1679 *error = "Error creating pool's all io deferred set";
1680 err_p = ERR_PTR(-ENOMEM);
1681 goto bad_all_io_ds;
1682 }
1683
1684 pool->next_mapping = NULL;
1685 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1686 _new_mapping_cache);
1687 if (!pool->mapping_pool) {
1688 *error = "Error creating pool's mapping mempool";
1689 err_p = ERR_PTR(-ENOMEM);
1690 goto bad_mapping_pool;
1691 }
1692
 1693 pool->ref_count = 1;
 1694 pool->last_commit_jiffies = jiffies;
1695 pool->pool_md = pool_md;
1696 pool->md_dev = metadata_dev;
1697 __pool_table_insert(pool);
1698
1699 return pool;
1700
 1701bad_mapping_pool:
1702 dm_deferred_set_destroy(pool->all_io_ds);
1703bad_all_io_ds:
1704 dm_deferred_set_destroy(pool->shared_read_ds);
1705bad_shared_read_ds:
1706 destroy_workqueue(pool->wq);
1707bad_wq:
1708 dm_kcopyd_client_destroy(pool->copier);
1709bad_kcopyd_client:
 1710 dm_bio_prison_destroy(pool->prison);
1711bad_prison:
1712 kfree(pool);
1713bad_pool:
1714 if (dm_pool_metadata_close(pmd))
1715 DMWARN("%s: dm_pool_metadata_close() failed.", __func__);
1716
1717 return err_p;
1718}
1719
1720static void __pool_inc(struct pool *pool)
1721{
1722 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1723 pool->ref_count++;
1724}
1725
1726static void __pool_dec(struct pool *pool)
1727{
1728 BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
1729 BUG_ON(!pool->ref_count);
1730 if (!--pool->ref_count)
1731 __pool_destroy(pool);
1732}
1733
1734static struct pool *__pool_find(struct mapped_device *pool_md,
1735 struct block_device *metadata_dev,
1736 unsigned long block_size, int read_only,
1737 char **error, int *created)
1738{
1739 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);
1740
1741 if (pool) {
1742 if (pool->pool_md != pool_md) {
1743 *error = "metadata device already in use by a pool";
 1744 return ERR_PTR(-EBUSY);
 1745 }
1746 __pool_inc(pool);
1747
1748 } else {
1749 pool = __pool_table_lookup(pool_md);
1750 if (pool) {
1751 if (pool->md_dev != metadata_dev) {
1752 *error = "different pool cannot replace a pool";
 1753 return ERR_PTR(-EINVAL);
 1754 }
1755 __pool_inc(pool);
1756
 1757 } else {
 1758 pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
1759 *created = 1;
1760 }
1761 }
1762
1763 return pool;
1764}
1765
1766/*----------------------------------------------------------------
1767 * Pool target methods
1768 *--------------------------------------------------------------*/
1769static void pool_dtr(struct dm_target *ti)
1770{
1771 struct pool_c *pt = ti->private;
1772
1773 mutex_lock(&dm_thin_pool_table.mutex);
1774
1775 unbind_control_target(pt->pool, ti);
1776 __pool_dec(pt->pool);
1777 dm_put_device(ti, pt->metadata_dev);
1778 dm_put_device(ti, pt->data_dev);
1779 kfree(pt);
1780
1781 mutex_unlock(&dm_thin_pool_table.mutex);
1782}
1783
1784static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
1785 struct dm_target *ti)
1786{
1787 int r;
1788 unsigned argc;
1789 const char *arg_name;
1790
1791 static struct dm_arg _args[] = {
67e2e2b2 1792 {0, 3, "Invalid number of pool feature arguments"},
991d9fa0
JT
1793 };
1794
1795 /*
1796 * No feature arguments supplied.
1797 */
1798 if (!as->argc)
1799 return 0;
1800
1801 r = dm_read_arg_group(_args, as, &argc, &ti->error);
1802 if (r)
1803 return -EINVAL;
1804
1805 while (argc && !r) {
1806 arg_name = dm_shift_arg(as);
1807 argc--;
1808
 1809 if (!strcasecmp(arg_name, "skip_block_zeroing"))
 1810 pf->zero_new_blocks = false;
1811
1812 else if (!strcasecmp(arg_name, "ignore_discard"))
 1813 pf->discard_enabled = false;
1814
1815 else if (!strcasecmp(arg_name, "no_discard_passdown"))
 1816 pf->discard_passdown = false;
 1817
1818 else if (!strcasecmp(arg_name, "read_only"))
1819 pf->mode = PM_READ_ONLY;
1820
1821 else {
1822 ti->error = "Unrecognised pool feature requested";
1823 r = -EINVAL;
1824 break;
1825 }
1826 }
1827
1828 return r;
1829}
1830
1831/*
1832 * thin-pool <metadata dev> <data dev>
1833 * <data block size (sectors)>
1834 * <low water mark (blocks)>
1835 * [<#feature args> [<arg>]*]
1836 *
1837 * Optional feature arguments are:
1838 * skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
1839 * ignore_discard: disable discard
1840 * no_discard_passdown: don't pass discards down to the data device
1841 */
1842static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1843{
 1844 int r, pool_created = 0;
1845 struct pool_c *pt;
1846 struct pool *pool;
1847 struct pool_features pf;
1848 struct dm_arg_set as;
1849 struct dm_dev *data_dev;
1850 unsigned long block_size;
1851 dm_block_t low_water_blocks;
1852 struct dm_dev *metadata_dev;
1853 sector_t metadata_dev_size;
c4a69ecd 1854 char b[BDEVNAME_SIZE];
991d9fa0
JT
1855
1856 /*
1857 * FIXME Remove validation from scope of lock.
1858 */
1859 mutex_lock(&dm_thin_pool_table.mutex);
1860
1861 if (argc < 4) {
1862 ti->error = "Invalid argument count";
1863 r = -EINVAL;
1864 goto out_unlock;
1865 }
1866 as.argc = argc;
1867 as.argv = argv;
1868
1869 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1870 if (r) {
1871 ti->error = "Error opening metadata block device";
1872 goto out_unlock;
1873 }
1874
1875 metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
c4a69ecd
MS
1876 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1877 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1878 bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
991d9fa0
JT
1879
1880 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1881 if (r) {
1882 ti->error = "Error getting data device";
1883 goto out_metadata;
1884 }
1885
1886 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1887 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1888 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
55f2b8bd 1889 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
991d9fa0
JT
1890 ti->error = "Invalid block size";
1891 r = -EINVAL;
1892 goto out;
1893 }
1894
1895 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1896 ti->error = "Invalid low water mark";
1897 r = -EINVAL;
1898 goto out;
1899 }
1900
1901 /*
1902 * Set default pool features.
1903 */
67e2e2b2 1904 pool_features_init(&pf);
991d9fa0
JT
1905
1906 dm_consume_args(&as, 4);
1907 r = parse_pool_features(&as, &pf, ti);
1908 if (r)
1909 goto out;
1910
1911 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1912 if (!pt) {
1913 r = -ENOMEM;
1914 goto out;
1915 }
1916
1917 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
e49e5829 1918 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
991d9fa0
JT
1919 if (IS_ERR(pool)) {
1920 r = PTR_ERR(pool);
1921 goto out_free_pt;
1922 }
1923
67e2e2b2
JT
1924 /*
1925 * 'pool_created' reflects whether this is the first table load.
1926 * Top level discard support is not allowed to be changed after
1927 * initial load. This would require a pool reload to trigger thin
1928 * device changes.
1929 */
1930 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
1931 ti->error = "Discard support cannot be disabled once enabled";
1932 r = -EINVAL;
1933 goto out_flags_changed;
1934 }
1935
991d9fa0
JT
1936 pt->pool = pool;
1937 pt->ti = ti;
1938 pt->metadata_dev = metadata_dev;
1939 pt->data_dev = data_dev;
1940 pt->low_water_blocks = low_water_blocks;
0424caa1 1941 pt->adjusted_pf = pt->requested_pf = pf;
991d9fa0 1942 ti->num_flush_requests = 1;
9bc142dd 1943
67e2e2b2
JT
1944 /*
1945 * Only need to enable discards if the pool should pass
1946 * them down to the data device. The thin device's discard
1947 * processing will cause mappings to be removed from the btree.
1948 */
1949 if (pf.discard_enabled && pf.discard_passdown) {
1950 ti->num_discard_requests = 1;
9bc142dd 1951
67e2e2b2
JT
1952 /*
1953 * Setting 'discards_supported' circumvents the normal
1954 * stacking of discard limits (this keeps the pool and
1955 * thin devices' discard limits consistent).
1956 */
0ac55489 1957 ti->discards_supported = true;
307615a2 1958 ti->discard_zeroes_data_unsupported = true;
67e2e2b2 1959 }
991d9fa0
JT
1960 ti->private = pt;
1961
1962 pt->callbacks.congested_fn = pool_is_congested;
1963 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1964
1965 mutex_unlock(&dm_thin_pool_table.mutex);
1966
1967 return 0;
1968
67e2e2b2
JT
1969out_flags_changed:
1970 __pool_dec(pool);
991d9fa0
JT
1971out_free_pt:
1972 kfree(pt);
1973out:
1974 dm_put_device(ti, data_dev);
1975out_metadata:
1976 dm_put_device(ti, metadata_dev);
1977out_unlock:
1978 mutex_unlock(&dm_thin_pool_table.mutex);
1979
1980 return r;
1981}
1982
1983static int pool_map(struct dm_target *ti, struct bio *bio,
1984 union map_info *map_context)
1985{
1986 int r;
1987 struct pool_c *pt = ti->private;
1988 struct pool *pool = pt->pool;
1989 unsigned long flags;
1990
1991 /*
1992 * As this is a singleton target, ti->begin is always zero.
1993 */
1994 spin_lock_irqsave(&pool->lock, flags);
1995 bio->bi_bdev = pt->data_dev->bdev;
1996 r = DM_MAPIO_REMAPPED;
1997 spin_unlock_irqrestore(&pool->lock, flags);
1998
1999 return r;
2000}
2001
2002/*
2003 * Retrieves the number of blocks of the data device from
2004 * the superblock and compares it to the actual device size,
2005 * thus resizing the data device in case it has grown.
2006 *
2007 * This both copes with opening preallocated data devices in the ctr
2008 * being followed by a resume
2009 * -and-
2010 * calling the resume method individually after userspace has
2011 * grown the data device in reaction to a table event.
2012 */
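/*
 * A hypothetical worked example of the arithmetic below: with ti->len of
 * 2097152 sectors and 128 sectors per block, data_size becomes 16384 blocks.
 * If the superblock records fewer blocks (say 8192), dm_pool_resize_data_dev()
 * grows the mapping to 16384; if it records more, -EINVAL is returned.
 */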
2013static int pool_preresume(struct dm_target *ti)
2014{
2015 int r;
2016 struct pool_c *pt = ti->private;
2017 struct pool *pool = pt->pool;
55f2b8bd
MS
2018 sector_t data_size = ti->len;
2019 dm_block_t sb_data_size;
991d9fa0
JT
2020
2021 /*
2022 * Take control of the pool object.
2023 */
2024 r = bind_control_target(pool, ti);
2025 if (r)
2026 return r;
2027
55f2b8bd
MS
2028 (void) sector_div(data_size, pool->sectors_per_block);
2029
991d9fa0
JT
2030 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2031 if (r) {
2032 DMERR("failed to retrieve data device size");
2033 return r;
2034 }
2035
2036 if (data_size < sb_data_size) {
2037 DMERR("pool target too small, is %llu blocks (expected %llu)",
55f2b8bd 2038 (unsigned long long)data_size, sb_data_size);
991d9fa0
JT
2039 return -EINVAL;
2040
2041 } else if (data_size > sb_data_size) {
2042 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2043 if (r) {
2044 DMERR("failed to resize data device");
e49e5829
JT
2045 /* FIXME Stricter than necessary: Rollback transaction instead here */
2046 set_pool_mode(pool, PM_READ_ONLY);
991d9fa0
JT
2047 return r;
2048 }
2049
e49e5829 2050 (void) commit_or_fallback(pool);
991d9fa0
JT
2051 }
2052
2053 return 0;
2054}
2055
2056static void pool_resume(struct dm_target *ti)
2057{
2058 struct pool_c *pt = ti->private;
2059 struct pool *pool = pt->pool;
2060 unsigned long flags;
2061
2062 spin_lock_irqsave(&pool->lock, flags);
2063 pool->low_water_triggered = 0;
2064 pool->no_free_space = 0;
2065 __requeue_bios(pool);
2066 spin_unlock_irqrestore(&pool->lock, flags);
2067
905e51b3 2068 do_waker(&pool->waker.work);
991d9fa0
JT
2069}
2070
2071static void pool_postsuspend(struct dm_target *ti)
2072{
991d9fa0
JT
2073 struct pool_c *pt = ti->private;
2074 struct pool *pool = pt->pool;
2075
905e51b3 2076 cancel_delayed_work(&pool->waker);
991d9fa0 2077 flush_workqueue(pool->wq);
e49e5829 2078 (void) commit_or_fallback(pool);
991d9fa0
JT
2079}
2080
2081static int check_arg_count(unsigned argc, unsigned args_required)
2082{
2083 if (argc != args_required) {
2084 DMWARN("Message received with %u arguments instead of %u.",
2085 argc, args_required);
2086 return -EINVAL;
2087 }
2088
2089 return 0;
2090}
2091
2092static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2093{
2094 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2095 *dev_id <= MAX_DEV_ID)
2096 return 0;
2097
2098 if (warning)
2099 DMWARN("Message received with invalid device id: %s", arg);
2100
2101 return -EINVAL;
2102}
2103
2104static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2105{
2106 dm_thin_id dev_id;
2107 int r;
2108
2109 r = check_arg_count(argc, 2);
2110 if (r)
2111 return r;
2112
2113 r = read_dev_id(argv[1], &dev_id, 1);
2114 if (r)
2115 return r;
2116
2117 r = dm_pool_create_thin(pool->pmd, dev_id);
2118 if (r) {
2119 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2120 argv[1]);
2121 return r;
2122 }
2123
2124 return 0;
2125}
2126
2127static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2128{
2129 dm_thin_id dev_id;
2130 dm_thin_id origin_dev_id;
2131 int r;
2132
2133 r = check_arg_count(argc, 3);
2134 if (r)
2135 return r;
2136
2137 r = read_dev_id(argv[1], &dev_id, 1);
2138 if (r)
2139 return r;
2140
2141 r = read_dev_id(argv[2], &origin_dev_id, 1);
2142 if (r)
2143 return r;
2144
2145 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2146 if (r) {
2147 DMWARN("Creation of new snapshot %s of device %s failed.",
2148 argv[1], argv[2]);
2149 return r;
2150 }
2151
2152 return 0;
2153}
2154
2155static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2156{
2157 dm_thin_id dev_id;
2158 int r;
2159
2160 r = check_arg_count(argc, 2);
2161 if (r)
2162 return r;
2163
2164 r = read_dev_id(argv[1], &dev_id, 1);
2165 if (r)
2166 return r;
2167
2168 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2169 if (r)
2170 DMWARN("Deletion of thin device %s failed.", argv[1]);
2171
2172 return r;
2173}
2174
2175static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2176{
2177 dm_thin_id old_id, new_id;
2178 int r;
2179
2180 r = check_arg_count(argc, 3);
2181 if (r)
2182 return r;
2183
2184 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2185 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2186 return -EINVAL;
2187 }
2188
2189 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2190 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2191 return -EINVAL;
2192 }
2193
2194 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2195 if (r) {
2196 DMWARN("Failed to change transaction id from %s to %s.",
2197 argv[1], argv[2]);
2198 return r;
2199 }
2200
2201 return 0;
2202}
2203
cc8394d8
JT
2204static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2205{
2206 int r;
2207
2208 r = check_arg_count(argc, 1);
2209 if (r)
2210 return r;
2211
e49e5829 2212 (void) commit_or_fallback(pool);
0d200aef 2213
cc8394d8
JT
2214 r = dm_pool_reserve_metadata_snap(pool->pmd);
2215 if (r)
2216 DMWARN("reserve_metadata_snap message failed.");
2217
2218 return r;
2219}
2220
2221static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2222{
2223 int r;
2224
2225 r = check_arg_count(argc, 1);
2226 if (r)
2227 return r;
2228
2229 r = dm_pool_release_metadata_snap(pool->pmd);
2230 if (r)
2231 DMWARN("release_metadata_snap message failed.");
2232
2233 return r;
2234}
2235
991d9fa0
JT
2236/*
2237 * Messages supported:
2238 * create_thin <dev_id>
2239 * create_snap <dev_id> <origin_id>
2240 * delete <dev_id>
2242 * set_transaction_id <current_trans_id> <new_trans_id>
cc8394d8
JT
2243 * reserve_metadata_snap
2244 * release_metadata_snap
991d9fa0
JT
2245 */
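/*
 * Illustrative invocations (the pool device name is hypothetical):
 *
 *   dmsetup message /dev/mapper/pool 0 "create_thin 0"
 *   dmsetup message /dev/mapper/pool 0 "create_snap 1 0"
 *   dmsetup message /dev/mapper/pool 0 "delete 1"
 */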
2246static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2247{
2248 int r = -EINVAL;
2249 struct pool_c *pt = ti->private;
2250 struct pool *pool = pt->pool;
2251
2252 if (!strcasecmp(argv[0], "create_thin"))
2253 r = process_create_thin_mesg(argc, argv, pool);
2254
2255 else if (!strcasecmp(argv[0], "create_snap"))
2256 r = process_create_snap_mesg(argc, argv, pool);
2257
2258 else if (!strcasecmp(argv[0], "delete"))
2259 r = process_delete_mesg(argc, argv, pool);
2260
2261 else if (!strcasecmp(argv[0], "set_transaction_id"))
2262 r = process_set_transaction_id_mesg(argc, argv, pool);
2263
cc8394d8
JT
2264 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2265 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2266
2267 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2268 r = process_release_metadata_snap_mesg(argc, argv, pool);
2269
991d9fa0
JT
2270 else
2271 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2272
e49e5829
JT
2273 if (!r)
2274 (void) commit_or_fallback(pool);
991d9fa0
JT
2275
2276 return r;
2277}
2278
e49e5829
JT
2279static void emit_flags(struct pool_features *pf, char *result,
2280 unsigned sz, unsigned maxlen)
2281{
2282 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2283 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2284 DMEMIT("%u ", count);
2285
2286 if (!pf->zero_new_blocks)
2287 DMEMIT("skip_block_zeroing ");
2288
2289 if (!pf->discard_enabled)
2290 DMEMIT("ignore_discard ");
2291
2292 if (!pf->discard_passdown)
2293 DMEMIT("no_discard_passdown ");
2294
2295 if (pf->mode == PM_READ_ONLY)
2296 DMEMIT("read_only ");
2297}
2298
991d9fa0
JT
2299/*
2300 * Status line is:
2301 * <transaction id> <used metadata blocks>/<total metadata blocks>
2302 * <used data blocks>/<total data blocks> <held metadata root>
2303 */
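/*
 * An illustrative STATUSTYPE_INFO line (all figures hypothetical), with no
 * held metadata root, a read-write pool and discard passdown enabled:
 *
 *   0 141/4161600 10240/2097152 - rw discard_passdown
 */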
2304static int pool_status(struct dm_target *ti, status_type_t type,
1f4e0ff0 2305 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0 2306{
e49e5829 2307 int r;
991d9fa0
JT
2308 unsigned sz = 0;
2309 uint64_t transaction_id;
2310 dm_block_t nr_free_blocks_data;
2311 dm_block_t nr_free_blocks_metadata;
2312 dm_block_t nr_blocks_data;
2313 dm_block_t nr_blocks_metadata;
2314 dm_block_t held_root;
2315 char buf[BDEVNAME_SIZE];
2316 char buf2[BDEVNAME_SIZE];
2317 struct pool_c *pt = ti->private;
2318 struct pool *pool = pt->pool;
2319
2320 switch (type) {
2321 case STATUSTYPE_INFO:
e49e5829
JT
2322 if (get_pool_mode(pool) == PM_FAIL) {
2323 DMEMIT("Fail");
2324 break;
2325 }
2326
1f4e0ff0
AK
2327 /* Commit to ensure statistics aren't out-of-date */
2328 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2329 (void) commit_or_fallback(pool);
2330
991d9fa0
JT
2331 r = dm_pool_get_metadata_transaction_id(pool->pmd,
2332 &transaction_id);
2333 if (r)
2334 return r;
2335
2336 r = dm_pool_get_free_metadata_block_count(pool->pmd,
2337 &nr_free_blocks_metadata);
2338 if (r)
2339 return r;
2340
2341 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2342 if (r)
2343 return r;
2344
2345 r = dm_pool_get_free_block_count(pool->pmd,
2346 &nr_free_blocks_data);
2347 if (r)
2348 return r;
2349
2350 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2351 if (r)
2352 return r;
2353
cc8394d8 2354 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
991d9fa0
JT
2355 if (r)
2356 return r;
2357
2358 DMEMIT("%llu %llu/%llu %llu/%llu ",
2359 (unsigned long long)transaction_id,
2360 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2361 (unsigned long long)nr_blocks_metadata,
2362 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2363 (unsigned long long)nr_blocks_data);
2364
2365 if (held_root)
e49e5829
JT
2366 DMEMIT("%llu ", held_root);
2367 else
2368 DMEMIT("- ");
2369
2370 if (pool->pf.mode == PM_READ_ONLY)
2371 DMEMIT("ro ");
991d9fa0 2372 else
e49e5829
JT
2373 DMEMIT("rw ");
2374
018debea
MS
2375 if (!pool->pf.discard_enabled)
2376 DMEMIT("ignore_discard");
2377 else if (pool->pf.discard_passdown)
e49e5829
JT
2378 DMEMIT("discard_passdown");
2379 else
2380 DMEMIT("no_discard_passdown");
991d9fa0
JT
2381
2382 break;
2383
2384 case STATUSTYPE_TABLE:
2385 DMEMIT("%s %s %lu %llu ",
2386 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2387 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2388 (unsigned long)pool->sectors_per_block,
2389 (unsigned long long)pt->low_water_blocks);
0424caa1 2390 emit_flags(&pt->requested_pf, result, sz, maxlen);
991d9fa0
JT
2391 break;
2392 }
2393
2394 return 0;
2395}
2396
2397static int pool_iterate_devices(struct dm_target *ti,
2398 iterate_devices_callout_fn fn, void *data)
2399{
2400 struct pool_c *pt = ti->private;
2401
2402 return fn(ti, pt->data_dev, 0, ti->len, data);
2403}
2404
2405static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2406 struct bio_vec *biovec, int max_size)
2407{
2408 struct pool_c *pt = ti->private;
2409 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2410
2411 if (!q->merge_bvec_fn)
2412 return max_size;
2413
2414 bvm->bi_bdev = pt->data_dev->bdev;
2415
2416 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2417}
2418
28eed34e
MS
2419static bool block_size_is_power_of_two(struct pool *pool)
2420{
2421 return pool->sectors_per_block_shift >= 0;
2422}
2423
0424caa1 2424static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
104655fd 2425{
0424caa1
MS
2426 struct pool *pool = pt->pool;
2427 struct queue_limits *data_limits;
2428
104655fd
JT
2429 limits->max_discard_sectors = pool->sectors_per_block;
2430
2431 /*
0424caa1 2432 * discard_granularity is just a hint, and not enforced.
104655fd 2433 */
0424caa1
MS
2434 if (pt->adjusted_pf.discard_passdown) {
2435 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2436 limits->discard_granularity = data_limits->discard_granularity;
28eed34e 2437 } else if (block_size_is_power_of_two(pool))
0424caa1 2438 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
28eed34e
MS
2439 else
2440 /*
2441 * Use largest power of 2 that is a factor of sectors_per_block
2442 * but at least DATA_DEV_BLOCK_SIZE_MIN_SECTORS.
2443 */
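 /*
 * A hypothetical worked example: for a 384-sector (192KiB) block size
 * the lowest set bit is 128, so this evaluates to max(128, 128) sectors,
 * i.e. a 64KiB discard granularity.
 */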
2444 limits->discard_granularity = max(1 << (ffs(pool->sectors_per_block) - 1),
2445 DATA_DEV_BLOCK_SIZE_MIN_SECTORS) << SECTOR_SHIFT;
104655fd
JT
2446}
2447
991d9fa0
JT
2448static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2449{
2450 struct pool_c *pt = ti->private;
2451 struct pool *pool = pt->pool;
2452
2453 blk_limits_io_min(limits, 0);
2454 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
0424caa1
MS
2455
2456 /*
2457 * pt->adjusted_pf is a staging area for the actual features to use.
2458 * They get transferred to the live pool in bind_control_target()
2459 * called from pool_preresume().
2460 */
2461 if (!pt->adjusted_pf.discard_enabled)
2462 return;
2463
2464 disable_passdown_if_not_supported(pt);
2465
2466 set_discard_limits(pt, limits);
991d9fa0
JT
2467}
2468
2469static struct target_type pool_target = {
2470 .name = "thin-pool",
2471 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2472 DM_TARGET_IMMUTABLE,
018debea 2473 .version = {1, 6, 0},
991d9fa0
JT
2474 .module = THIS_MODULE,
2475 .ctr = pool_ctr,
2476 .dtr = pool_dtr,
2477 .map = pool_map,
2478 .postsuspend = pool_postsuspend,
2479 .preresume = pool_preresume,
2480 .resume = pool_resume,
2481 .message = pool_message,
2482 .status = pool_status,
2483 .merge = pool_merge,
2484 .iterate_devices = pool_iterate_devices,
2485 .io_hints = pool_io_hints,
2486};
2487
2488/*----------------------------------------------------------------
2489 * Thin target methods
2490 *--------------------------------------------------------------*/
2491static void thin_dtr(struct dm_target *ti)
2492{
2493 struct thin_c *tc = ti->private;
2494
2495 mutex_lock(&dm_thin_pool_table.mutex);
2496
2497 __pool_dec(tc->pool);
2498 dm_pool_close_thin_device(tc->td);
2499 dm_put_device(ti, tc->pool_dev);
2dd9c257
JT
2500 if (tc->origin_dev)
2501 dm_put_device(ti, tc->origin_dev);
991d9fa0
JT
2502 kfree(tc);
2503
2504 mutex_unlock(&dm_thin_pool_table.mutex);
2505}
2506
2507/*
2508 * Thin target parameters:
2509 *
2dd9c257 2510 * <pool_dev> <dev_id> [origin_dev]
991d9fa0
JT
2511 *
2512 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
2513 * dev_id: the internal device identifier
2dd9c257 2514 * origin_dev: a device external to the pool that should act as the origin
67e2e2b2
JT
2515 *
2516 * If the pool device has discards disabled, they get disabled for the thin
2517 * device as well.
991d9fa0
JT
2518 */
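/*
 * Illustrative table lines (device names and sizes are hypothetical):
 *
 *   dmsetup create thin --table "0 2097152 thin /dev/mapper/pool 0"
 *   dmsetup create snap --table "0 2097152 thin /dev/mapper/pool 1 /dev/mapper/origin"
 *
 * The second line activates device id 1 as a snapshot of an external
 * origin device.
 */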
2519static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2520{
2521 int r;
2522 struct thin_c *tc;
2dd9c257 2523 struct dm_dev *pool_dev, *origin_dev;
991d9fa0
JT
2524 struct mapped_device *pool_md;
2525
2526 mutex_lock(&dm_thin_pool_table.mutex);
2527
2dd9c257 2528 if (argc != 2 && argc != 3) {
991d9fa0
JT
2529 ti->error = "Invalid argument count";
2530 r = -EINVAL;
2531 goto out_unlock;
2532 }
2533
2534 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2535 if (!tc) {
2536 ti->error = "Out of memory";
2537 r = -ENOMEM;
2538 goto out_unlock;
2539 }
2540
2dd9c257
JT
2541 if (argc == 3) {
2542 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2543 if (r) {
2544 ti->error = "Error opening origin device";
2545 goto bad_origin_dev;
2546 }
2547 tc->origin_dev = origin_dev;
2548 }
2549
991d9fa0
JT
2550 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2551 if (r) {
2552 ti->error = "Error opening pool device";
2553 goto bad_pool_dev;
2554 }
2555 tc->pool_dev = pool_dev;
2556
2557 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2558 ti->error = "Invalid device id";
2559 r = -EINVAL;
2560 goto bad_common;
2561 }
2562
2563 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2564 if (!pool_md) {
2565 ti->error = "Couldn't get pool mapped device";
2566 r = -EINVAL;
2567 goto bad_common;
2568 }
2569
2570 tc->pool = __pool_table_lookup(pool_md);
2571 if (!tc->pool) {
2572 ti->error = "Couldn't find pool object";
2573 r = -EINVAL;
2574 goto bad_pool_lookup;
2575 }
2576 __pool_inc(tc->pool);
2577
e49e5829
JT
2578 if (get_pool_mode(tc->pool) == PM_FAIL) {
2579 ti->error = "Couldn't open thin device, Pool is in fail mode";
 r = -EINVAL;
2580 goto bad_thin_open;
2581 }
2582
991d9fa0
JT
2583 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2584 if (r) {
2585 ti->error = "Couldn't open thin internal device";
2586 goto bad_thin_open;
2587 }
2588
542f9038
MS
2589 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2590 if (r)
2591 goto bad_target_max_io_len;
2592
991d9fa0 2593 ti->num_flush_requests = 1;
16ad3d10 2594 ti->flush_supported = true;
59c3d2c6 2595 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
67e2e2b2
JT
2596
2597 /* If the pool supports discards, pass them on. */
2598 if (tc->pool->pf.discard_enabled) {
0ac55489 2599 ti->discards_supported = true;
67e2e2b2 2600 ti->num_discard_requests = 1;
0ac55489 2601 ti->discard_zeroes_data_unsupported = true;
49296309 2602 /* Discard requests must be split on a block boundary */
0ac55489 2603 ti->split_discard_requests = true;
67e2e2b2 2604 }
991d9fa0
JT
2605
2606 dm_put(pool_md);
2607
2608 mutex_unlock(&dm_thin_pool_table.mutex);
2609
2610 return 0;
2611
bad_target_max_io_len:
 dm_pool_close_thin_device(tc->td);
2612bad_thin_open:
2613 __pool_dec(tc->pool);
2614bad_pool_lookup:
2615 dm_put(pool_md);
2616bad_common:
2617 dm_put_device(ti, tc->pool_dev);
2618bad_pool_dev:
2dd9c257
JT
2619 if (tc->origin_dev)
2620 dm_put_device(ti, tc->origin_dev);
2621bad_origin_dev:
991d9fa0
JT
2622 kfree(tc);
2623out_unlock:
2624 mutex_unlock(&dm_thin_pool_table.mutex);
2625
2626 return r;
2627}
2628
2629static int thin_map(struct dm_target *ti, struct bio *bio,
2630 union map_info *map_context)
2631{
6efd6e83 2632 bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
991d9fa0
JT
2633
2634 return thin_bio_map(ti, bio, map_context);
2635}
2636
eb2aa48d
JT
2637static int thin_endio(struct dm_target *ti,
2638 struct bio *bio, int err,
2639 union map_info *map_context)
2640{
2641 unsigned long flags;
59c3d2c6 2642 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
eb2aa48d 2643 struct list_head work;
a24c2569 2644 struct dm_thin_new_mapping *m, *tmp;
eb2aa48d
JT
2645 struct pool *pool = h->tc->pool;
2646
2647 if (h->shared_read_entry) {
2648 INIT_LIST_HEAD(&work);
44feb387 2649 dm_deferred_entry_dec(h->shared_read_entry, &work);
eb2aa48d
JT
2650
2651 spin_lock_irqsave(&pool->lock, flags);
2652 list_for_each_entry_safe(m, tmp, &work, list) {
2653 list_del(&m->list);
2654 m->quiesced = 1;
2655 __maybe_add_mapping(m);
2656 }
2657 spin_unlock_irqrestore(&pool->lock, flags);
2658 }
2659
104655fd
JT
2660 if (h->all_io_entry) {
2661 INIT_LIST_HEAD(&work);
44feb387 2662 dm_deferred_entry_dec(h->all_io_entry, &work);
563af186
JT
2663 if (!list_empty(&work)) {
2664 spin_lock_irqsave(&pool->lock, flags);
2665 list_for_each_entry_safe(m, tmp, &work, list)
2666 list_add(&m->list, &pool->prepared_discards);
2667 spin_unlock_irqrestore(&pool->lock, flags);
2668 wake_worker(pool);
2669 }
104655fd
JT
2670 }
2671
eb2aa48d
JT
2672 return 0;
2673}
2674
991d9fa0
JT
2675static void thin_postsuspend(struct dm_target *ti)
2676{
2677 if (dm_noflush_suspending(ti))
2678 requeue_io((struct thin_c *)ti->private);
2679}
2680
2681/*
2682 * <nr mapped sectors> <highest mapped sector>
2683 */
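/*
 * e.g. "1048576 2097151" would be a plausible line for a device with
 * 128-sector blocks that has 8192 blocks mapped and block 16383 as its
 * highest mapped block (figures purely illustrative).
 */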
2684static int thin_status(struct dm_target *ti, status_type_t type,
1f4e0ff0 2685 unsigned status_flags, char *result, unsigned maxlen)
991d9fa0
JT
2686{
2687 int r;
2688 ssize_t sz = 0;
2689 dm_block_t mapped, highest;
2690 char buf[BDEVNAME_SIZE];
2691 struct thin_c *tc = ti->private;
2692
e49e5829
JT
2693 if (get_pool_mode(tc->pool) == PM_FAIL) {
2694 DMEMIT("Fail");
2695 return 0;
2696 }
2697
991d9fa0
JT
2698 if (!tc->td)
2699 DMEMIT("-");
2700 else {
2701 switch (type) {
2702 case STATUSTYPE_INFO:
2703 r = dm_thin_get_mapped_count(tc->td, &mapped);
2704 if (r)
2705 return r;
2706
2707 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2708 if (r < 0)
2709 return r;
2710
2711 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2712 if (r)
2713 DMEMIT("%llu", ((highest + 1) *
2714 tc->pool->sectors_per_block) - 1);
2715 else
2716 DMEMIT("-");
2717 break;
2718
2719 case STATUSTYPE_TABLE:
2720 DMEMIT("%s %lu",
2721 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2722 (unsigned long) tc->dev_id);
2dd9c257
JT
2723 if (tc->origin_dev)
2724 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
991d9fa0
JT
2725 break;
2726 }
2727 }
2728
2729 return 0;
2730}
2731
2732static int thin_iterate_devices(struct dm_target *ti,
2733 iterate_devices_callout_fn fn, void *data)
2734{
55f2b8bd 2735 sector_t blocks;
991d9fa0 2736 struct thin_c *tc = ti->private;
55f2b8bd 2737 struct pool *pool = tc->pool;
991d9fa0
JT
2738
2739 /*
2740 * We can't call dm_pool_get_data_dev_size() since that blocks. So
2741 * we follow a more convoluted path through to the pool's target.
2742 */
55f2b8bd 2743 if (!pool->ti)
991d9fa0
JT
2744 return 0; /* nothing is bound */
2745
55f2b8bd
MS
2746 blocks = pool->ti->len;
2747 (void) sector_div(blocks, pool->sectors_per_block);
991d9fa0 2748 if (blocks)
55f2b8bd 2749 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
991d9fa0
JT
2750
2751 return 0;
2752}
2753
0424caa1
MS
2754/*
2755 * A thin device always inherits its queue limits from its pool.
2756 */
991d9fa0
JT
2757static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
2758{
2759 struct thin_c *tc = ti->private;
2760
0424caa1 2761 *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
991d9fa0
JT
2762}
2763
2764static struct target_type thin_target = {
2765 .name = "thin",
70d6c400 2766 .version = {1, 6, 0},
991d9fa0
JT
2767 .module = THIS_MODULE,
2768 .ctr = thin_ctr,
2769 .dtr = thin_dtr,
2770 .map = thin_map,
eb2aa48d 2771 .end_io = thin_endio,
991d9fa0
JT
2772 .postsuspend = thin_postsuspend,
2773 .status = thin_status,
2774 .iterate_devices = thin_iterate_devices,
2775 .io_hints = thin_io_hints,
2776};
2777
2778/*----------------------------------------------------------------*/
2779
2780static int __init dm_thin_init(void)
2781{
2782 int r;
2783
2784 pool_table_init();
2785
2786 r = dm_register_target(&thin_target);
2787 if (r)
2788 return r;
2789
2790 r = dm_register_target(&pool_target);
2791 if (r)
a24c2569
MS
2792 goto bad_pool_target;
2793
2794 r = -ENOMEM;
2795
a24c2569
MS
2796 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2797 if (!_new_mapping_cache)
2798 goto bad_new_mapping_cache;
2799
a24c2569
MS
2800 return 0;
2801
a24c2569 2802bad_new_mapping_cache:
a24c2569
MS
2803 dm_unregister_target(&pool_target);
2804bad_pool_target:
2805 dm_unregister_target(&thin_target);
991d9fa0
JT
2806
2807 return r;
2808}
2809
2810static void dm_thin_exit(void)
2811{
2812 dm_unregister_target(&thin_target);
2813 dm_unregister_target(&pool_target);
a24c2569 2814
a24c2569 2815 kmem_cache_destroy(_new_mapping_cache);
991d9fa0
JT
2816}
2817
2818module_init(dm_thin_init);
2819module_exit(dm_thin_exit);
2820
7cab8bf1 2821MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
991d9fa0
JT
2822MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2823MODULE_LICENSE("GPL");