/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
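/*
 * A minimal usage sketch (illustrative only; the callback, regions,
 * page count and context below are assumptions, not part of this file):
 *
 *	static void copy_done(int read_err, unsigned long write_err,
 *			      void *context)
 *	{
 *		... both error arguments are zero on success ...
 *	}
 *
 *	struct dm_kcopyd_client *kc;
 *	struct dm_io_region from, to;
 *
 *	dm_kcopyd_client_create(nr_pages, &kc);
 *	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, context);
 *	...
 *	dm_kcopyd_client_destroy(kc);
 */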
#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>
#define SUB_JOB_SIZE	128
#define SPLIT_COUNT	8
#define MIN_JOBS	8
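/*
 * SUB_JOB_SIZE is expressed in 512-byte sectors, so each sub job copies
 * at most 64KiB; a copy larger than one sub job is driven by SPLIT_COUNT
 * sub jobs working through the region in parallel.
 */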
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
        struct page_list *pages;
        unsigned int nr_pages;
        unsigned int nr_free_pages;

        struct dm_io_client *io_client;

        wait_queue_head_t destroyq;
        atomic_t nr_jobs;

        mempool_t *job_pool;

        struct workqueue_struct *kcopyd_wq;
        struct work_struct kcopyd_work;

        /*
         * We maintain three lists of jobs:
         *
         * i)   jobs waiting for pages
         * ii)  jobs that have pages, and are waiting for the io to be issued.
         * iii) jobs that have completed.
         *
         * All three of these are protected by job_lock.
         */
        spinlock_t job_lock;
        struct list_head complete_jobs;
        struct list_head io_jobs;
        struct list_head pages_jobs;
};
static void wake(struct dm_kcopyd_client *kc)
{
        queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
static struct page_list *alloc_pl(gfp_t gfp)
{
        struct page_list *pl;

        pl = kmalloc(sizeof(*pl), gfp);
        if (!pl)
                return NULL;

        pl->page = alloc_page(gfp);
        if (!pl->page) {
                kfree(pl);
                return NULL;
        }

        return pl;
}
static void free_pl(struct page_list *pl)
{
        __free_page(pl->page);
        kfree(pl);
}
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
{
        struct page_list *pl;

        if (kc->nr_free_pages < nr)
                return -ENOMEM;

        kc->nr_free_pages -= nr;
        for (*pages = pl = kc->pages; --nr; pl = pl->next)
                ;

        kc->pages = pl->next;
        pl->next = NULL;

        return 0;
}
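/*
 * Note the all-or-nothing behaviour above: if fewer than nr pages are
 * free the caller gets -ENOMEM, and run_pages_job() below treats that
 * as "can't process yet" rather than as a hard failure.
 */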
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
        struct page_list *cursor;

        for (cursor = pl; cursor->next; cursor = cursor->next)
                kc->nr_free_pages++;

        kc->nr_free_pages++;
        cursor->next = kc->pages;
        kc->pages = pl;
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
        struct page_list *next;

        while (pl) {
                next = pl->next;
                free_pl(pl);
                pl = next;
        }
}
static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
        unsigned int i;
        struct page_list *pl = NULL, *next;

        for (i = 0; i < nr; i++) {
                next = alloc_pl(GFP_KERNEL);
                if (!next) {
                        if (pl)
                                drop_pages(pl);
                        return -ENOMEM;
                }
                next->next = pl;
                pl = next;
        }

        kc->nr_pages += nr;
        kcopyd_put_pages(kc, pl);

        return 0;
}
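/*
 * All pages are preallocated at client creation time (see
 * dm_kcopyd_client_create() below), so a copy never has to allocate
 * memory on the io path, which may be servicing memory reclaim.
 */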
static void client_free_pages(struct dm_kcopyd_client *kc)
{
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
        kc->pages = NULL;
        kc->nr_free_pages = kc->nr_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
        struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;

        /*
         * Error state of the job.
         */
        int read_err;
        unsigned long write_err;

        /*
         * Either READ or WRITE
         */
        int rw;
        struct dm_io_region source;

        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
        struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

        sector_t offset;
        unsigned int nr_pages;
        struct page_list *pages;

        /*
         * Set this to ensure you are notified when the job has
         * completed.  'context' is for callback to use.
         */
        dm_kcopyd_notify_fn fn;
        void *context;

        /*
         * These fields are only used if the job has been split
         * into more manageable parts.
         */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;

        struct kcopyd_job *master_job;
};
static struct kmem_cache *_job_cache;
int __init dm_kcopyd_init(void)
{
        _job_cache = kmem_cache_create("kcopyd_job",
                                sizeof(struct kcopyd_job) * (SPLIT_COUNT + 1),
                                __alignof__(struct kcopyd_job), 0, NULL);
        if (!_job_cache)
                return -ENOMEM;

        return 0;
}
void dm_kcopyd_exit(void)
{
        kmem_cache_destroy(_job_cache);
        _job_cache = NULL;
}
/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
                              struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&kc->job_lock, flags);

        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&kc->job_lock, flags);

        return job;
}
static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}
static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;

        if (job->pages)
                kcopyd_put_pages(kc, job->pages);
        /*
         * If this is the master job, the sub jobs have already
         * completed so we can free everything.
         */
        if (job->master_job == job)
                mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);

        if (atomic_dec_and_test(&kc->nr_jobs))
                wake_up(&kc->destroyq);

        return 0;
}
static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        if (error) {
                if (job->rw == WRITE)
                        job->write_err |= error;
                else
                        job->read_err = 1;

                if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                        push(&kc->complete_jobs, job);
                        wake(kc);
                        return;
                }
        }

        if (job->rw == WRITE)
                push(&kc->complete_jobs, job);
        else {
                job->rw = WRITE;
                push(&kc->io_jobs, job);
        }

        wake(kc);
}
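/*
 * Note the read-then-write sequencing above: each job is first issued
 * as a READ of the source; when that io completes, complete_io() flips
 * the job to WRITE and requeues it, so the destinations are written
 * from the same pages.
 */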
/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = job->rw,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
                .notify.fn = complete_io,
                .notify.context = job,
                .client = job->kc->io_client,
        };

        if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);

        return r;
}
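/*
 * Worked example of the page arithmetic below: counts are in 512-byte
 * sectors, so with 4KiB pages PAGE_SIZE >> 9 == 8 sectors per page, and
 * a full 128-sector sub job needs dm_div_up(128 + 0, 8) == 16 pages.
 */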
static int run_pages_job(struct kcopyd_job *job)
{
        int r;

        job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
                                  PAGE_SIZE >> 9);
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
                push(&job->kc->io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                /* can't complete now */
                return 1;

        return r;
}
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        int (*fn) (struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs, kc))) {

                r = fn(job);

                if (r < 0) {
                        /* error this rogue job */
                        if (job->rw == WRITE)
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /*
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
                        push_head(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
        struct dm_kcopyd_client *kc = container_of(work,
                                        struct dm_kcopyd_client, kcopyd_work);
        struct blk_plug plug;

        /*
         * The order that these are called is *very* important.
         * complete jobs can free some pages for pages jobs.
         * Pages jobs when successful will jump onto the io jobs
         * list.  io jobs call wake when they complete and it all
         * starts again.
         */
        blk_start_plug(&plug);
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
        blk_finish_plug(&plug);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
        struct dm_kcopyd_client *kc = job->kc;

        atomic_inc(&kc->nr_jobs);
        if (unlikely(!job->source.count))
                push(&kc->complete_jobs, job);
        else
                push(&kc->pages_jobs, job);
        wake(kc);
}
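/*
 * For scale, using the constants defined above: a 1MiB copy is 2048
 * sectors, i.e. 16 sub jobs of SUB_JOB_SIZE sectors, of which at most
 * SPLIT_COUNT are in flight at any one time.
 */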
static void segment_complete(int read_err, unsigned long write_err,
                             void *context)
{
        /* FIXME: tidy this function */
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *sub_job = (struct kcopyd_job *) context;
        struct kcopyd_job *job = sub_job->master_job;
        struct dm_kcopyd_client *kc = job->kc;

        mutex_lock(&job->lock);

        /* update the error */
        if (read_err)
                job->read_err = 1;

        if (write_err)
                job->write_err |= write_err;

        /*
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > SUB_JOB_SIZE)
                                count = SUB_JOB_SIZE;

                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                int i;

                *sub_job = *job;
                sub_job->source.sector += progress;
                sub_job->source.count = count;

                for (i = 0; i < job->num_dests; i++) {
                        sub_job->dests[i].sector += progress;
                        sub_job->dests[i].count = count;
                }

                sub_job->fn = segment_complete;
                sub_job->context = sub_job;
                dispatch_job(sub_job);

        } else if (atomic_dec_and_test(&job->sub_jobs)) {

                /*
                 * Queue the completion callback to the kcopyd thread.
                 *
                 * Some callers assume that all the completions are called
                 * from a single thread and don't race with each other.
                 *
                 * We must not call the callback directly here because this
                 * code may not be executing in the thread.
                 */
                push(&kc->complete_jobs, job);
                wake(kc);
        }
}
/*
 * Create some sub jobs to share the work between them.
 */
static void split_job(struct kcopyd_job *master_job)
{
        int i;

        atomic_inc(&master_job->kc->nr_jobs);

        atomic_set(&master_job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++) {
                master_job[i + 1].master_job = master_job;
                segment_complete(0, 0u, &master_job[i + 1]);
        }
}
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                   unsigned int num_dests, struct dm_io_region *dests,
                   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
        struct kcopyd_job *job;

        /*
         * Allocate an array of jobs consisting of one master job
         * followed by SPLIT_COUNT sub jobs.
         */
        job = mempool_alloc(kc->job_pool, GFP_NOIO);

        /*
         * set up for the read.
         */
        job->kc = kc;
        job->flags = flags;
        job->read_err = 0;
        job->write_err = 0;
        job->rw = READ;

        job->source = *from;

        job->num_dests = num_dests;
        memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

        job->offset = 0;
        job->nr_pages = 0;
        job->pages = NULL;

        job->fn = fn;
        job->context = context;
        job->master_job = job;

        if (job->source.count <= SUB_JOB_SIZE)
                dispatch_job(job);
        else {
                mutex_init(&job->lock);
                job->progress = 0;
                split_job(job);
        }

        return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
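/*
 * In the completion callback, read_err is non-zero if the source read
 * failed, and write_err follows dm-io's convention of one bit per
 * destination region, so a caller with several destinations can tell
 * which of them failed.
 */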
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
        /* FIXME: finish */
        return -1;
}
#endif  /*  0  */
/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
                            struct dm_kcopyd_client **result)
{
        int r = -ENOMEM;
        struct dm_kcopyd_client *kc;

        kc = kmalloc(sizeof(*kc), GFP_KERNEL);
        if (!kc)
                return -ENOMEM;

        spin_lock_init(&kc->job_lock);
        INIT_LIST_HEAD(&kc->complete_jobs);
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);

        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
                goto bad_slab;

        INIT_WORK(&kc->kcopyd_work, do_work);
        kc->kcopyd_wq = alloc_workqueue("kcopyd",
                                        WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
        if (!kc->kcopyd_wq)
                goto bad_workqueue;

        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
        if (r)
                goto bad_client_pages;

        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
                goto bad_io_client;
        }

        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);

        *result = kc;
        return 0;

bad_io_client:
        client_free_pages(kc);
bad_client_pages:
        destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
        mempool_destroy(kc->job_pool);
bad_slab:
        kfree(kc);

        return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

        BUG_ON(!list_empty(&kc->complete_jobs));
        BUG_ON(!list_empty(&kc->io_jobs));
        BUG_ON(!list_empty(&kc->pages_jobs));
        destroy_workqueue(kc->kcopyd_wq);
        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
        mempool_destroy(kc->job_pool);
        kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);