drivers/md/dm-kcopyd.c
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
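/*
 * A minimal usage sketch, kept here as a comment for illustration only.
 * The callback name, the src_bdev/dst_bdev pointers and the region sizes
 * below are hypothetical and not part of this file; error handling is
 * omitted.  The notify fn runs from kcopyd's workqueue once every
 * destination write has completed:
 *
 *	static void copy_done(int read_err, unsigned long write_err, void *context)
 *	{
 *		complete(context);
 *	}
 *
 *	DECLARE_COMPLETION_ONSTACK(done);
 *	struct dm_kcopyd_client *kc;
 *	struct dm_io_region from = { .bdev = src_bdev, .sector = 0, .count = 1024 };
 *	struct dm_io_region to   = { .bdev = dst_bdev, .sector = 0, .count = 1024 };
 *
 *	if (!dm_kcopyd_client_create(256, &kc)) {
 *		dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, &done);
 *		wait_for_completion(&done);
 *		dm_kcopyd_client_destroy(kc);
 *	}
 */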

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
        spinlock_t lock;
        struct page_list *pages;
        unsigned int nr_pages;
        unsigned int nr_free_pages;

        struct dm_io_client *io_client;

        wait_queue_head_t destroyq;
        atomic_t nr_jobs;

        mempool_t *job_pool;

        struct workqueue_struct *kcopyd_wq;
        struct work_struct kcopyd_work;

        /*
         * We maintain three lists of jobs:
         *
         * i) jobs waiting for pages
         * ii) jobs that have pages, and are waiting for the io to be issued.
         * iii) jobs that have completed.
         *
         * All three of these are protected by job_lock.
         */
        spinlock_t job_lock;
        struct list_head complete_jobs;
        struct list_head io_jobs;
        struct list_head pages_jobs;
};

static void wake(struct dm_kcopyd_client *kc)
{
        queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}

static struct page_list *alloc_pl(void)
{
        struct page_list *pl;

        pl = kmalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return NULL;

        pl->page = alloc_page(GFP_KERNEL);
        if (!pl->page) {
                kfree(pl);
                return NULL;
        }

        return pl;
}

static void free_pl(struct page_list *pl)
{
        __free_page(pl->page);
        kfree(pl);
}

static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
                            unsigned int nr, struct page_list **pages)
{
        struct page_list *pl;

        spin_lock(&kc->lock);
        if (kc->nr_free_pages < nr) {
                spin_unlock(&kc->lock);
                return -ENOMEM;
        }

        kc->nr_free_pages -= nr;
        for (*pages = pl = kc->pages; --nr; pl = pl->next)
                ;

        kc->pages = pl->next;
        pl->next = NULL;

        spin_unlock(&kc->lock);

        return 0;
}

static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
        struct page_list *cursor;

        spin_lock(&kc->lock);
        for (cursor = pl; cursor->next; cursor = cursor->next)
                kc->nr_free_pages++;

        kc->nr_free_pages++;
        cursor->next = kc->pages;
        kc->pages = pl;
        spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
        struct page_list *next;

        while (pl) {
                next = pl->next;
                free_pl(pl);
                pl = next;
        }
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
        unsigned int i;
        struct page_list *pl = NULL, *next;

        for (i = 0; i < nr; i++) {
                next = alloc_pl();
                if (!next) {
                        if (pl)
                                drop_pages(pl);
                        return -ENOMEM;
                }
                next->next = pl;
                pl = next;
        }

        kcopyd_put_pages(kc, pl);
        kc->nr_pages += nr;
        return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
        BUG_ON(kc->nr_free_pages != kc->nr_pages);
        drop_pages(kc->pages);
        kc->pages = NULL;
        kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
        struct dm_kcopyd_client *kc;
        struct list_head list;
        unsigned long flags;

        /*
         * Error state of the job.
         */
        int read_err;
        unsigned long write_err;

        /*
         * Either READ or WRITE
         */
        int rw;
        struct dm_io_region source;

        /*
         * The destinations for the transfer.
         */
        unsigned int num_dests;
        struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

        sector_t offset;
        unsigned int nr_pages;
        struct page_list *pages;

        /*
         * Set this to ensure you are notified when the job has
         * completed. 'context' is for callback to use.
         */
        dm_kcopyd_notify_fn fn;
        void *context;

        /*
         * These fields are only used if the job has been split
         * into more manageable parts.
         */
        struct mutex lock;
        atomic_t sub_jobs;
        sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
        _job_cache = KMEM_CACHE(kcopyd_job, 0);
        if (!_job_cache)
                return -ENOMEM;

        return 0;
}

void dm_kcopyd_exit(void)
{
        kmem_cache_destroy(_job_cache);
        _job_cache = NULL;
}

/*
 * Functions to pop a job off the head of a given job list, and to
 * push a job onto the tail (or, for retries, back onto the head).
 */
static struct kcopyd_job *pop(struct list_head *jobs,
                              struct dm_kcopyd_client *kc)
{
        struct kcopyd_job *job = NULL;
        unsigned long flags;

        spin_lock_irqsave(&kc->job_lock, flags);

        if (!list_empty(jobs)) {
                job = list_entry(jobs->next, struct kcopyd_job, list);
                list_del(&job->list);
        }
        spin_unlock_irqrestore(&kc->job_lock, flags);

        return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add_tail(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}


static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
        unsigned long flags;
        struct dm_kcopyd_client *kc = job->kc;

        spin_lock_irqsave(&kc->job_lock, flags);
        list_add(&job->list, jobs);
        spin_unlock_irqrestore(&kc->job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
        void *context = job->context;
        int read_err = job->read_err;
        unsigned long write_err = job->write_err;
        dm_kcopyd_notify_fn fn = job->fn;
        struct dm_kcopyd_client *kc = job->kc;

        if (job->pages)
                kcopyd_put_pages(kc, job->pages);
        mempool_free(job, kc->job_pool);
        fn(read_err, write_err, context);

        if (atomic_dec_and_test(&kc->nr_jobs))
                wake_up(&kc->destroyq);

        return 0;
}

static void complete_io(unsigned long error, void *context)
{
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        if (error) {
                if (job->rw == WRITE)
                        job->write_err |= error;
                else
                        job->read_err = 1;

                if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                        push(&kc->complete_jobs, job);
                        wake(kc);
                        return;
                }
        }

        if (job->rw == WRITE)
                push(&kc->complete_jobs, job);

        else {
                job->rw = WRITE;
                push(&kc->io_jobs, job);
        }

        wake(kc);
}

/*
 * Issue the io for a job: a single read from the source region, or
 * writes to all the destination regions, using the pages the job
 * currently holds.
 */
static int run_io_job(struct kcopyd_job *job)
{
        int r;
        struct dm_io_request io_req = {
                .bi_rw = job->rw,
                .mem.type = DM_IO_PAGE_LIST,
                .mem.ptr.pl = job->pages,
                .mem.offset = job->offset,
                .notify.fn = complete_io,
                .notify.context = job,
                .client = job->kc->io_client,
        };

        if (job->rw == READ)
                r = dm_io(&io_req, 1, &job->source, NULL);
        else
                r = dm_io(&io_req, job->num_dests, job->dests, NULL);

        return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
        int r;

        job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
                                  PAGE_SIZE >> 9);
        r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
        if (!r) {
                /* this job is ready for io */
                push(&job->kc->io_jobs, job);
                return 0;
        }

        if (r == -ENOMEM)
                /* can't complete now */
                return 1;

        return r;
}
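
/*
 * Worked example for the calculation above (assuming 4K pages, so
 * PAGE_SIZE >> 9 == 8 sectors per page): a destination of 1024 sectors
 * with a zero offset needs dm_div_up(1024, 8) == 128 pages from the
 * client's pool before the job can move on to the io_jobs list.
 */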

/*
 * Run through a list for as long as possible. Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
                        int (*fn) (struct kcopyd_job *))
{
        struct kcopyd_job *job;
        int r, count = 0;

        while ((job = pop(jobs, kc))) {

                r = fn(job);

                if (r < 0) {
                        /* error this rogue job */
                        if (job->rw == WRITE)
                                job->write_err = (unsigned long) -1L;
                        else
                                job->read_err = 1;
                        push(&kc->complete_jobs, job);
                        break;
                }

                if (r > 0) {
                        /*
                         * We couldn't service this job ATM, so
                         * push this job back onto the list.
                         */
                        push_head(jobs, job);
                        break;
                }

                count++;
        }

        return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
        struct dm_kcopyd_client *kc = container_of(work,
                                        struct dm_kcopyd_client, kcopyd_work);
        struct blk_plug plug;

        /*
         * The order that these are called is *very* important.
         * complete jobs can free some pages for pages jobs.
         * Pages jobs when successful will jump onto the io jobs
         * list. io jobs call wake when they complete and it all
         * starts again.
         */
        blk_start_plug(&plug);
        process_jobs(&kc->complete_jobs, kc, run_complete_job);
        process_jobs(&kc->pages_jobs, kc, run_pages_job);
        process_jobs(&kc->io_jobs, kc, run_io_job);
        blk_finish_plug(&plug);
}
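
/*
 * A typical job therefore flows: dispatch_job() queues it on pages_jobs,
 * run_pages_job() moves it to io_jobs once pages are reserved,
 * run_io_job() issues the read, complete_io() re-queues it on io_jobs
 * as a write, and complete_io() then pushes it onto complete_jobs,
 * where run_complete_job() returns the pages and calls the caller's
 * notify fn.
 */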

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
        struct dm_kcopyd_client *kc = job->kc;
        atomic_inc(&kc->nr_jobs);
        if (unlikely(!job->source.count))
                push(&kc->complete_jobs, job);
        else
                push(&kc->pages_jobs, job);
        wake(kc);
}

#define SUB_JOB_SIZE 128
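/* i.e. each sub-job copies at most 128 sectors of 512 bytes, or 64KB */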
static void segment_complete(int read_err, unsigned long write_err,
                             void *context)
{
        /* FIXME: tidy this function */
        sector_t progress = 0;
        sector_t count = 0;
        struct kcopyd_job *job = (struct kcopyd_job *) context;
        struct dm_kcopyd_client *kc = job->kc;

        mutex_lock(&job->lock);

        /* update the error */
        if (read_err)
                job->read_err = 1;

        if (write_err)
                job->write_err |= write_err;

        /*
         * Only dispatch more work if there hasn't been an error.
         */
        if ((!job->read_err && !job->write_err) ||
            test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
                /* get the next chunk of work */
                progress = job->progress;
                count = job->source.count - progress;
                if (count) {
                        if (count > SUB_JOB_SIZE)
                                count = SUB_JOB_SIZE;

                        job->progress += count;
                }
        }
        mutex_unlock(&job->lock);

        if (count) {
                int i;
                struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
                                                           GFP_NOIO);

                *sub_job = *job;
                sub_job->source.sector += progress;
                sub_job->source.count = count;

                for (i = 0; i < job->num_dests; i++) {
                        sub_job->dests[i].sector += progress;
                        sub_job->dests[i].count = count;
                }

                sub_job->fn = segment_complete;
                sub_job->context = job;
                dispatch_job(sub_job);

        } else if (atomic_dec_and_test(&job->sub_jobs)) {

                /*
                 * Queue the completion callback to the kcopyd thread.
                 *
                 * Some callers assume that all the completions are called
                 * from a single thread and don't race with each other.
                 *
                 * We must not call the callback directly here because this
                 * code may not be executing in the thread.
                 */
                push(&kc->complete_jobs, job);
                wake(kc);
        }
}

/*
 * Break a large copy up into SPLIT_COUNT concurrent sub-jobs, each
 * moving at most SUB_JOB_SIZE sectors at a time.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
        int i;

        atomic_inc(&job->kc->nr_jobs);

        atomic_set(&job->sub_jobs, SPLIT_COUNT);
        for (i = 0; i < SPLIT_COUNT; i++)
                segment_complete(0, 0u, job);
}
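
/*
 * Worked example (hypothetical numbers): a 1GB copy is 2097152 sectors,
 * well over SUB_JOB_SIZE, so dm_kcopyd_copy() below takes the split_job()
 * path.  Eight sub-jobs of up to 128 sectors are dispatched immediately;
 * as each one completes, segment_complete() advances job->progress and
 * dispatches the next 128-sector chunk, keeping eight sub-copies in
 * flight until progress reaches source.count.
 */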

int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
                   unsigned int num_dests, struct dm_io_region *dests,
                   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
        struct kcopyd_job *job;

        /*
         * Allocate a new job.
         */
        job = mempool_alloc(kc->job_pool, GFP_NOIO);

        /*
         * set up for the read.
         */
        job->kc = kc;
        job->flags = flags;
        job->read_err = 0;
        job->write_err = 0;
        job->rw = READ;

        job->source = *from;

        job->num_dests = num_dests;
        memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

        job->offset = 0;
        job->nr_pages = 0;
        job->pages = NULL;

        job->fn = fn;
        job->context = context;

        if (job->source.count < SUB_JOB_SIZE)
                dispatch_job(job);

        else {
                mutex_init(&job->lock);
                job->progress = 0;
                split_job(job);
        }

        return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);

/*
 * Cancels a kcopyd job, e.g. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
        /* FIXME: finish */
        return -1;
}
#endif  /*  0  */

/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
                            struct dm_kcopyd_client **result)
{
        int r = -ENOMEM;
        struct dm_kcopyd_client *kc;

        kc = kmalloc(sizeof(*kc), GFP_KERNEL);
        if (!kc)
                return -ENOMEM;

        spin_lock_init(&kc->lock);
        spin_lock_init(&kc->job_lock);
        INIT_LIST_HEAD(&kc->complete_jobs);
        INIT_LIST_HEAD(&kc->io_jobs);
        INIT_LIST_HEAD(&kc->pages_jobs);

        kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
        if (!kc->job_pool)
                goto bad_slab;

        INIT_WORK(&kc->kcopyd_work, do_work);
        kc->kcopyd_wq = alloc_workqueue("kcopyd",
                                        WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
        if (!kc->kcopyd_wq)
                goto bad_workqueue;

        kc->pages = NULL;
        kc->nr_pages = kc->nr_free_pages = 0;
        r = client_alloc_pages(kc, nr_pages);
        if (r)
                goto bad_client_pages;

        kc->io_client = dm_io_client_create(nr_pages);
        if (IS_ERR(kc->io_client)) {
                r = PTR_ERR(kc->io_client);
                goto bad_io_client;
        }

        init_waitqueue_head(&kc->destroyq);
        atomic_set(&kc->nr_jobs, 0);

        *result = kc;
        return 0;

bad_io_client:
        client_free_pages(kc);
bad_client_pages:
        destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
        mempool_destroy(kc->job_pool);
bad_slab:
        kfree(kc);

        return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);

void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
        /* Wait for completion of all jobs submitted by this client. */
        wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

        BUG_ON(!list_empty(&kc->complete_jobs));
        BUG_ON(!list_empty(&kc->io_jobs));
        BUG_ON(!list_empty(&kc->pages_jobs));
        destroy_workqueue(kc->kcopyd_wq);
        dm_io_client_destroy(kc->io_client);
        client_free_pages(kc);
        mempool_destroy(kc->job_pool);
        kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);