fs: use helper bio_add_page() instead of open coding on bi_io_vec

drivers/md/dm-io.c
/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
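
/*
 * Example usage (illustrative, not part of this file): a dm target
 * typically creates one client in its constructor and destroys it in
 * its destructor.  The 'struct my_target' wrapper below is hypothetical.
 *
 *	struct my_target {
 *		struct dm_io_client *io_client;
 *	};
 *
 *	static int my_target_ctr(struct my_target *t)
 *	{
 *		t->io_client = dm_io_client_create();
 *		if (IS_ERR(t->io_client))
 *			return PTR_ERR(t->io_client);
 *		return 0;
 *	}
 *
 *	static void my_target_dtr(struct my_target *t)
 *	{
 *		dm_io_client_destroy(t->io_client);
 *	}
 */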

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
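
/*
 * Worked example of the tagging scheme above (illustrative): on a
 * 64-bit kernel DM_IO_MAX_REGIONS == 64, so the 'struct io' address is
 * 64-byte aligned, its low 6 bits are always zero, and those bits can
 * carry a region number in 0..63.
 *
 *	io     = 0x...a40	(low 6 bits zero, by alignment)
 *	region = 5
 *	val    = 0x...a45	(io | region)
 *
 *	val & -64UL      == 0x...a40	-> the io pointer back
 *	val & (64 - 1)   == 5		-> the region back
 *
 * -(unsigned long)64 is the two's-complement mask ~63UL, which clears
 * exactly the low six bits.
 */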

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio)
{
	struct io *io;
	unsigned region;
	int error;

	if (bio->bi_error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	error = bio->bi_error;
	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}
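
/*
 * Example consumer of the dpages interface (illustrative): do_region()
 * below walks destination pages roughly like this, without caring
 * whether they come from a page_list, a bvec, a vmalloc'd buffer or
 * ordinary kernel memory:
 *
 *	struct page *page;
 *	unsigned long len;
 *	unsigned offset;
 *
 *	while (remaining) {
 *		dp->get_page(dp, &page, &len, &offset);
 *		len = min(len, to_bytes(remaining));
 *		if (!bio_add_page(bio, page, len, offset))
 *			break;		// bio full: submit, start another
 *		remaining -= to_sector(len);
 *		dp->next_page(dp);
 *	}
 */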

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
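
/*
 * Worked example of the offset handling above (illustrative): for a
 * buffer whose address ends in 0x200, with PAGE_SIZE == 4096 (0x1000):
 *
 *	context_u = addr & 0xfff = 0x200
 *
 * so the first get_page() yields offset 0x200 and len 0xe00
 * (PAGE_SIZE - 0x200), and next_page() advances context_ptr by 0xe00
 * to the next page boundary, after which every chunk is a full page.
 */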

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;
	unsigned int uninitialized_var(special_cmd_max_sectors);

	/*
	 * Reject unsupported discard and write same requests.
	 */
	if (rw & REQ_DISCARD)
		special_cmd_max_sectors = q->limits.max_discard_sectors;
	else if (rw & REQ_WRITE_SAME)
		special_cmd_max_sectors = q->limits.max_write_same_sectors;
	if ((rw & (REQ_DISCARD | REQ_WRITE_SAME)) && special_cmd_max_sectors == 0) {
		dec_count(io, region, -EOPNOTSUPP);
		return;
	}

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, special_cmd_max_sectors, remaining);
			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
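
/*
 * Worked example of the splitting above (illustrative): writing a 1MiB
 * region (remaining = 2048 sectors of 512B) with 4KiB pages needs
 *
 *	dm_sector_div_up(2048, PAGE_SIZE >> SECTOR_SHIFT)
 *	  = dm_sector_div_up(2048, 8) = 256 page-sized bvecs.
 *
 * If bio_get_nr_vecs() caps the bio below that, bio_add_page() fails
 * once the bio is full, the inner loop breaks, and the outer do/while
 * issues further bios starting at
 * where->sector + (where->count - remaining).
 */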

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
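
/*
 * Summary of the counting scheme (illustrative): io->count starts at 1
 * (the dispatcher's own reference), do_region() adds one per submitted
 * bio, and each endio() drops one.  Only the final dec_count() that
 * brings the count to zero runs complete_io(), so the caller's callback
 * cannot fire while bios are still being issued.
 */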

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
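
/*
 * Example call (illustrative, not part of this file): a synchronous
 * read of one region into a kernel buffer.  'md', 'buffer' and the
 * client are assumed to exist in the hypothetical caller.
 *
 *	struct dm_io_region where = {
 *		.bdev = md->bdev,
 *		.sector = 0,
 *		.count = 8,			// 4KiB, in 512B sectors
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn = NULL,		// NULL selects the sync_io() path
 *		.client = md->io_client,
 *	};
 *	unsigned long error_bits;
 *	int r;
 *
 *	r = dm_io(&io_req, 1, &where, &error_bits);
 *
 * Passing a notify.fn instead makes dm_io() return immediately via
 * async_io() and invoke the callback from endio context.
 */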

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}