/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/completion.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));
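
/*
 * Example: on a 64-bit kernel BITS_PER_LONG is 64, so each 'struct io'
 * is 64-byte aligned and the low 6 bits of its address are always zero.
 * That is exactly enough room to pack a region number in the range
 * 0..63 alongside the pointer in bi_private.
 */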

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

   bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}
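
/*
 * The reverse of store_io_and_region_in_bio(): since
 * -(unsigned long)DM_IO_MAX_REGIONS equals ~(DM_IO_MAX_REGIONS - 1) in
 * two's complement, the first mask below recovers the aligned
 * 'struct io' pointer and the second extracts the region number.
 */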
static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void complete_io(struct io *io)
{
	unsigned long error_bits = io->error_bits;
	io_notify_fn fn = io->callback;
	void *context = io->context;

	if (io->vma_invalidate_size)
		invalidate_kernel_vmap_range(io->vma_invalidate_address,
					     io->vma_invalidate_size);

	mempool_free(io, io->client->pool);
	fn(error_bits, context);
}

static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count))
		complete_io(io);
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
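
/*
 * A dpages behaves like a forward-only cursor over the source or
 * destination memory: do_region() calls get_page() to obtain the
 * current page, length and offset, then next_page() to advance.
 */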

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bio_get_page(struct dpages *dp, struct page **p,
			 unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len - dp->context_u;
	*offset = bvec->bv_offset + dp->context_u;
}

static void bio_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = dp->context_ptr;
	dp->context_ptr = bvec + 1;
	dp->context_u = 0;
}

static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
	dp->get_page = bio_get_page;
	dp->next_page = bio_next_page;
	dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
	dp->context_u = bio->bi_iter.bi_bvec_done;
}
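
/*
 * Note that the iterator may start part-way through a bvec:
 * bi_iter.bi_bvec_done holds the number of bytes of the current bvec
 * that have already been completed, and context_u carries that offset
 * into bio_get_page() above.
 */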

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
1da177e4
LT
277/*-----------------------------------------------------------------
278 * IO routines that accept a list of pages.
279 *---------------------------------------------------------------*/
22a1ceb1 280static void do_region(int rw, unsigned region, struct dm_io_region *where,
1da177e4
LT
281 struct dpages *dp, struct io *io)
282{
283 struct bio *bio;
284 struct page *page;
285 unsigned long len;
286 unsigned offset;
287 unsigned num_bvecs;
288 sector_t remaining = where->count;
0c535e0d 289 struct request_queue *q = bdev_get_queue(where->bdev);
70d6c400
MS
290 unsigned short logical_block_size = queue_logical_block_size(q);
291 sector_t num_sectors;
1da177e4 292
37527b86
DW
293 /* Reject unsupported discard requests */
294 if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
295 dec_count(io, region, -EOPNOTSUPP);
296 return;
297 }
298
12fc0f49 299 /*
d87f4c14
TH
300 * where->count may be zero if rw holds a flush and we need to
301 * send a zero-sized flush.
12fc0f49
MP
302 */
303 do {
1da177e4 304 /*
f1e53987 305 * Allocate a suitably sized-bio.
1da177e4 306 */
70d6c400 307 if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
0c535e0d
MB
308 num_bvecs = 1;
309 else
310 num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
311 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
312
bf17ce3a 313 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
4f024f37 314 bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
1da177e4
LT
315 bio->bi_bdev = where->bdev;
316 bio->bi_end_io = endio;
f1e53987 317 store_io_and_region_in_bio(bio, io, region);
1da177e4 318
0c535e0d 319 if (rw & REQ_DISCARD) {
70d6c400 320 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
4f024f37 321 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
70d6c400
MS
322 remaining -= num_sectors;
323 } else if (rw & REQ_WRITE_SAME) {
324 /*
325 * WRITE SAME only uses a single page.
326 */
327 dp->get_page(dp, &page, &len, &offset);
328 bio_add_page(bio, page, logical_block_size, offset);
329 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
4f024f37 330 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
70d6c400
MS
331
332 offset = 0;
333 remaining -= num_sectors;
334 dp->next_page(dp);
0c535e0d
MB
335 } else while (remaining) {
336 /*
337 * Try and add as many pages as possible.
338 */
1da177e4
LT
339 dp->get_page(dp, &page, &len, &offset);
340 len = min(len, to_bytes(remaining));
341 if (!bio_add_page(bio, page, len, offset))
342 break;
343
344 offset = 0;
345 remaining -= to_sector(len);
346 dp->next_page(dp);
347 }
348
349 atomic_inc(&io->count);
350 submit_bio(rw, bio);
12fc0f49 351 } while (remaining);
1da177e4
LT
352}
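
/*
 * Each pass through the do/while loop above allocates a single bio.
 * If bio_add_page() cannot accept another page, the inner loop breaks,
 * the partially filled bio is submitted, and the next iteration starts
 * a fresh bio from wherever the dpages cursor left off, until
 * 'remaining' reaches zero.
 */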

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

struct sync_io {
	unsigned long error_bits;
	struct completion wait;
};

static void sync_io_complete(unsigned long error, void *context)
{
	struct sync_io *sio = context;

	sio->error_bits = error;
	complete(&sio->wait);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	struct io *io;
	struct sync_io sio;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	init_completion(&sio.wait);

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = sync_io_complete;
	io->context = &sio;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	wait_for_completion_io(&sio.wait);

	if (error_bits)
		*error_bits = sio.error_bits;

	return sio.error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BIO:
		bio_dp_init(dp, io_req->mem.ptr.bio);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in io_req->bi_rw.
 * If you fail to do one of these, the IO will be submitted to the disk after
 * q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
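
/*
 * Usage sketch (illustrative, not part of this file): synchronously
 * read 'count' sectors from 'bdev' into a vmalloc'd buffer.  The names
 * 'client', 'bdev', 'buffer' and 'count' are hypothetical and assumed
 * to be set up by the caller; 'client' comes from dm_io_client_create().
 *
 *	struct dm_io_request io_req = {
 *		.bi_rw = READ,
 *		.mem.type = DM_IO_VMA,
 *		.mem.ptr.vma = buffer,
 *		.notify.fn = NULL,
 *		.client = client,
 *	};
 *	struct dm_io_region region = {
 *		.bdev = bdev,
 *		.sector = 0,
 *		.count = count,
 *	};
 *	unsigned long error_bits = 0;
 *	int r = dm_io(&io_req, 1, &region, &error_bits);
 */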

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}