/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
        void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
                          unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
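
/*
 * Example: a minimal bio map function in the style of dm-linear.  An
 * illustrative sketch only - "struct my_c" and its fields are
 * hypothetical, not part of this API:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct my_c *mc = ti->private;
 *
 *		bio->bi_bdev = mc->dev->bdev;
 *		bio->bi_iter.bi_sector = mc->start +
 *			dm_target_offset(ti, bio->bi_iter.bi_sector);
 *
 *		return DM_MAPIO_REMAPPED;	// = 1: simple remap complete
 *	}
 */
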
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
                                  union map_info *map_context);
typedef int (*dm_clone_and_map_request_fn) (struct dm_target *ti,
                                            struct request *rq,
                                            union map_info *map_context,
                                            struct request **clone);
typedef void (*dm_release_clone_request_fn) (struct request *clone);

/*
 * Returns:
 * < 0 : error (currently ignored)
 *   0 : ended successfully
 *   1 : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 *   2 : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
                            struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
                                    struct request *clone, int error,
                                    union map_info *map_context);
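
/*
 * Example: an end_io hook that pushes back failed I/O, loosely in the
 * spirit of what a multipath target does.  A sketch; my_path_usable()
 * is a hypothetical helper:
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio, int error)
 *	{
 *		if (error && !my_path_usable(ti->private))
 *			return DM_ENDIO_REQUEUE;	// 2: push back the io
 *
 *		return 0;				// ended successfully
 *	}
 */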

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_presuspend_undo_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
                              unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
                            unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
                            struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
                                           struct dm_dev *dev,
                                           sector_t start, sector_t len,
                                           void *data);
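
/*
 * Example: a callout answering "does this device support flush?", to be
 * combined via dm_iterate_devices below.  A sketch - querying
 * q->flush_flags is an assumption based on how dm-table does it:
 *
 *	static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 *					sector_t start, sector_t len, void *data)
 *	{
 *		struct request_queue *q = bdev_get_queue(dev->bdev);
 *
 *		return q && (q->flush_flags & REQ_FLUSH);  // non-zero stops iteration
 *	}
 *
 * ti->type->iterate_devices(ti, device_flush_capable, NULL) then returns
 * non-zero iff at least one underlying device supports flush.
 */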

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
                                      iterate_devices_callout_fn fn,
                                      void *data);

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
                                struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
        struct block_device *bdev;
        fmode_t mode;
        char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
                  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
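
/*
 * Example: a constructor obtaining its backing device.  A sketch;
 * "struct my_c" and the single-argument layout are hypothetical:
 *
 *	static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		struct my_c *mc;
 *
 *		if (argc != 1) {
 *			ti->error = "Invalid argument count";
 *			return -EINVAL;
 *		}
 *
 *		mc = kzalloc(sizeof(*mc), GFP_KERNEL);
 *		if (!mc)
 *			return -ENOMEM;
 *
 *		if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
 *				  &mc->dev)) {
 *			ti->error = "Device lookup failed";
 *			kfree(mc);
 *			return -EINVAL;
 *		}
 *
 *		ti->private = mc;
 *		return 0;
 *	}
 *
 * The matching destructor calls dm_put_device(ti, mc->dev) and frees mc.
 */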

/*
 * Information about a target type
 */

struct target_type {
        uint64_t features;
        const char *name;
        struct module *module;
        unsigned version[3];
        dm_ctr_fn ctr;
        dm_dtr_fn dtr;
        dm_map_fn map;
        dm_map_request_fn map_rq;
        dm_clone_and_map_request_fn clone_and_map_rq;
        dm_release_clone_request_fn release_clone_rq;
        dm_endio_fn end_io;
        dm_request_endio_fn rq_end_io;
        dm_presuspend_fn presuspend;
        dm_presuspend_undo_fn presuspend_undo;
        dm_postsuspend_fn postsuspend;
        dm_preresume_fn preresume;
        dm_resume_fn resume;
        dm_status_fn status;
        dm_message_fn message;
        dm_ioctl_fn ioctl;
        dm_merge_fn merge;
        dm_busy_fn busy;
        dm_iterate_devices_fn iterate_devices;
        dm_io_hints_fn io_hints;

        /* For internal device-mapper use. */
        struct list_head list;
};
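
/*
 * Example: defining and registering a target type from module init.
 * A sketch; "my_target" and the hook functions are hypothetical:
 *
 *	static struct target_type my_target = {
 *		.name    = "mytarget",
 *		.version = {1, 0, 0},
 *		.module  = THIS_MODULE,
 *		.ctr     = my_ctr,
 *		.dtr     = my_dtr,
 *		.map     = my_map,
 *	};
 *
 *	static int __init dm_my_init(void)
 *	{
 *		return dm_register_target(&my_target);
 *	}
 *
 * dm_register_target() and dm_unregister_target() are declared below.
 */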

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
        struct dm_table *table;
        struct target_type *type;

        /* target limits */
        sector_t begin;
        sector_t len;

        /* If non-zero, maximum size of I/O submitted to a target. */
        uint32_t max_io_len;

        /*
         * The number of zero-length barrier bios that will be submitted
         * to the target for the purpose of flushing cache.
         *
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         * It is the responsibility of the target driver to remap these bios
         * to the real underlying devices.
         */
        unsigned num_flush_bios;

        /*
         * The number of discard bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_discard_bios;

        /*
         * The number of WRITE SAME bios that will be submitted to the target.
         * The bio number can be accessed with dm_bio_get_target_bio_nr.
         */
        unsigned num_write_same_bios;

        /*
         * The minimum number of extra bytes allocated in each bio for the
         * target to use.  dm_per_bio_data returns the data location.
         */
        unsigned per_bio_data_size;

        /*
         * If defined, this function is called to find out how many
         * duplicate bios should be sent to the target when writing
         * data.
         */
        dm_num_write_bios_fn num_write_bios;

        /* target specific data */
        void *private;

        /* Used to provide an error string from the ctr */
        char *error;

        /*
         * Set if this target needs to receive flushes regardless of
         * whether or not its underlying devices have support.
         */
        bool flush_supported:1;

        /*
         * Set if this target needs to receive discards regardless of
         * whether or not its underlying devices have support.
         */
        bool discards_supported:1;

        /*
         * Set if the target requires that discard bios be split
         * on the max_io_len boundary.
         */
        bool split_discard_bios:1;

        /*
         * Set if this target does not return zeroes on discarded blocks.
         */
        bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
        struct list_head list;
        int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data
 */
struct dm_target_io {
        struct dm_io *io;
        struct dm_target *ti;
        unsigned target_bio_nr;
        unsigned *len_ptr;
        struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
        return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
        return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
        return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
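
/*
 * Example of the per-bio-data helpers above: the target asks for space
 * in its constructor and recovers it in map/end_io.  A sketch;
 * "struct my_bio_data" and its field are hypothetical:
 *
 *	// in the constructor:
 *	ti->per_bio_data_size = sizeof(struct my_bio_data);
 *
 *	// in the map function:
 *	struct my_bio_data *pb = dm_per_bio_data(bio, sizeof(struct my_bio_data));
 *	pb->start_jiffies = jiffies;
 */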

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
        unsigned argc;
        char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
        unsigned min;
        unsigned max;
        char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
                unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
                      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
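
/*
 * Example: parsing a "<dev> <#features> [<feature>...]" argument list
 * with the helpers above.  A sketch; the argument layout and bounds
 * are hypothetical:
 *
 *	static struct dm_arg _args[] = {
 *		{0, 16, "Invalid number of features"},
 *	};
 *
 *	struct dm_arg_set as = { .argc = argc, .argv = argv };
 *	const char *dev_path = dm_shift_arg(&as);
 *	unsigned num_features;
 *	int r = dm_read_arg_group(&_args[0], &as, &num_features, &ti->error);
 *
 *	if (r)
 *		return r;	// -EINVAL, ti->error already set
 */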

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
int dm_hold(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
                    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
                        sector_t start, sector_t len, char *params);

/*
 * Target_ctr should call this if it needs to add any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);
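
/*
 * Example: the create/populate/complete sequence for an in-kernel table
 * user.  A sketch; dev_size and the "linear" parameter string are
 * illustrative only:
 *
 *	struct dm_table *t;
 *	char params[] = "/dev/sda1 0";	// <dev> <offset>
 *	int r = dm_table_create(&t, FMODE_READ | FMODE_WRITE, 1, md);
 *
 *	if (!r)
 *		r = dm_table_add_target(t, "linear", 0, dev_size, params);
 *	if (!r)
 *		r = dm_table_complete(t);
 */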

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);
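
/*
 * Example: every dm_get_live_table() must be paired with a
 * dm_put_live_table() using the same srcu_idx:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		// ... use map ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */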

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
                               struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif
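
/*
 * The message macros below all reference DM_MSG_PREFIX, which the
 * including file is expected to #define first, e.g.:
 *
 *	#define DM_MSG_PREFIX "mytarget"
 *	...
 *	DMERR("%s: resume failed", dm_device_name(md));
 */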

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit()) \
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
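
/*
 * DMEMIT assumes local variables named sz, result and maxlen, matching
 * the dm_status_fn prototype.  Example status function (a sketch;
 * "struct my_c" is hypothetical):
 *
 *	static void my_status(struct dm_target *ti, status_type_t type,
 *			      unsigned status_flags, char *result, unsigned maxlen)
 *	{
 *		struct my_c *mc = ti->private;
 *		unsigned sz = 0;
 *
 *		if (type == STATUSTYPE_TABLE)
 *			DMEMIT("%s", mc->dev->name);
 *	}
 */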

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
        return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
        return (n << SECTOR_SHIFT);
}
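
/*
 * With SECTOR_SHIFT == 9, to_sector(4096) == 8 and to_bytes(8) == 4096;
 * to_sector() truncates any partial sector.
 */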

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
int dm_underlying_device_busy(struct request_queue *q);

#endif	/* _LINUX_DEVICE_MAPPER_H */