Merge tag 'dt-for-linus-2' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc
[deliverable/linux.git] / include / linux / device-mapper.h
1 /*
2 * Copyright (C) 2001 Sistina Software (UK) Limited.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This file is released under the LGPL.
6 */
7
8 #ifndef _LINUX_DEVICE_MAPPER_H
9 #define _LINUX_DEVICE_MAPPER_H
10
11 #include <linux/bio.h>
12 #include <linux/blkdev.h>
13 #include <linux/ratelimit.h>
14
15 struct dm_dev;
16 struct dm_target;
17 struct dm_table;
18 struct mapped_device;
19 struct bio_vec;
20
21 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
22
23 union map_info {
24 void *ptr;
25 unsigned long long ll;
26 };
27
28 /*
29 * In the constructor the target parameter will already have the
30 * table, type, begin and len fields filled in.
31 */
32 typedef int (*dm_ctr_fn) (struct dm_target *target,
33 unsigned int argc, char **argv);
34
35 /*
36 * The destructor doesn't need to free the dm_target, just
37 * anything hidden ti->private.
38 */
39 typedef void (*dm_dtr_fn) (struct dm_target *ti);
40
41 /*
42 * The map function must return:
43 * < 0: error
44 * = 0: The target will handle the io by resubmitting it later
45 * = 1: simple remap complete
46 * = 2: The target wants to push back the io
47 */
48 typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
49 typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
50 union map_info *map_context);
51
52 /*
53 * Returns:
54 * < 0 : error (currently ignored)
55 * 0 : ended successfully
56 * 1 : for some reason the io has still not completed (eg,
57 * multipath target might want to requeue a failed io).
58 * 2 : The target wants to push back the io
59 */
60 typedef int (*dm_endio_fn) (struct dm_target *ti,
61 struct bio *bio, int error);
62 typedef int (*dm_request_endio_fn) (struct dm_target *ti,
63 struct request *clone, int error,
64 union map_info *map_context);
65
66 typedef void (*dm_presuspend_fn) (struct dm_target *ti);
67 typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
68 typedef int (*dm_preresume_fn) (struct dm_target *ti);
69 typedef void (*dm_resume_fn) (struct dm_target *ti);
70
71 typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
72 unsigned status_flags, char *result, unsigned maxlen);
73
74 typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);
75
76 typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
77 unsigned long arg);
78
79 typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
80 struct bio_vec *biovec, int max_size);
81
82 typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
83 struct dm_dev *dev,
84 sector_t start, sector_t len,
85 void *data);
86
87 typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
88 iterate_devices_callout_fn fn,
89 void *data);
90
91 typedef void (*dm_io_hints_fn) (struct dm_target *ti,
92 struct queue_limits *limits);
93
94 /*
95 * Returns:
96 * 0: The target can handle the next I/O immediately.
97 * 1: The target can't handle the next I/O immediately.
98 */
99 typedef int (*dm_busy_fn) (struct dm_target *ti);
100
101 void dm_error(const char *message);
102
103 /*
104 * Combine device limits.
105 */
106 int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
107 sector_t start, sector_t len, void *data);
108
109 struct dm_dev {
110 struct block_device *bdev;
111 fmode_t mode;
112 char name[16];
113 };
114
115 /*
116 * Constructors should call these functions to ensure destination devices
117 * are opened/closed correctly.
118 */
119 int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
120 struct dm_dev **result);
121 void dm_put_device(struct dm_target *ti, struct dm_dev *d);
122
123 /*
124 * Information about a target type
125 */
126
127 struct target_type {
128 uint64_t features;
129 const char *name;
130 struct module *module;
131 unsigned version[3];
132 dm_ctr_fn ctr;
133 dm_dtr_fn dtr;
134 dm_map_fn map;
135 dm_map_request_fn map_rq;
136 dm_endio_fn end_io;
137 dm_request_endio_fn rq_end_io;
138 dm_presuspend_fn presuspend;
139 dm_postsuspend_fn postsuspend;
140 dm_preresume_fn preresume;
141 dm_resume_fn resume;
142 dm_status_fn status;
143 dm_message_fn message;
144 dm_ioctl_fn ioctl;
145 dm_merge_fn merge;
146 dm_busy_fn busy;
147 dm_iterate_devices_fn iterate_devices;
148 dm_io_hints_fn io_hints;
149
150 /* For internal device-mapper use. */
151 struct list_head list;
152 };
153
154 /*
155 * Target features
156 */
157
158 /*
159 * Any table that contains an instance of this target must have only one.
160 */
161 #define DM_TARGET_SINGLETON 0x00000001
162 #define dm_target_needs_singleton(type) ((type)->features & DM_TARGET_SINGLETON)
163
164 /*
165 * Indicates that a target does not support read-only devices.
166 */
167 #define DM_TARGET_ALWAYS_WRITEABLE 0x00000002
168 #define dm_target_always_writeable(type) \
169 ((type)->features & DM_TARGET_ALWAYS_WRITEABLE)
170
171 /*
172 * Any device that contains a table with an instance of this target may never
173 * have tables containing any different target type.
174 */
175 #define DM_TARGET_IMMUTABLE 0x00000004
176 #define dm_target_is_immutable(type) ((type)->features & DM_TARGET_IMMUTABLE)
177
178 /*
179 * Some targets need to be sent the same WRITE bio severals times so
180 * that they can send copies of it to different devices. This function
181 * examines any supplied bio and returns the number of copies of it the
182 * target requires.
183 */
184 typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
185
186 struct dm_target {
187 struct dm_table *table;
188 struct target_type *type;
189
190 /* target limits */
191 sector_t begin;
192 sector_t len;
193
194 /* If non-zero, maximum size of I/O submitted to a target. */
195 uint32_t max_io_len;
196
197 /*
198 * A number of zero-length barrier bios that will be submitted
199 * to the target for the purpose of flushing cache.
200 *
201 * The bio number can be accessed with dm_bio_get_target_bio_nr.
202 * It is a responsibility of the target driver to remap these bios
203 * to the real underlying devices.
204 */
205 unsigned num_flush_bios;
206
207 /*
208 * The number of discard bios that will be submitted to the target.
209 * The bio number can be accessed with dm_bio_get_target_bio_nr.
210 */
211 unsigned num_discard_bios;
212
213 /*
214 * The number of WRITE SAME bios that will be submitted to the target.
215 * The bio number can be accessed with dm_bio_get_target_bio_nr.
216 */
217 unsigned num_write_same_bios;
218
219 /*
220 * The minimum number of extra bytes allocated in each bio for the
221 * target to use. dm_per_bio_data returns the data location.
222 */
223 unsigned per_bio_data_size;
224
225 /*
226 * If defined, this function is called to find out how many
227 * duplicate bios should be sent to the target when writing
228 * data.
229 */
230 dm_num_write_bios_fn num_write_bios;
231
232 /* target specific data */
233 void *private;
234
235 /* Used to provide an error string from the ctr */
236 char *error;
237
238 /*
239 * Set if this target needs to receive flushes regardless of
240 * whether or not its underlying devices have support.
241 */
242 bool flush_supported:1;
243
244 /*
245 * Set if this target needs to receive discards regardless of
246 * whether or not its underlying devices have support.
247 */
248 bool discards_supported:1;
249
250 /*
251 * Set if the target required discard bios to be split
252 * on max_io_len boundary.
253 */
254 bool split_discard_bios:1;
255
256 /*
257 * Set if this target does not return zeroes on discarded blocks.
258 */
259 bool discard_zeroes_data_unsupported:1;
260 };
261
262 /* Each target can link one of these into the table */
263 struct dm_target_callbacks {
264 struct list_head list;
265 int (*congested_fn) (struct dm_target_callbacks *, int);
266 };
267
268 /*
269 * For bio-based dm.
270 * One of these is allocated for each bio.
271 * This structure shouldn't be touched directly by target drivers.
272 * It is here so that we can inline dm_per_bio_data and
273 * dm_bio_from_per_bio_data
274 */
275 struct dm_target_io {
276 struct dm_io *io;
277 struct dm_target *ti;
278 union map_info info;
279 unsigned target_bio_nr;
280 struct bio clone;
281 };
282
283 static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
284 {
285 return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
286 }
287
288 static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
289 {
290 return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
291 }
292
293 static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
294 {
295 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
296 }
297
298 int dm_register_target(struct target_type *t);
299 void dm_unregister_target(struct target_type *t);
300
301 /*
302 * Target argument parsing.
303 */
304 struct dm_arg_set {
305 unsigned argc;
306 char **argv;
307 };
308
309 /*
310 * The minimum and maximum value of a numeric argument, together with
311 * the error message to use if the number is found to be outside that range.
312 */
313 struct dm_arg {
314 unsigned min;
315 unsigned max;
316 char *error;
317 };
318
319 /*
320 * Validate the next argument, either returning it as *value or, if invalid,
321 * returning -EINVAL and setting *error.
322 */
323 int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
324 unsigned *value, char **error);
325
326 /*
327 * Process the next argument as the start of a group containing between
328 * arg->min and arg->max further arguments. Either return the size as
329 * *num_args or, if invalid, return -EINVAL and set *error.
330 */
331 int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
332 unsigned *num_args, char **error);
333
334 /*
335 * Return the current argument and shift to the next.
336 */
337 const char *dm_shift_arg(struct dm_arg_set *as);
338
339 /*
340 * Move through num_args arguments.
341 */
342 void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
343
344 /*-----------------------------------------------------------------
345 * Functions for creating and manipulating mapped devices.
346 * Drop the reference with dm_put when you finish with the object.
347 *---------------------------------------------------------------*/
348
349 /*
350 * DM_ANY_MINOR chooses the next available minor number.
351 */
352 #define DM_ANY_MINOR (-1)
353 int dm_create(int minor, struct mapped_device **md);
354
355 /*
356 * Reference counting for md.
357 */
358 struct mapped_device *dm_get_md(dev_t dev);
359 void dm_get(struct mapped_device *md);
360 void dm_put(struct mapped_device *md);
361
362 /*
363 * An arbitrary pointer may be stored alongside a mapped device.
364 */
365 void dm_set_mdptr(struct mapped_device *md, void *ptr);
366 void *dm_get_mdptr(struct mapped_device *md);
367
368 /*
369 * A device can still be used while suspended, but I/O is deferred.
370 */
371 int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
372 int dm_resume(struct mapped_device *md);
373
374 /*
375 * Event functions.
376 */
377 uint32_t dm_get_event_nr(struct mapped_device *md);
378 int dm_wait_event(struct mapped_device *md, int event_nr);
379 uint32_t dm_next_uevent_seq(struct mapped_device *md);
380 void dm_uevent_add(struct mapped_device *md, struct list_head *elist);
381
382 /*
383 * Info functions.
384 */
385 const char *dm_device_name(struct mapped_device *md);
386 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
387 struct gendisk *dm_disk(struct mapped_device *md);
388 int dm_suspended(struct dm_target *ti);
389 int dm_noflush_suspending(struct dm_target *ti);
390 union map_info *dm_get_mapinfo(struct bio *bio);
391 union map_info *dm_get_rq_mapinfo(struct request *rq);
392
393 /*
394 * Geometry functions.
395 */
396 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
397 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);
398
399
400 /*-----------------------------------------------------------------
401 * Functions for manipulating device-mapper tables.
402 *---------------------------------------------------------------*/
403
404 /*
405 * First create an empty table.
406 */
407 int dm_table_create(struct dm_table **result, fmode_t mode,
408 unsigned num_targets, struct mapped_device *md);
409
410 /*
411 * Then call this once for each target.
412 */
413 int dm_table_add_target(struct dm_table *t, const char *type,
414 sector_t start, sector_t len, char *params);
415
416 /*
417 * Target_ctr should call this if it needs to add any callbacks.
418 */
419 void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
420
421 /*
422 * Finally call this to make the table ready for use.
423 */
424 int dm_table_complete(struct dm_table *t);
425
426 /*
427 * Target may require that it is never sent I/O larger than len.
428 */
429 int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
430
431 /*
432 * Table reference counting.
433 */
434 struct dm_table *dm_get_live_table(struct mapped_device *md);
435 void dm_table_get(struct dm_table *t);
436 void dm_table_put(struct dm_table *t);
437
438 /*
439 * Queries
440 */
441 sector_t dm_table_get_size(struct dm_table *t);
442 unsigned int dm_table_get_num_targets(struct dm_table *t);
443 fmode_t dm_table_get_mode(struct dm_table *t);
444 struct mapped_device *dm_table_get_md(struct dm_table *t);
445
446 /*
447 * Trigger an event.
448 */
449 void dm_table_event(struct dm_table *t);
450
451 /*
452 * The device must be suspended before calling this method.
453 * Returns the previous table, which the caller must destroy.
454 */
455 struct dm_table *dm_swap_table(struct mapped_device *md,
456 struct dm_table *t);
457
458 /*
459 * A wrapper around vmalloc.
460 */
461 void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
462
463 /*-----------------------------------------------------------------
464 * Macros.
465 *---------------------------------------------------------------*/
466 #define DM_NAME "device-mapper"
467
468 #ifdef CONFIG_PRINTK
469 extern struct ratelimit_state dm_ratelimit_state;
470
471 #define dm_ratelimit() __ratelimit(&dm_ratelimit_state)
472 #else
473 #define dm_ratelimit() 0
474 #endif
475
476 #define DMCRIT(f, arg...) \
477 printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
478
479 #define DMERR(f, arg...) \
480 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
481 #define DMERR_LIMIT(f, arg...) \
482 do { \
483 if (dm_ratelimit()) \
484 printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
485 f "\n", ## arg); \
486 } while (0)
487
488 #define DMWARN(f, arg...) \
489 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
490 #define DMWARN_LIMIT(f, arg...) \
491 do { \
492 if (dm_ratelimit()) \
493 printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
494 f "\n", ## arg); \
495 } while (0)
496
497 #define DMINFO(f, arg...) \
498 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
499 #define DMINFO_LIMIT(f, arg...) \
500 do { \
501 if (dm_ratelimit()) \
502 printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
503 "\n", ## arg); \
504 } while (0)
505
506 #ifdef CONFIG_DM_DEBUG
507 # define DMDEBUG(f, arg...) \
508 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
509 # define DMDEBUG_LIMIT(f, arg...) \
510 do { \
511 if (dm_ratelimit()) \
512 printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
513 "\n", ## arg); \
514 } while (0)
515 #else
516 # define DMDEBUG(f, arg...) do {} while (0)
517 # define DMDEBUG_LIMIT(f, arg...) do {} while (0)
518 #endif
519
520 #define DMEMIT(x...) sz += ((sz >= maxlen) ? \
521 0 : scnprintf(result + sz, maxlen - sz, x))
522
523 #define SECTOR_SHIFT 9
524
525 /*
526 * Definitions of return values from target end_io function.
527 */
528 #define DM_ENDIO_INCOMPLETE 1
529 #define DM_ENDIO_REQUEUE 2
530
531 /*
532 * Definitions of return values from target map function.
533 */
534 #define DM_MAPIO_SUBMITTED 0
535 #define DM_MAPIO_REMAPPED 1
536 #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
537
538 /*
539 * Ceiling(n / sz)
540 */
541 #define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))
542
543 #define dm_sector_div_up(n, sz) ( \
544 { \
545 sector_t _r = ((n) + (sz) - 1); \
546 sector_div(_r, (sz)); \
547 _r; \
548 } \
549 )
550
551 /*
552 * ceiling(n / size) * size
553 */
554 #define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
555
556 #define dm_array_too_big(fixed, obj, num) \
557 ((num) > (UINT_MAX - (fixed)) / (obj))
558
559 /*
560 * Sector offset taken relative to the start of the target instead of
561 * relative to the start of the device.
562 */
563 #define dm_target_offset(ti, sector) ((sector) - (ti)->begin)
564
565 static inline sector_t to_sector(unsigned long n)
566 {
567 return (n >> SECTOR_SHIFT);
568 }
569
570 static inline unsigned long to_bytes(sector_t n)
571 {
572 return (n << SECTOR_SHIFT);
573 }
574
575 /*-----------------------------------------------------------------
576 * Helper for block layer and dm core operations
577 *---------------------------------------------------------------*/
578 void dm_dispatch_request(struct request *rq);
579 void dm_requeue_unmapped_request(struct request *rq);
580 void dm_kill_unmapped_request(struct request *rq, int error);
581 int dm_underlying_device_busy(struct request_queue *q);
582
583 #endif /* _LINUX_DEVICE_MAPPER_H */
This page took 0.102448 seconds and 5 git commands to generate.