/*
 * Copyright (C) 2001 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef _LINUX_DEVICE_MAPPER_H
#define _LINUX_DEVICE_MAPPER_H

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>

struct dm_dev;
struct dm_target;
struct dm_table;
struct mapped_device;
struct bio_vec;

typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;

union map_info {
	void *ptr;
};

/*
 * In the constructor the target parameter will already have the
 * table, type, begin and len fields filled in.
 */
typedef int (*dm_ctr_fn) (struct dm_target *target,
			  unsigned int argc, char **argv);

/*
 * The destructor doesn't need to free the dm_target, just
 * anything hidden in ti->private.
 */
typedef void (*dm_dtr_fn) (struct dm_target *ti);

/*
 * The map function must return:
 * < 0: error
 * = 0: The target will handle the io by resubmitting it later
 * = 1: simple remap complete
 * = 2: The target wants to push back the io
 */
typedef int (*dm_map_fn) (struct dm_target *ti, struct bio *bio);
typedef int (*dm_map_request_fn) (struct dm_target *ti, struct request *clone,
				  union map_info *map_context);
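
/*
 * A minimal bio-based map function, compiled out and for illustration
 * only: it remaps the bio onto a single underlying device, much as the
 * linear target does.  "example_c" and its fields are hypothetical;
 * DM_MAPIO_REMAPPED and dm_target_offset are defined later in this
 * header.
 */
#if 0
static int example_map(struct dm_target *ti, struct bio *bio)
{
	struct example_c *ec = ti->private;	/* set up by the constructor */

	bio->bi_bdev = ec->dev->bdev;
	bio->bi_iter.bi_sector = ec->start +
		dm_target_offset(ti, bio->bi_iter.bi_sector);

	return DM_MAPIO_REMAPPED;	/* dm core submits the remapped bio */
}
#endif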

/*
 * Returns:
 * < 0 : error (currently ignored)
 * 0   : ended successfully
 * 1   : for some reason the io has still not completed (eg,
 *       multipath target might want to requeue a failed io).
 * 2   : The target wants to push back the io
 */
typedef int (*dm_endio_fn) (struct dm_target *ti,
			    struct bio *bio, int error);
typedef int (*dm_request_endio_fn) (struct dm_target *ti,
				    struct request *clone, int error,
				    union map_info *map_context);

typedef void (*dm_presuspend_fn) (struct dm_target *ti);
typedef void (*dm_postsuspend_fn) (struct dm_target *ti);
typedef int (*dm_preresume_fn) (struct dm_target *ti);
typedef void (*dm_resume_fn) (struct dm_target *ti);

typedef void (*dm_status_fn) (struct dm_target *ti, status_type_t status_type,
			      unsigned status_flags, char *result, unsigned maxlen);

typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv);

typedef int (*dm_ioctl_fn) (struct dm_target *ti, unsigned int cmd,
			    unsigned long arg);

typedef int (*dm_merge_fn) (struct dm_target *ti, struct bvec_merge_data *bvm,
			    struct bio_vec *biovec, int max_size);

/*
 * These iteration functions are typically used to check (and combine)
 * properties of underlying devices.
 * E.g. Does at least one underlying device support flush?
 *      Does any underlying device not support WRITE_SAME?
 *
 * The callout function is called once for each contiguous section of
 * an underlying device.  State can be maintained in *data.
 * Return non-zero to stop iterating through any further devices.
 */
typedef int (*iterate_devices_callout_fn) (struct dm_target *ti,
					   struct dm_dev *dev,
					   sector_t start, sector_t len,
					   void *data);

/*
 * This function must iterate through each section of device used by the
 * target until it encounters a non-zero return code, which it then returns.
 * Returns zero if no callout returned non-zero.
 */
typedef int (*dm_iterate_devices_fn) (struct dm_target *ti,
				      iterate_devices_callout_fn fn,
				      void *data);
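
/*
 * An illustrative callout (compiled out), modelled on the checks that
 * dm core performs over a table's devices: it reports an underlying
 * device that cannot handle WRITE SAME.  A target's iterate_devices
 * method would run it once per contiguous section of each device.
 */
#if 0
static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
					 sector_t start, sector_t len, void *data)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);

	return q && !q->limits.max_write_same_sectors;
}
#endif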

typedef void (*dm_io_hints_fn) (struct dm_target *ti,
				struct queue_limits *limits);

/*
 * Returns:
 *    0: The target can handle the next I/O immediately.
 *    1: The target can't handle the next I/O immediately.
 */
typedef int (*dm_busy_fn) (struct dm_target *ti);

void dm_error(const char *message);

struct dm_dev {
	struct block_device *bdev;
	fmode_t mode;
	char name[16];
};

/*
 * Constructors should call these functions to ensure destination devices
 * are opened/closed correctly.
 */
int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
		  struct dm_dev **result);
void dm_put_device(struct dm_target *ti, struct dm_dev *d);
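
/*
 * A compiled-out sketch of a constructor acquiring its one destination
 * device; "example_ctr" is hypothetical.  dm_table_get_mode is declared
 * further down in this header.
 */
#if 0
static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev)) {
		ti->error = "Device lookup failed";
		return -EINVAL;
	}

	ti->private = dev;	/* the destructor will dm_put_device() it */
	return 0;
}
#endif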

/*
 * Information about a target type
 */

struct target_type {
	uint64_t features;
	const char *name;
	struct module *module;
	unsigned version[3];
	dm_ctr_fn ctr;
	dm_dtr_fn dtr;
	dm_map_fn map;
	dm_map_request_fn map_rq;
	dm_endio_fn end_io;
	dm_request_endio_fn rq_end_io;
	dm_presuspend_fn presuspend;
	dm_postsuspend_fn postsuspend;
	dm_preresume_fn preresume;
	dm_resume_fn resume;
	dm_status_fn status;
	dm_message_fn message;
	dm_ioctl_fn ioctl;
	dm_merge_fn merge;
	dm_busy_fn busy;
	dm_iterate_devices_fn iterate_devices;
	dm_io_hints_fn io_hints;

	/* For internal device-mapper use. */
	struct list_head list;
};
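
/*
 * A target module typically fills in one static target_type and
 * registers it from its init function, following the pattern of the
 * in-tree targets.  A compiled-out sketch with hypothetical names
 * (dm_register_target/dm_unregister_target are declared below):
 */
#if 0
static struct target_type example_target = {
	.name	 = "example",
	.version = {1, 0, 0},
	.module	 = THIS_MODULE,
	.ctr	 = example_ctr,
	.dtr	 = example_dtr,
	.map	 = example_map,
};

static int __init dm_example_init(void)
{
	return dm_register_target(&example_target);
}

static void __exit dm_example_exit(void)
{
	dm_unregister_target(&example_target);
}
#endif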

/*
 * Target features
 */

/*
 * Any table that contains an instance of this target must have only one.
 */
#define DM_TARGET_SINGLETON		0x00000001
#define dm_target_needs_singleton(type)	((type)->features & DM_TARGET_SINGLETON)

/*
 * Indicates that a target does not support read-only devices.
 */
#define DM_TARGET_ALWAYS_WRITEABLE	0x00000002
#define dm_target_always_writeable(type) \
		((type)->features & DM_TARGET_ALWAYS_WRITEABLE)

/*
 * Any device that contains a table with an instance of this target may never
 * have tables containing any different target type.
 */
#define DM_TARGET_IMMUTABLE		0x00000004
#define dm_target_is_immutable(type)	((type)->features & DM_TARGET_IMMUTABLE)

/*
 * Some targets need to be sent the same WRITE bio several times so
 * that they can send copies of it to different devices.  This function
 * examines any supplied bio and returns the number of copies of it the
 * target requires.
 */
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);

struct dm_target {
	struct dm_table *table;
	struct target_type *type;

	/* target limits */
	sector_t begin;
	sector_t len;

	/* If non-zero, maximum size of I/O submitted to a target. */
	uint32_t max_io_len;

	/*
	 * A number of zero-length barrier bios that will be submitted
	 * to the target for the purpose of flushing cache.
	 *
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 * It is the responsibility of the target driver to remap these bios
	 * to the real underlying devices.
	 */
	unsigned num_flush_bios;

	/*
	 * The number of discard bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_discard_bios;

	/*
	 * The number of WRITE SAME bios that will be submitted to the target.
	 * The bio number can be accessed with dm_bio_get_target_bio_nr.
	 */
	unsigned num_write_same_bios;

	/*
	 * The minimum number of extra bytes allocated in each bio for the
	 * target to use.  dm_per_bio_data returns the data location.
	 */
	unsigned per_bio_data_size;

	/*
	 * If defined, this function is called to find out how many
	 * duplicate bios should be sent to the target when writing
	 * data.
	 */
	dm_num_write_bios_fn num_write_bios;

	/* target specific data */
	void *private;

	/* Used to provide an error string from the ctr */
	char *error;

	/*
	 * Set if this target needs to receive flushes regardless of
	 * whether or not its underlying devices have support.
	 */
	bool flush_supported:1;

	/*
	 * Set if this target needs to receive discards regardless of
	 * whether or not its underlying devices have support.
	 */
	bool discards_supported:1;

	/*
	 * Set if the target requires that discard bios be split
	 * on a max_io_len boundary.
	 */
	bool split_discard_bios:1;

	/*
	 * Set if this target does not return zeroes on discarded blocks.
	 */
	bool discard_zeroes_data_unsupported:1;
};

/* Each target can link one of these into the table */
struct dm_target_callbacks {
	struct list_head list;
	int (*congested_fn) (struct dm_target_callbacks *, int);
};

/*
 * For bio-based dm.
 * One of these is allocated for each bio.
 * This structure shouldn't be touched directly by target drivers.
 * It is here so that we can inline dm_per_bio_data and
 * dm_bio_from_per_bio_data.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	unsigned target_bio_nr;
	unsigned *len_ptr;
	struct bio clone;
};

static inline void *dm_per_bio_data(struct bio *bio, size_t data_size)
{
	return (char *)bio - offsetof(struct dm_target_io, clone) - data_size;
}

static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
{
	return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone));
}

static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio)
{
	return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
}
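
/*
 * A compiled-out sketch of how a target uses these helpers: the
 * constructor reserves space by setting ti->per_bio_data_size, and the
 * map or end_io path then recovers its slot with dm_per_bio_data.
 * "example_per_bio_data" and the function name are hypothetical.
 */
#if 0
struct example_per_bio_data {
	unsigned bio_nr;
};

static int example_map_with_data(struct dm_target *ti, struct bio *bio)
{
	/* ti->per_bio_data_size == sizeof(struct example_per_bio_data),
	   set in the constructor */
	struct example_per_bio_data *pb =
		dm_per_bio_data(bio, sizeof(struct example_per_bio_data));

	pb->bio_nr = dm_bio_get_target_bio_nr(bio);
	return DM_MAPIO_REMAPPED;
}
#endif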

int dm_register_target(struct target_type *t);
void dm_unregister_target(struct target_type *t);

/*
 * Target argument parsing.
 */
struct dm_arg_set {
	unsigned argc;
	char **argv;
};

/*
 * The minimum and maximum value of a numeric argument, together with
 * the error message to use if the number is found to be outside that range.
 */
struct dm_arg {
	unsigned min;
	unsigned max;
	char *error;
};

/*
 * Validate the next argument, either returning it as *value or, if invalid,
 * returning -EINVAL and setting *error.
 */
int dm_read_arg(struct dm_arg *arg, struct dm_arg_set *arg_set,
		unsigned *value, char **error);

/*
 * Process the next argument as the start of a group containing between
 * arg->min and arg->max further arguments. Either return the size as
 * *num_args or, if invalid, return -EINVAL and set *error.
 */
int dm_read_arg_group(struct dm_arg *arg, struct dm_arg_set *arg_set,
		      unsigned *num_args, char **error);

/*
 * Return the current argument and shift to the next.
 */
const char *dm_shift_arg(struct dm_arg_set *as);

/*
 * Move through num_args arguments.
 */
void dm_consume_args(struct dm_arg_set *as, unsigned num_args);
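
/*
 * A compiled-out sketch of the parsing helpers in use, loosely
 * following the in-tree targets; the function name, feature name and
 * error strings are hypothetical.
 */
#if 0
static int example_parse_features(struct dm_arg_set *as, struct dm_target *ti)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of feature arguments"},
	};
	unsigned num_features;
	const char *arg;
	int r;

	/* consume "<#features> <feature>*" */
	r = dm_read_arg_group(_args, as, &num_features, &ti->error);
	if (r)
		return r;

	while (num_features--) {
		arg = dm_shift_arg(as);
		if (!strcasecmp(arg, "writethrough"))
			continue;	/* record the feature here */

		ti->error = "Unrecognised feature requested";
		return -EINVAL;
	}

	return 0;
}
#endif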

/*-----------------------------------------------------------------
 * Functions for creating and manipulating mapped devices.
 * Drop the reference with dm_put when you finish with the object.
 *---------------------------------------------------------------*/

/*
 * DM_ANY_MINOR chooses the next available minor number.
 */
#define DM_ANY_MINOR (-1)
int dm_create(int minor, struct mapped_device **md);

/*
 * Reference counting for md.
 */
struct mapped_device *dm_get_md(dev_t dev);
void dm_get(struct mapped_device *md);
void dm_put(struct mapped_device *md);

/*
 * An arbitrary pointer may be stored alongside a mapped device.
 */
void dm_set_mdptr(struct mapped_device *md, void *ptr);
void *dm_get_mdptr(struct mapped_device *md);

/*
 * A device can still be used while suspended, but I/O is deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags);
int dm_resume(struct mapped_device *md);

/*
 * Event functions.
 */
uint32_t dm_get_event_nr(struct mapped_device *md);
int dm_wait_event(struct mapped_device *md, int event_nr);
uint32_t dm_next_uevent_seq(struct mapped_device *md);
void dm_uevent_add(struct mapped_device *md, struct list_head *elist);

/*
 * Info functions.
 */
const char *dm_device_name(struct mapped_device *md);
int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid);
struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
union map_info *dm_get_rq_mapinfo(struct request *rq);

struct queue_limits *dm_get_queue_limits(struct mapped_device *md);

/*
 * Geometry functions.
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo);
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo);

/*-----------------------------------------------------------------
 * Functions for manipulating device-mapper tables.
 *---------------------------------------------------------------*/

/*
 * First create an empty table.
 */
int dm_table_create(struct dm_table **result, fmode_t mode,
		    unsigned num_targets, struct mapped_device *md);

/*
 * Then call this once for each target.
 */
int dm_table_add_target(struct dm_table *t, const char *type,
			sector_t start, sector_t len, char *params);

/*
 * A target's constructor (ctr) should call this if it needs to add
 * any callbacks.
 */
void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);

/*
 * Finally call this to make the table ready for use.
 */
int dm_table_complete(struct dm_table *t);

/*
 * Target may require that it is never sent I/O larger than len.
 */
int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len);
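
/*
 * Excerpt from a hypothetical constructor: the return value must be
 * checked (the function is __must_check).  "chunk_size" stands for a
 * target-specific parameter.
 */
#if 0
	r = dm_set_target_max_io_len(ti, chunk_size);
	if (r)
		return r;
#endif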

/*
 * Table reference counting.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx);
void dm_put_live_table(struct mapped_device *md, int srcu_idx);
void dm_sync_table(struct mapped_device *md);

/*
 * Queries
 */
sector_t dm_table_get_size(struct dm_table *t);
unsigned int dm_table_get_num_targets(struct dm_table *t);
fmode_t dm_table_get_mode(struct dm_table *t);
struct mapped_device *dm_table_get_md(struct dm_table *t);

/*
 * Trigger an event.
 */
void dm_table_event(struct dm_table *t);

/*
 * Run the queue for request-based targets.
 */
void dm_table_run_md_queue_async(struct dm_table *t);

/*
 * The device must be suspended before calling this method.
 * Returns the previous table, which the caller must destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md,
			       struct dm_table *t);

/*
 * A wrapper around vmalloc.
 */
void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);

/*-----------------------------------------------------------------
 * Macros.
 *---------------------------------------------------------------*/
#define DM_NAME "device-mapper"

#ifdef CONFIG_PRINTK
extern struct ratelimit_state dm_ratelimit_state;

#define dm_ratelimit()	__ratelimit(&dm_ratelimit_state)
#else
#define dm_ratelimit()	0
#endif

#define DMCRIT(f, arg...) \
	printk(KERN_CRIT DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)

#define DMERR(f, arg...) \
	printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMERR_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_ERR DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMWARN(f, arg...) \
	printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMWARN_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_WARNING DM_NAME ": " DM_MSG_PREFIX ": " \
			       f "\n", ## arg); \
	} while (0)

#define DMINFO(f, arg...) \
	printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f "\n", ## arg)
#define DMINFO_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_INFO DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)

#ifdef CONFIG_DM_DEBUG
# define DMDEBUG(f, arg...) \
	printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX " DEBUG: " f "\n", ## arg)
# define DMDEBUG_LIMIT(f, arg...) \
	do { \
		if (dm_ratelimit())	\
			printk(KERN_DEBUG DM_NAME ": " DM_MSG_PREFIX ": " f \
			       "\n", ## arg); \
	} while (0)
#else
# define DMDEBUG(f, arg...) do {} while (0)
# define DMDEBUG_LIMIT(f, arg...) do {} while (0)
#endif
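
/*
 * Each user of these logging macros defines DM_MSG_PREFIX before the
 * first use, so messages identify their subsystem.  A compiled-out
 * illustration with a hypothetical prefix and context:
 */
#if 0
#define DM_MSG_PREFIX "example"

	DMWARN("%s: device not ready", dm_device_name(md));
#endif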

#define DMEMIT(x...) sz += ((sz >= maxlen) ? \
			  0 : scnprintf(result + sz, maxlen - sz, x))
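
/*
 * DMEMIT expects local variables named "sz", "result" and "maxlen", as
 * in this compiled-out status sketch (names beyond the dm_status_fn
 * signature are hypothetical):
 */
#if 0
static void example_status(struct dm_target *ti, status_type_t type,
			   unsigned status_flags, char *result, unsigned maxlen)
{
	unsigned sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		DMEMIT("%u", 42);	/* emit the table arguments here */
		break;
	}
}
#endif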

#define SECTOR_SHIFT 9

/*
 * Definitions of return values from target end_io function.
 */
#define DM_ENDIO_INCOMPLETE	1
#define DM_ENDIO_REQUEUE	2

/*
 * Definitions of return values from target map function.
 */
#define DM_MAPIO_SUBMITTED	0
#define DM_MAPIO_REMAPPED	1
#define DM_MAPIO_REQUEUE	DM_ENDIO_REQUEUE

#define dm_sector_div64(x, y)( \
{ \
	u64 _res; \
	(x) = div64_u64_rem(x, y, &_res); \
	_res; \
} \
)

/*
 * Ceiling(n / sz)
 */
#define dm_div_up(n, sz) (((n) + (sz) - 1) / (sz))

#define dm_sector_div_up(n, sz) ( \
{ \
	sector_t _r = ((n) + (sz) - 1); \
	sector_div(_r, (sz)); \
	_r; \
} \
)

/*
 * ceiling(n / size) * size
 */
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
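
/*
 * For example, dm_div_up(7, 4) == 2 and hence dm_round_up(7, 4) == 8;
 * an already aligned value is unchanged: dm_round_up(8, 4) == 8.
 */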

#define dm_array_too_big(fixed, obj, num) \
	((num) > (UINT_MAX - (fixed)) / (obj))

/*
 * Sector offset taken relative to the start of the target instead of
 * relative to the start of the device.
 */
#define dm_target_offset(ti, sector) ((sector) - (ti)->begin)

static inline sector_t to_sector(unsigned long n)
{
	return (n >> SECTOR_SHIFT);
}

static inline unsigned long to_bytes(sector_t n)
{
	return (n << SECTOR_SHIFT);
}

/*-----------------------------------------------------------------
 * Helper for block layer and dm core operations
 *---------------------------------------------------------------*/
void dm_dispatch_request(struct request *rq);
void dm_requeue_unmapped_request(struct request *rq);
void dm_kill_unmapped_request(struct request *rq, int error);
int dm_underlying_device_busy(struct request_queue *q);

#endif /* _LINUX_DEVICE_MAPPER_H */