drivers/md/dm-snap.h — device-mapper snapshot internal interface
1 /*
 * dm-snap.h
3 *
4 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
5 *
6 * This file is released under the GPL.
7 */
8
9 #ifndef DM_SNAPSHOT_H
10 #define DM_SNAPSHOT_H
11
12 #include "dm.h"
13 #include "dm-bio-list.h"
14 #include <linux/blkdev.h>
15 #include <linux/workqueue.h>
16
/*
 * Open-chained hash table of exceptions.
 */
struct exception_table {
	uint32_t hash_mask;		/* number of buckets - 1 — presumably; confirm at init site */
	unsigned hash_shift;
	struct list_head *table;	/* array of bucket list heads */
};
22
23 /*
24 * The snapshot code deals with largish chunks of the disk at a
25 * time. Typically 32k - 512k.
26 */
27 typedef sector_t chunk_t;
28
29 /*
30 * An exception is used where an old chunk of data has been
31 * replaced by a new one.
32 * If chunk_t is 64 bits in size, the top 8 bits of new_chunk hold the number
33 * of chunks that follow contiguously. Remaining bits hold the number of the
34 * chunk within the device.
35 */
struct dm_snap_exception {
	struct list_head hash_list;	/* entry on an exception_table bucket */

	chunk_t old_chunk;	/* chunk on the origin device */
	chunk_t new_chunk;	/* chunk on the COW device; top bits may encode a
				 * consecutive-chunk count (see comment above) */
};
42
/*
 * Functions to manipulate consecutive chunks
 */
# if defined(CONFIG_LBD) || (BITS_PER_LONG == 64)
# define DM_CHUNK_CONSECUTIVE_BITS 8
# define DM_CHUNK_NUMBER_BITS 56

/*
 * Strip the consecutive-count bits, leaving only the chunk number
 * within the device.
 */
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk & (chunk_t)((1ULL << DM_CHUNK_NUMBER_BITS) - 1ULL);
}

/* Number of chunks following new_chunk contiguously (top 8 bits). */
static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return e->new_chunk >> DM_CHUNK_NUMBER_BITS;
}

/*
 * Record one more contiguous chunk.  BUG if the 8-bit counter wraps
 * back to zero.
 */
static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
	e->new_chunk += (1ULL << DM_CHUNK_NUMBER_BITS);

	BUG_ON(!dm_consecutive_chunk_count(e));
}

# else
# define DM_CHUNK_CONSECUTIVE_BITS 0

/* No consecutive-chunk encoding when chunk_t is 32 bits: identity. */
static inline chunk_t dm_chunk_number(chunk_t chunk)
{
	return chunk;
}

/* Consecutive chunks are never recorded in this configuration. */
static inline unsigned dm_consecutive_chunk_count(struct dm_snap_exception *e)
{
	return 0;
}

static inline void dm_consecutive_chunk_count_inc(struct dm_snap_exception *e)
{
}

# endif
85
86 /*
87 * Abstraction to handle the meta/layout of exception stores (the
88 * COW device).
89 */
struct exception_store {

	/*
	 * Destroys this object when you've finished with it.
	 */
	void (*destroy) (struct exception_store *store);

	/*
	 * The target shouldn't read the COW device until this is
	 * called.
	 */
	int (*read_metadata) (struct exception_store *store);

	/*
	 * Find somewhere to store the next exception.
	 * Returns 0 on success, non-zero otherwise — presumably;
	 * confirm against the store implementations.
	 */
	int (*prepare_exception) (struct exception_store *store,
				  struct dm_snap_exception *e);

	/*
	 * Update the metadata with this exception.  May complete
	 * asynchronously: 'callback' is invoked with
	 * 'callback_context' and a success flag.
	 */
	void (*commit_exception) (struct exception_store *store,
				  struct dm_snap_exception *e,
				  void (*callback) (void *, int success),
				  void *callback_context);

	/*
	 * The snapshot is invalid, note this in the metadata.
	 */
	void (*drop_snapshot) (struct exception_store *store);

	/*
	 * Return how full the snapshot is.
	 */
	void (*fraction_full) (struct exception_store *store,
			       sector_t *numerator,
			       sector_t *denominator);

	/* Back-pointer to the owning snapshot. */
	struct dm_snapshot *snap;

	/* Private state for the store implementation. */
	void *context;
};
132
/* In-core state for one snapshot target instance. */
struct dm_snapshot {
	struct rw_semaphore lock;
	struct dm_target *ti;

	struct dm_dev *origin;
	struct dm_dev *cow;

	/* List of snapshots per Origin */
	struct list_head list;

	/* Size of data blocks saved - must be a power of 2 */
	chunk_t chunk_size;
	/* NOTE(review): presumably chunk_size - 1 and log2(chunk_size);
	 * confirm at the initialisation site in dm-snap.c. */
	chunk_t chunk_mask;
	chunk_t chunk_shift;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Used for display of table */
	char type;

	/* The last percentage we notified */
	int last_percent;

	/* Exception hash tables: 'pending' vs 'complete' (see dm-snap.c) */
	struct exception_table pending;
	struct exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct exception_store store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;
};
178
/*
 * Used by the exception stores to load exceptions when
 * initialising.
 */
183 int dm_add_exception(struct dm_snapshot *s, chunk_t old, chunk_t new);
184
185 /*
186 * Constructor and destructor for the default persistent
187 * store.
188 */
189 int dm_create_persistent(struct exception_store *store);
190
191 int dm_create_transient(struct exception_store *store);
192
193 /*
194 * Return the number of sectors in the device.
195 */
196 static inline sector_t get_dev_size(struct block_device *bdev)
197 {
198 return bdev->bd_inode->i_size >> SECTOR_SHIFT;
199 }
200
201 static inline chunk_t sector_to_chunk(struct dm_snapshot *s, sector_t sector)
202 {
203 return (sector & ~s->chunk_mask) >> s->chunk_shift;
204 }
205
206 static inline sector_t chunk_to_sector(struct dm_snapshot *s, chunk_t chunk)
207 {
208 return chunk << s->chunk_shift;
209 }
210
static inline int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * Block devices are singletons: a given device has exactly one
	 * struct block_device, so pointer identity is equality.
	 */
	return (lhs == rhs) ? 1 : 0;
}
219
220 #endif