Btrfs: don't export symbols
[deliverable/linux.git] / fs / btrfs / extent_map.c
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include "extent_map.h"

/* temporary declaration until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
                                      unsigned long extra_flags,
                                      void (*ctor)(void *, struct kmem_cache *,
                                                   unsigned long));

static struct kmem_cache *extent_map_cache;

int __init extent_map_init(void)
{
        extent_map_cache = btrfs_cache_create("extent_map",
                                              sizeof(struct extent_map), 0,
                                              NULL);
        if (!extent_map_cache)
                return -ENOMEM;
        return 0;
}

void extent_map_exit(void)
{
        if (extent_map_cache)
                kmem_cache_destroy(extent_map_cache);
}

/**
 * extent_map_tree_init - initialize extent map tree
 * @tree: tree to initialize
 * @mask: flags for memory allocations during tree operations
 *
 * Initialize the extent map tree @tree.  Should be called for each new inode
 * or other user of the extent_map interface.
 */
void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask)
{
        tree->map.rb_node = NULL;
        spin_lock_init(&tree->lock);
}

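/*
 * Usage sketch (illustrative only, not part of this file): a caller embeds
 * an extent_map_tree in its own per-inode structure and initializes it once
 * when that structure is set up.  The structure and field names below are
 * hypothetical.
 *
 *      struct my_btrfs_inode {
 *              struct extent_map_tree extent_tree;
 *              ...
 *      };
 *
 *      extent_map_tree_init(&inode->extent_tree, GFP_NOFS);
 */
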
/**
 * alloc_extent_map - allocate new extent map structure
 * @mask: memory allocation flags
 *
 * Allocate a new extent_map structure.  The new structure is
 * returned with a reference count of one and needs to be
 * freed using free_extent_map().
 */
struct extent_map *alloc_extent_map(gfp_t mask)
{
        struct extent_map *em;
        em = kmem_cache_alloc(extent_map_cache, mask);
        if (!em || IS_ERR(em))
                return em;
        em->in_tree = 0;
        em->flags = 0;
        atomic_set(&em->refs, 1);
        return em;
}

/**
 * free_extent_map - drop reference count of an extent_map
 * @em: extent map being released
 *
 * Drop the reference count on @em by one and free the structure
 * if the reference count hits zero.
 */
void free_extent_map(struct extent_map *em)
{
        if (!em)
                return;
        WARN_ON(atomic_read(&em->refs) == 0);
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(em->in_tree);
                kmem_cache_free(extent_map_cache, em);
        }
}

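/*
 * Reference counting sketch (illustrative only, not part of this file):
 * an extent_map starts life with a reference count of one from
 * alloc_extent_map(); each free_extent_map() drops one reference, and the
 * final drop returns the structure to the slab cache.
 *
 *      struct extent_map *em;
 *
 *      em = alloc_extent_map(GFP_NOFS);
 *      if (!em)
 *              return -ENOMEM;
 *      em->start = start;
 *      em->len = len;
 *      ...
 *      free_extent_map(em);
 */
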
static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
                                   struct rb_node *node)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct extent_map *entry;

        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct extent_map, rb_node);

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        p = &(*p)->rb_left;
                else if (offset >= extent_map_end(entry))
                        p = &(*p)->rb_right;
                else
                        return parent;
        }

        entry = rb_entry(node, struct extent_map, rb_node);
        entry->in_tree = 1;
        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
        return NULL;
}

/*
 * search through the tree for an extent_map with a given offset.  If
 * it can't be found, try to find some neighboring extents
 */
static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
                                     struct rb_node **prev_ret,
                                     struct rb_node **next_ret)
{
        struct rb_node *n = root->rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *orig_prev = NULL;
        struct extent_map *entry;
        struct extent_map *prev_entry = NULL;

        while (n) {
                entry = rb_entry(n, struct extent_map, rb_node);
                prev = n;
                prev_entry = entry;

                WARN_ON(!entry->in_tree);

                if (offset < entry->start)
                        n = n->rb_left;
                else if (offset >= extent_map_end(entry))
                        n = n->rb_right;
                else
                        return n;
        }

        if (prev_ret) {
                orig_prev = prev;
                while (prev && offset >= extent_map_end(prev_entry)) {
                        prev = rb_next(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *prev_ret = prev;
                prev = orig_prev;
        }

        if (next_ret) {
                prev_entry = rb_entry(prev, struct extent_map, rb_node);
                while (prev && offset < prev_entry->start) {
                        prev = rb_prev(prev);
                        prev_entry = rb_entry(prev, struct extent_map, rb_node);
                }
                *next_ret = prev;
        }
        return NULL;
}

/*
 * look for an offset in the tree and, if it can't be found, return
 * the first extent that starts after 'offset' (or NULL if there is none).
 */
static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
        struct rb_node *prev;
        struct rb_node *ret;
        ret = __tree_search(root, offset, &prev, NULL);
        if (!ret)
                return prev;
        return ret;
}

/* check to see if two extent_map structs are adjacent and safe to merge */
static int mergable_maps(struct extent_map *prev, struct extent_map *next)
{
        if (test_bit(EXTENT_FLAG_PINNED, &prev->flags))
                return 0;

        /*
         * don't merge compressed extents, we need to know their
         * actual size
         */
        if (test_bit(EXTENT_FLAG_COMPRESSED, &prev->flags))
                return 0;

        if (extent_map_end(prev) == next->start &&
            prev->flags == next->flags &&
            prev->bdev == next->bdev &&
            ((next->block_start == EXTENT_MAP_HOLE &&
              prev->block_start == EXTENT_MAP_HOLE) ||
             (next->block_start == EXTENT_MAP_INLINE &&
              prev->block_start == EXTENT_MAP_INLINE) ||
             (next->block_start == EXTENT_MAP_DELALLOC &&
              prev->block_start == EXTENT_MAP_DELALLOC) ||
             (next->block_start < EXTENT_MAP_LAST_BYTE - 1 &&
              next->block_start == extent_map_block_end(prev)))) {
                return 1;
        }
        return 0;
}

/**
 * add_extent_mapping - add new extent map to the extent tree
 * @tree: tree to insert new map in
 * @em: map to insert
 *
 * Insert @em into @tree or perform a simple forward/backward merge with
 * existing mappings.  The extent_map struct passed in will be inserted
 * into the tree directly, with an additional reference taken, or a
 * reference dropped if the merge attempt was successful.
 */
int add_extent_mapping(struct extent_map_tree *tree,
                       struct extent_map *em)
{
        int ret = 0;
        struct extent_map *merge = NULL;
        struct rb_node *rb;
        struct extent_map *exist;

        exist = lookup_extent_mapping(tree, em->start, em->len);
        if (exist) {
                free_extent_map(exist);
                ret = -EEXIST;
                goto out;
        }
        assert_spin_locked(&tree->lock);
        rb = tree_insert(&tree->map, em->start, &em->rb_node);
        if (rb) {
                ret = -EEXIST;
                goto out;
        }
        atomic_inc(&em->refs);
        if (em->start != 0) {
                rb = rb_prev(&em->rb_node);
                if (rb)
                        merge = rb_entry(rb, struct extent_map, rb_node);
                if (rb && mergable_maps(merge, em)) {
                        em->start = merge->start;
                        em->len += merge->len;
                        em->block_len += merge->block_len;
                        em->block_start = merge->block_start;
                        merge->in_tree = 0;
                        rb_erase(&merge->rb_node, &tree->map);
                        free_extent_map(merge);
                }
        }
        rb = rb_next(&em->rb_node);
        if (rb)
                merge = rb_entry(rb, struct extent_map, rb_node);
        if (rb && mergable_maps(em, merge)) {
                em->len += merge->len;
                em->block_len += merge->block_len;
                rb_erase(&merge->rb_node, &tree->map);
                merge->in_tree = 0;
                free_extent_map(merge);
        }
out:
        return ret;
}

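/*
 * Insertion sketch (illustrative only, not part of this file): the caller
 * is expected to hold tree->lock around add_extent_mapping().  On success
 * the tree takes its own reference on @em, so the caller still drops the
 * reference it obtained from alloc_extent_map() when it is done.
 *
 *      spin_lock(&tree->lock);
 *      ret = add_extent_mapping(tree, em);
 *      spin_unlock(&tree->lock);
 *      if (ret == -EEXIST) {
 *              ... an overlapping mapping already exists, handle it ...
 *      }
 *      free_extent_map(em);
 */
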
/* simple helper to do math around the end of an extent, handling wrap */
static u64 range_end(u64 start, u64 len)
{
        if (start + len < start)
                return (u64)-1;
        return start + len;
}

/**
 * lookup_extent_mapping - lookup extent_map
 * @tree: tree to lookup in
 * @start: byte offset to start the search
 * @len: length of the lookup range
 *
 * Find and return the first extent_map struct in @tree that intersects the
 * [start, start + len) byte range.  There may be additional objects in the
 * tree that intersect, so check the object returned carefully to make sure
 * that no additional lookups are needed.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
                                         u64 start, u64 len)
{
        struct extent_map *em;
        struct rb_node *rb_node;
        struct rb_node *prev = NULL;
        struct rb_node *next = NULL;
        u64 end = range_end(start, len);

        assert_spin_locked(&tree->lock);
        rb_node = __tree_search(&tree->map, start, &prev, &next);
        if (!rb_node && prev) {
                em = rb_entry(prev, struct extent_map, rb_node);
                if (end > em->start && start < extent_map_end(em))
                        goto found;
        }
        if (!rb_node && next) {
                em = rb_entry(next, struct extent_map, rb_node);
                if (end > em->start && start < extent_map_end(em))
                        goto found;
        }
        if (!rb_node) {
                em = NULL;
                goto out;
        }
        if (IS_ERR(rb_node)) {
                em = ERR_PTR(PTR_ERR(rb_node));
                goto out;
        }
        em = rb_entry(rb_node, struct extent_map, rb_node);
        if (end > em->start && start < extent_map_end(em))
                goto found;

        em = NULL;
        goto out;

found:
        atomic_inc(&em->refs);
out:
        return em;
}

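/*
 * Lookup sketch (illustrative only, not part of this file):
 * lookup_extent_mapping() takes a reference on the map it returns, so the
 * caller pairs every successful lookup with a free_extent_map() once it is
 * done with the result.
 *
 *      spin_lock(&tree->lock);
 *      em = lookup_extent_mapping(tree, start, len);
 *      spin_unlock(&tree->lock);
 *      if (em) {
 *              ... use em->start, em->len, em->block_start ...
 *              free_extent_map(em);
 *      }
 */
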
/**
 * remove_extent_mapping - removes an extent_map from the extent tree
 * @tree: extent tree to remove from
 * @em: extent map being removed
 *
 * Removes @em from @tree.  No reference counts are dropped, and no checks
 * are done to see if the range is in use.
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
        int ret = 0;

        WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
        assert_spin_locked(&tree->lock);
        rb_erase(&em->rb_node, &tree->map);
        em->in_tree = 0;
        return ret;
}
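
/*
 * Removal sketch (illustrative only, not part of this file):
 * remove_extent_mapping() only unlinks @em from the tree; the reference the
 * tree held (taken in add_extent_mapping()) must still be dropped by the
 * caller with free_extent_map().
 *
 *      spin_lock(&tree->lock);
 *      remove_extent_mapping(tree, em);
 *      spin_unlock(&tree->lock);
 *      free_extent_map(em);
 */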