Btrfs: early reference counting
[deliverable/linux.git] / fs/btrfs/extent-tree.c
#include <stdio.h>
#include <stdlib.h>
#include "kerncompat.h"
#include "radix-tree.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"

/*
 * pending extents are blocks that we're trying to allocate in the extent
 * map while trying to grow the map because of other allocations. To avoid
 * recursing, they are tagged in the radix tree and cleaned up after
 * other allocations are done. The pending tag is also used in the same
 * manner for deletes.
 */
#define CTREE_EXTENT_PENDING 0

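/*
 * find the extent record for 'blocknr' in the extent tree and bump its
 * reference count in place.  search_slot() is called with cow enabled,
 * so the leaf holding the record is expected to already be on the
 * dirty list when it is modified here.
 */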
static int inc_block_ref(struct ctree_root *root, u64 blocknr)
{
	struct ctree_path path;
	int ret;
	struct key key;
	struct leaf *l;
	struct extent_item *item;
	init_path(&path);
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = 1;
	ret = search_slot(root->extent_root, &key, &path, 0, 1);
	BUG_ON(ret != 0);
	l = &path.nodes[0]->leaf;
	item = (struct extent_item *)(l->data +
				      l->items[path.slots[0]].offset);
	item->refs++;
	BUG_ON(list_empty(&path.nodes[0]->dirty));
	release_path(root->extent_root, &path);
	return 0;
}

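/*
 * add one reference to every child block pointed to by the node in
 * 'buf'.  Only interior nodes carry blockptrs, so callers are expected
 * to pass a node rather than a leaf here.
 */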
int btrfs_inc_ref(struct ctree_root *root, struct tree_buffer *buf)
{
	u64 blocknr;
	int i;
	for (i = 0; i < buf->node.header.nritems; i++) {
		blocknr = buf->node.blockptrs[i];
		inc_block_ref(root, blocknr);
	}
	return 0;
}

/*
 * find all the blocks marked as pending in the radix tree and remove
 * them from the extent map
 */
static int del_pending_extents(struct ctree_root *extent_root)
{
	int ret;
	struct key key;
	struct tree_buffer *gang[4];
	int i;
	struct ctree_path path;

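	/*
	 * pull tagged buffers out of the cache a few at a time.  Each one
	 * still holds the reference taken when it was tagged in
	 * free_extent(), which is dropped below once its extent record
	 * has been deleted.
	 */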
	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			key.objectid = gang[i]->blocknr;
			key.flags = 0;
			key.offset = 1;
			init_path(&path);
			ret = search_slot(extent_root, &key, &path, -1, 1);
			if (ret) {
				print_tree(extent_root, extent_root->node);
				printf("unable to find %Lu\n", key.objectid);
				BUG();
				// FIXME undo it and return sane
				return ret;
			}
			ret = del_item(extent_root, &path);
			if (ret) {
				BUG();
				return ret;
			}
			release_path(extent_root, &path);
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING);
			tree_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}

/*
 * remove an extent from the root, returns 0 on success
 */
int free_extent(struct ctree_root *root, u64 blocknr, u64 num_blocks)
{
	struct ctree_path path;
	struct key key;
	struct ctree_root *extent_root = root->extent_root;
	struct tree_buffer *t;
	int pending_ret;
	int ret;
	key.objectid = blocknr;
	key.flags = 0;
	key.offset = num_blocks;
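	/*
	 * freeing a block that belongs to the extent tree itself would
	 * recurse into this code, so just tag it as pending and let
	 * del_pending_extents() remove it after the current operation
	 * finishes.  read_tree_block() takes the reference that the
	 * pending tag hands off to del_pending_extents().
	 */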
	if (root == extent_root) {
		t = read_tree_block(root, key.objectid);
		radix_tree_tag_set(&root->cache_radix, key.objectid,
				   CTREE_EXTENT_PENDING);
		return 0;
	}
	init_path(&path);
	ret = search_slot(extent_root, &key, &path, -1, 1);
	if (ret) {
		print_tree(extent_root, extent_root->node);
		printf("failed to find %Lu\n", key.objectid);
		BUG();
	}
	ret = del_item(extent_root, &path);
	if (ret)
		BUG();
	release_path(extent_root, &path);
	pending_ret = del_pending_extents(root->extent_root);
	return ret ? ret : pending_ret;
}

/*
 * walks the btree of allocated extents and finds a hole of a given size.
 * The key ins is changed to record the hole:
 * ins->objectid == block start
 * ins->flags = 0
 * ins->offset == number of blocks
 * Any available blocks before search_start are skipped.
 */
static int find_free_extent(struct ctree_root *orig_root, u64 num_blocks,
			    u64 search_start, u64 search_end, struct key *ins)
{
	struct ctree_path path;
	struct key *key;
	int ret;
	u64 hole_size = 0;
	int slot = 0;
	u64 last_block;
	int start_found;
	struct leaf *l;
	struct ctree_root *root = orig_root->extent_root;

check_failed:
	init_path(&path);
	ins->objectid = search_start;
	ins->offset = 0;
	ins->flags = 0;
	start_found = 0;
	ret = search_slot(root, ins, &path, 0, 0);
	if (ret < 0)
		goto error;

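	/*
	 * walk the extent tree leaf by leaf.  Each item describes an
	 * allocated range, so a gap between the end of one item and the
	 * start of the next that is larger than num_blocks is a usable
	 * hole.
	 */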
	while (1) {
		l = &path.nodes[0]->leaf;
		slot = path.slots[0];
		if (slot >= l->header.nritems) {
			ret = next_leaf(root, &path);
			if (ret == 0)
				continue;
			if (ret < 0)
				goto error;
			if (!start_found) {
				ins->objectid = search_start;
				ins->offset = num_blocks;
				start_found = 1;
				goto check_pending;
			}
			ins->objectid = last_block > search_start ?
					last_block : search_start;
			ins->offset = num_blocks;
			goto check_pending;
		}
		key = &l->items[slot].key;
		if (key->objectid >= search_start) {
			if (start_found) {
				hole_size = key->objectid - last_block;
				if (hole_size > num_blocks) {
					ins->objectid = last_block;
					ins->offset = num_blocks;
					goto check_pending;
				}
			} else
				start_found = 1;
			last_block = key->objectid + key->offset;
		}
		path.slots[0]++;
	}
	// FIXME -ENOSPC
check_pending:
	/* we have to make sure we didn't find an extent that has already
	 * been allocated by the map tree or the original allocation
	 */
	release_path(root, &path);
	BUG_ON(ins->objectid < search_start);
	if (orig_root->extent_root == orig_root) {
		BUG_ON(num_blocks != 1);
		if ((root->current_insert.objectid <= ins->objectid &&
		     root->current_insert.objectid +
		     root->current_insert.offset > ins->objectid) ||
		    (root->current_insert.objectid > ins->objectid &&
		     root->current_insert.objectid <= ins->objectid +
		     ins->offset) ||
		    radix_tree_tag_get(&root->cache_radix, ins->objectid,
				       CTREE_EXTENT_PENDING)) {
			search_start = ins->objectid + 1;
			goto check_failed;
		}
	}
	if (ins->offset != 1)
		BUG();
	return 0;
error:
	release_path(root, &path);
	return ret;
}

/*
 * insert all of the pending extents reserved during the original
 * allocation. (CTREE_EXTENT_PENDING). Returns zero if it all worked out
 */
static int insert_pending_extents(struct ctree_root *extent_root)
{
	int ret;
	struct key key;
	struct extent_item item;
	struct tree_buffer *gang[4];
	int i;

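	/*
	 * blocks only get tagged for insert while allocating for the
	 * extent tree itself, so every pending record starts with a
	 * single reference and takes its owner from the extent root.
	 */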
	// FIXME -ENOSPC
	item.refs = 1;
	item.owner = extent_root->node->node.header.parentid;
	while(1) {
		ret = radix_tree_gang_lookup_tag(&extent_root->cache_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 CTREE_EXTENT_PENDING);
		if (!ret)
			break;
		for (i = 0; i < ret; i++) {
			key.objectid = gang[i]->blocknr;
			key.flags = 0;
			key.offset = 1;
			ret = insert_item(extent_root, &key, &item,
					  sizeof(item));
			if (ret) {
				printf("%Lu already in tree\n", key.objectid);
				print_tree(extent_root, extent_root->node);
				BUG();
				// FIXME undo it and return sane
				return ret;
			}
			radix_tree_tag_clear(&extent_root->cache_radix,
					     gang[i]->blocknr,
					     CTREE_EXTENT_PENDING);
			printf("%Lu is not pending\n", gang[i]->blocknr);
			tree_block_release(extent_root, gang[i]);
		}
	}
	return 0;
}

/*
 * finds a free extent and does all the dirty work required for allocation.
 * Returns the key for the extent through ins, and a tree buffer for
 * the first block of the extent through buf.
 *
 * returns 0 if everything worked, non-zero otherwise.
 */
int alloc_extent(struct ctree_root *root, u64 num_blocks, u64 search_start,
		 u64 search_end, u64 owner, struct key *ins,
		 struct tree_buffer **buf)
{
	int ret;
	int pending_ret;
	struct extent_item extent_item;
	extent_item.refs = 1;
	extent_item.owner = owner;

	ret = find_free_extent(root, num_blocks, search_start, search_end, ins);
	if (ret)
		return ret;
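	/*
	 * a normal tree inserts the new extent record directly.
	 * current_insert is filled in first so that find_free_extent()
	 * calls made while the extent tree cows its own blocks won't hand
	 * this range out again, and anything those allocations left tagged
	 * pending is recorded by insert_pending_extents() afterwards.
	 */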
	if (root != root->extent_root) {
		memcpy(&root->extent_root->current_insert, ins, sizeof(*ins));
		ret = insert_item(root->extent_root, ins, &extent_item,
				  sizeof(extent_item));
		memset(&root->extent_root->current_insert, 0,
		       sizeof(struct key));
		pending_ret = insert_pending_extents(root->extent_root);
		if (ret)
			return ret;
		if (pending_ret)
			return pending_ret;
		*buf = find_tree_block(root, ins->objectid);
		dirty_tree_block(root, *buf);
		return 0;
	}
	/* we're allocating an extent for the extent tree, don't recurse */
	BUG_ON(ins->offset != 1);
	*buf = find_tree_block(root, ins->objectid);
	BUG_ON(!*buf);
	printf("%Lu is pending\n", ins->objectid);
	radix_tree_tag_set(&root->cache_radix, ins->objectid,
			   CTREE_EXTENT_PENDING);
	(*buf)->count++;
	dirty_tree_block(root, *buf);
	return 0;
}

/*
 * helper function to allocate a block for a given tree
 * returns the tree buffer or NULL.
 */
struct tree_buffer *alloc_free_block(struct ctree_root *root)
{
	struct key ins;
	int ret;
	struct tree_buffer *buf = NULL;

	ret = alloc_extent(root, 1, 0, (unsigned long)-1,
			   root->node->node.header.parentid,
			   &ins, &buf);

	if (ret) {
		BUG();
		return NULL;
	}
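	/*
	 * blocks handed out to a normal tree must already have their
	 * extent record inserted; only extent tree blocks are allowed to
	 * still be tagged pending at this point.
	 */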
	if (root != root->extent_root)
		BUG_ON(radix_tree_tag_get(&root->extent_root->cache_radix,
					  buf->blocknr, CTREE_EXTENT_PENDING));
	return buf;
}