/*
 * Btrfs: Add readpages support
 *
 * fs/btrfs/extent_map.c
 */

#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/page-flags.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/version.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>

#include "extent_map.h"

/* temporary define until extent_map moves out of btrfs */
struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
				      unsigned long extra_flags,
				      void (*ctor)(void *, struct kmem_cache *,
						   unsigned long));

static struct kmem_cache *extent_map_cache;
static struct kmem_cache *extent_state_cache;
static struct kmem_cache *extent_buffer_cache;

static LIST_HEAD(buffers);
static LIST_HEAD(states);

static spinlock_t state_lock = SPIN_LOCK_UNLOCKED;
#define BUFFER_LRU_MAX 64

struct tree_entry {
	u64 start;
	u64 end;
	int in_tree;
	struct rb_node rb_node;
};

struct extent_page_data {
	struct bio *bio;
	struct extent_map_tree *tree;
	get_extent_t *get_extent;
};

void __init extent_map_init(void)
{
	extent_map_cache = btrfs_cache_create("extent_map",
					      sizeof(struct extent_map), 0,
					      NULL);
	extent_state_cache = btrfs_cache_create("extent_state",
					      sizeof(struct extent_state), 0,
					      NULL);
	extent_buffer_cache = btrfs_cache_create("extent_buffers",
					      sizeof(struct extent_buffer), 0,
					      NULL);
}

void __exit extent_map_exit(void)
{
	struct extent_state *state;

	while (!list_empty(&states)) {
		state = list_entry(states.next, struct extent_state, list);
		printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
		       state->start, state->end, state->state, state->in_tree,
		       atomic_read(&state->refs));
		list_del(&state->list);
		kmem_cache_free(extent_state_cache, state);
	}

	if (extent_map_cache)
		kmem_cache_destroy(extent_map_cache);
	if (extent_state_cache)
		kmem_cache_destroy(extent_state_cache);
	if (extent_buffer_cache)
		kmem_cache_destroy(extent_buffer_cache);
}

void extent_map_tree_init(struct extent_map_tree *tree,
			  struct address_space *mapping, gfp_t mask)
{
	tree->map.rb_node = NULL;
	tree->state.rb_node = NULL;
	tree->ops = NULL;
	rwlock_init(&tree->lock);
	spin_lock_init(&tree->lru_lock);
	tree->mapping = mapping;
	INIT_LIST_HEAD(&tree->buffer_lru);
	tree->lru_size = 0;
}
EXPORT_SYMBOL(extent_map_tree_init);
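
/*
 * Illustrative sketch, not part of the original file: a filesystem would
 * typically embed one extent_map_tree per inode and initialize it against
 * that inode's page cache. The surrounding struct and init helper here are
 * assumptions for illustration only.
 */
struct example_inode_info {
	struct extent_map_tree extent_tree;
	struct inode vfs_inode;
};

static void example_init_inode_tree(struct example_inode_info *ei)
{
	/* point the tree at the inode's address space */
	extent_map_tree_init(&ei->extent_tree,
			     ei->vfs_inode.i_mapping, GFP_NOFS);
}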

void extent_map_tree_empty_lru(struct extent_map_tree *tree)
{
	struct extent_buffer *eb;
	while(!list_empty(&tree->buffer_lru)) {
		eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
				lru);
		list_del(&eb->lru);
		free_extent_buffer(eb);
	}
}
EXPORT_SYMBOL(extent_map_tree_empty_lru);

struct extent_map *alloc_extent_map(gfp_t mask)
{
	struct extent_map *em;
	em = kmem_cache_alloc(extent_map_cache, mask);
	if (!em || IS_ERR(em))
		return em;
	em->in_tree = 0;
	atomic_set(&em->refs, 1);
	return em;
}
EXPORT_SYMBOL(alloc_extent_map);

void free_extent_map(struct extent_map *em)
{
	if (!em)
		return;
	if (atomic_dec_and_test(&em->refs)) {
		WARN_ON(em->in_tree);
		kmem_cache_free(extent_map_cache, em);
	}
}
EXPORT_SYMBOL(free_extent_map);


struct extent_state *alloc_extent_state(gfp_t mask)
{
	struct extent_state *state;
	unsigned long flags;

	state = kmem_cache_alloc(extent_state_cache, mask);
	if (!state || IS_ERR(state))
		return state;
	state->state = 0;
	state->in_tree = 0;
	state->private = 0;

	spin_lock_irqsave(&state_lock, flags);
	list_add(&state->list, &states);
	spin_unlock_irqrestore(&state_lock, flags);

	atomic_set(&state->refs, 1);
	init_waitqueue_head(&state->wq);
	return state;
}
EXPORT_SYMBOL(alloc_extent_state);

void free_extent_state(struct extent_state *state)
{
	unsigned long flags;
	if (!state)
		return;
	if (atomic_dec_and_test(&state->refs)) {
		WARN_ON(state->in_tree);
		spin_lock_irqsave(&state_lock, flags);
		list_del(&state->list);
		spin_unlock_irqrestore(&state_lock, flags);
		kmem_cache_free(extent_state_cache, state);
	}
}
EXPORT_SYMBOL(free_extent_state);

static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
				   struct rb_node *node)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct tree_entry *entry;

	while(*p) {
		parent = *p;
		entry = rb_entry(parent, struct tree_entry, rb_node);

		if (offset < entry->start)
			p = &(*p)->rb_left;
		else if (offset > entry->end)
			p = &(*p)->rb_right;
		else
			return parent;
	}

	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 1;
	rb_link_node(node, parent, p);
	rb_insert_color(node, root);
	return NULL;
}

static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
				     struct rb_node **prev_ret)
{
	struct rb_node *n = root->rb_node;
	struct rb_node *prev = NULL;
	struct tree_entry *entry;
	struct tree_entry *prev_entry = NULL;

	while(n) {
		entry = rb_entry(n, struct tree_entry, rb_node);
		prev = n;
		prev_entry = entry;

		if (offset < entry->start)
			n = n->rb_left;
		else if (offset > entry->end)
			n = n->rb_right;
		else
			return n;
	}
	if (!prev_ret)
		return NULL;
	while(prev && offset > prev_entry->end) {
		prev = rb_next(prev);
		prev_entry = rb_entry(prev, struct tree_entry, rb_node);
	}
	*prev_ret = prev;
	return NULL;
}

static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
{
	struct rb_node *prev;
	struct rb_node *ret;
	ret = __tree_search(root, offset, &prev);
	if (!ret)
		return prev;
	return ret;
}

static int tree_delete(struct rb_root *root, u64 offset)
{
	struct rb_node *node;
	struct tree_entry *entry;

	node = __tree_search(root, offset, NULL);
	if (!node)
		return -ENOENT;
	entry = rb_entry(node, struct tree_entry, rb_node);
	entry->in_tree = 0;
	rb_erase(node, root);
	return 0;
}

/*
 * add_extent_mapping tries a simple backward merge with existing
 * mappings. The extent_map struct passed in will be inserted into
 * the tree directly (no copies made, just a reference taken).
 */
int add_extent_mapping(struct extent_map_tree *tree,
		       struct extent_map *em)
{
	int ret = 0;
	struct extent_map *prev = NULL;
	struct rb_node *rb;

	write_lock_irq(&tree->lock);
	rb = tree_insert(&tree->map, em->end, &em->rb_node);
	if (rb) {
		prev = rb_entry(rb, struct extent_map, rb_node);
		printk("found extent map %Lu %Lu on insert of %Lu %Lu\n",
		       prev->start, prev->end, em->start, em->end);
		ret = -EEXIST;
		goto out;
	}
	atomic_inc(&em->refs);
	if (em->start != 0) {
		rb = rb_prev(&em->rb_node);
		if (rb)
			prev = rb_entry(rb, struct extent_map, rb_node);
		if (prev && prev->end + 1 == em->start &&
		    ((em->block_start == EXTENT_MAP_HOLE &&
		      prev->block_start == EXTENT_MAP_HOLE) ||
		     (em->block_start == EXTENT_MAP_INLINE &&
		      prev->block_start == EXTENT_MAP_INLINE) ||
		     (em->block_start == EXTENT_MAP_DELALLOC &&
		      prev->block_start == EXTENT_MAP_DELALLOC) ||
		     (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
		      em->block_start == prev->block_end + 1))) {
			em->start = prev->start;
			em->block_start = prev->block_start;
			rb_erase(&prev->rb_node, &tree->map);
			prev->in_tree = 0;
			free_extent_map(prev);
		}
	}
out:
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(add_extent_mapping);

/*
 * lookup_extent_mapping returns the first extent_map struct in the
 * tree that intersects the [start, end] (inclusive) range. There may
 * be additional objects in the tree that intersect, so check the object
 * returned carefully to make sure you don't need additional lookups.
 */
struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	struct rb_node *rb_node;

	read_lock_irq(&tree->lock);
	rb_node = tree_search(&tree->map, start);
	if (!rb_node) {
		em = NULL;
		goto out;
	}
	if (IS_ERR(rb_node)) {
		em = ERR_PTR(PTR_ERR(rb_node));
		goto out;
	}
	em = rb_entry(rb_node, struct extent_map, rb_node);
	if (em->end < start || em->start > end) {
		em = NULL;
		goto out;
	}
	atomic_inc(&em->refs);
out:
	read_unlock_irq(&tree->lock);
	return em;
}
EXPORT_SYMBOL(lookup_extent_mapping);
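
/*
 * Illustrative sketch, not part of the original file: inserting a mapping
 * and looking it back up. The field values are placeholders; a real caller
 * gets them from the filesystem's metadata. add_extent_mapping takes its
 * own reference, so the caller still drops the one from alloc_extent_map.
 */
static int example_map_one_extent(struct extent_map_tree *tree)
{
	struct extent_map *em;
	struct extent_map *found;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = 0;			/* first byte covered */
	em->end = 4095;			/* last byte, inclusive */
	em->block_start = 8192;		/* on-disk byte offset */
	em->block_end = em->block_start + 4095;

	ret = add_extent_mapping(tree, em);
	free_extent_map(em);		/* drop the allocation ref */
	if (ret)
		return ret;

	found = lookup_extent_mapping(tree, 0, 4095);
	if (found)
		free_extent_map(found);	/* drop the lookup ref */
	return 0;
}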

/*
 * removes an extent_map struct from the tree. No reference counts are
 * dropped, and no checks are done to see if the range is in use
 */
int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
{
	int ret;

	write_lock_irq(&tree->lock);
	ret = tree_delete(&tree->map, em->end);
	write_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(remove_extent_mapping);

/*
 * utility function to look for merge candidates inside a given range.
 * Any extents with matching state are merged together into a single
 * extent in the tree. Extents with EXTENT_IOBITS in their state field
 * are not merged because the end_io handlers need to be able to do
 * operations on them without sleeping (or doing allocations/splits).
 *
 * This should be called with the tree lock held.
 */
static int merge_state(struct extent_map_tree *tree,
		       struct extent_state *state)
{
	struct extent_state *other;
	struct rb_node *other_node;

	if (state->state & EXTENT_IOBITS)
		return 0;

	other_node = rb_prev(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->end == state->start - 1 &&
		    other->state == state->state) {
			state->start = other->start;
			other->in_tree = 0;
			rb_erase(&other->rb_node, &tree->state);
			free_extent_state(other);
		}
	}
	other_node = rb_next(&state->rb_node);
	if (other_node) {
		other = rb_entry(other_node, struct extent_state, rb_node);
		if (other->start == state->end + 1 &&
		    other->state == state->state) {
			other->start = state->start;
			state->in_tree = 0;
			rb_erase(&state->rb_node, &tree->state);
			free_extent_state(state);
		}
	}
	return 0;
}

/*
 * insert an extent_state struct into the tree. 'bits' are set on the
 * struct before it is inserted.
 *
 * This may return -EEXIST if the extent is already there, in which case the
 * state struct is freed.
 *
 * The tree lock is not taken internally. This is a utility function and
 * probably isn't what you want to call (see set/clear_extent_bit).
 */
static int insert_state(struct extent_map_tree *tree,
			struct extent_state *state, u64 start, u64 end,
			int bits)
{
	struct rb_node *node;

	if (end < start) {
		printk("end < start %Lu %Lu\n", end, start);
		WARN_ON(1);
	}
	state->state |= bits;
	state->start = start;
	state->end = end;
	node = tree_insert(&tree->state, end, &state->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, start, end);
		free_extent_state(state);
		return -EEXIST;
	}
	merge_state(tree, state);
	return 0;
}

/*
 * split a given extent state struct in two, inserting the preallocated
 * struct 'prealloc' as the newly created second half. 'split' indicates an
 * offset inside 'orig' where it should be split.
 *
 * Before calling, the tree has 'orig' at [orig->start, orig->end]. After
 * calling, there are two extent state structs in the tree:
 * prealloc: [orig->start, split - 1]
 * orig: [split, orig->end]
 *
 * The tree locks are not taken by this function. They need to be held
 * by the caller.
 */
static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
		       struct extent_state *prealloc, u64 split)
{
	struct rb_node *node;
	prealloc->start = orig->start;
	prealloc->end = split - 1;
	prealloc->state = orig->state;
	orig->start = split;

	node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
	if (node) {
		struct extent_state *found;
		found = rb_entry(node, struct extent_state, rb_node);
		printk("found node %Lu %Lu on insert of %Lu %Lu\n",
		       found->start, found->end, prealloc->start,
		       prealloc->end);
		free_extent_state(prealloc);
		return -EEXIST;
	}
	return 0;
}

/*
 * utility function to clear some bits in an extent state struct.
 * it will optionally wake up any one waiting on this state (wake == 1), or
 * forcibly remove the state from the tree (delete == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
 */
static int clear_state_bit(struct extent_map_tree *tree,
			   struct extent_state *state, int bits, int wake,
			   int delete)
{
	int ret = state->state & bits;
	state->state &= ~bits;
	if (wake)
		wake_up(&state->wq);
	if (delete || state->state == 0) {
		if (state->in_tree) {
			rb_erase(&state->rb_node, &tree->state);
			state->in_tree = 0;
			free_extent_state(state);
		} else {
			WARN_ON(1);
		}
	} else {
		merge_state(tree, state);
	}
	return ret;
}

/*
 * clear some bits on a range in the tree. This may require splitting
 * or inserting elements in the tree, so the gfp mask is used to
 * indicate which allocations or sleeping are allowed.
 *
 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
 * the given range from the tree regardless of state (ie for truncate).
 *
 * the range [start, end] is inclusive.
 *
 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
 * bits were already set, or zero if none of the bits were already set.
 */
int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
		     int bits, int wake, int delete, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err;
	int set = 0;

again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find the extents that end after
	 * our range starts
	 */
	node = tree_search(&tree->state, start);
	if (!node)
		goto out;
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start > end)
		goto out;
	WARN_ON(state->end < start);

	/*
	 *     | ---- desired range ---- |
	 *  | state | or
	 *  | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip
	 * bits on second half.
	 *
	 * If the extent we found extends past our range, we
	 * just split and search again. It'll get split again
	 * the next time though.
	 *
	 * If the extent we found is inside our range, we clear
	 * the desired bit on it.
	 */

	if (state->start < start) {
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			start = state->end + 1;
			set |= clear_state_bit(tree, state, bits,
					       wake, delete);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and clear the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		if (wake)
			wake_up(&state->wq);
		set |= clear_state_bit(tree, prealloc, bits,
				       wake, delete);
		prealloc = NULL;
		goto out;
	}

	start = state->end + 1;
	set |= clear_state_bit(tree, state, bits, wake, delete);
	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return set;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(clear_extent_bit);

static int wait_on_state(struct extent_map_tree *tree,
			 struct extent_state *state)
{
	DEFINE_WAIT(wait);
	prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
	read_unlock_irq(&tree->lock);
	schedule();
	read_lock_irq(&tree->lock);
	finish_wait(&state->wq, &wait);
	return 0;
}

/*
 * waits for one or more bits to clear on a range in the state tree.
 * The range [start, end] is inclusive.
 * The tree lock is taken by this function
 */
int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
{
	struct extent_state *state;
	struct rb_node *node;

	read_lock_irq(&tree->lock);
again:
	while (1) {
		/*
		 * this search will find all the extents that end after
		 * our range starts
		 */
		node = tree_search(&tree->state, start);
		if (!node)
			break;

		state = rb_entry(node, struct extent_state, rb_node);

		if (state->start > end)
			goto out;

		if (state->state & bits) {
			start = state->start;
			atomic_inc(&state->refs);
			wait_on_state(tree, state);
			free_extent_state(state);
			goto again;
		}
		start = state->end + 1;

		if (start > end)
			break;

		if (need_resched()) {
			read_unlock_irq(&tree->lock);
			cond_resched();
			read_lock_irq(&tree->lock);
		}
	}
out:
	read_unlock_irq(&tree->lock);
	return 0;
}
EXPORT_SYMBOL(wait_extent_bit);

/*
 * set some bits on a range in the tree. This may require allocations
 * or sleeping, so the gfp mask is used to indicate what is allowed.
 *
 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
 * range already has the desired bits set. The start of the existing
 * range is returned in failed_start in this case.
 *
 * [start, end] is inclusive
 * This takes the tree lock.
 */
int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
		   int exclusive, u64 *failed_start, gfp_t mask)
{
	struct extent_state *state;
	struct extent_state *prealloc = NULL;
	struct rb_node *node;
	unsigned long flags;
	int err = 0;
	int set;
	u64 last_start;
	u64 last_end;
again:
	if (!prealloc && (mask & __GFP_WAIT)) {
		prealloc = alloc_extent_state(mask);
		if (!prealloc)
			return -ENOMEM;
	}

	write_lock_irqsave(&tree->lock, flags);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node) {
		err = insert_state(tree, prealloc, start, end, bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		goto out;
	}

	state = rb_entry(node, struct extent_state, rb_node);
	last_start = state->start;
	last_end = state->end;

	/*
	 * | ---- desired range ---- |
	 * | state |
	 *
	 * Just lock what we found and keep going
	 */
	if (state->start == start && state->end <= end) {
		set = state->state & bits;
		if (set && exclusive) {
			*failed_start = state->start;
			err = -EEXIST;
			goto out;
		}
		state->state |= bits;
		start = state->end + 1;
		merge_state(tree, state);
		goto search_again;
	}

	/*
	 *     | ---- desired range ---- |
	 * | state |
	 *   or
	 * | ------------- state -------------- |
	 *
	 * We need to split the extent we found, and may flip bits on
	 * second half.
	 *
	 * If the extent we found extends past our
	 * range, we just split and search again. It'll get split
	 * again the next time though.
	 *
	 * If the extent we found is inside our range, we set the
	 * desired bit on it.
	 */
	if (state->start < start) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, start);
		BUG_ON(err == -EEXIST);
		prealloc = NULL;
		if (err)
			goto out;
		if (state->end <= end) {
			state->state |= bits;
			start = state->end + 1;
			merge_state(tree, state);
		} else {
			start = state->start;
		}
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *     | state | or               | state |
	 *
	 * There's a hole, we need to insert something in it and
	 * ignore the extent we found.
	 */
	if (state->start > start) {
		u64 this_end;
		if (end < last_start)
			this_end = end;
		else
			this_end = last_start - 1;
		err = insert_state(tree, prealloc, start, this_end,
				   bits);
		prealloc = NULL;
		BUG_ON(err == -EEXIST);
		if (err)
			goto out;
		start = this_end + 1;
		goto search_again;
	}
	/*
	 * | ---- desired range ---- |
	 *                        | state |
	 * We need to split the extent, and set the bit
	 * on the first half
	 */
	if (state->start <= end && state->end > end) {
		set = state->state & bits;
		if (exclusive && set) {
			*failed_start = start;
			err = -EEXIST;
			goto out;
		}
		err = split_state(tree, state, prealloc, end + 1);
		BUG_ON(err == -EEXIST);

		prealloc->state |= bits;
		merge_state(tree, prealloc);
		prealloc = NULL;
		goto out;
	}

	goto search_again;

out:
	write_unlock_irqrestore(&tree->lock, flags);
	if (prealloc)
		free_extent_state(prealloc);

	return err;

search_again:
	if (start > end)
		goto out;
	write_unlock_irqrestore(&tree->lock, flags);
	if (mask & __GFP_WAIT)
		cond_resched();
	goto again;
}
EXPORT_SYMBOL(set_extent_bit);

/* wrappers around set/clear extent bit */
int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_dirty);

int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		    int bits, gfp_t mask)
{
	return set_extent_bit(tree, start, end, bits, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_bits);

int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
		      int bits, gfp_t mask)
{
	return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_bits);
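
/*
 * Illustrative sketch, not part of the original file: clearing a bit in
 * the middle of a larger state record forces set/clear_extent_bit to
 * split that record. Starting from DIRTY on [0, 16383], clearing
 * [4096, 8191] leaves two DIRTY records, [0, 4095] and [8192, 16383].
 * The byte boundaries here are arbitrary example values.
 */
static int example_split_by_clearing(struct extent_map_tree *tree)
{
	int ret;

	ret = set_extent_bits(tree, 0, 16383, EXTENT_DIRTY, GFP_NOFS);
	if (ret)
		return ret;
	/* this clear splits the single [0, 16383] record in two */
	return clear_extent_bits(tree, 4096, 8191, EXTENT_DIRTY, GFP_NOFS);
}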

int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end,
			      EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_delalloc);

int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
		       gfp_t mask)
{
	return clear_extent_bit(tree, start, end,
				EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_dirty);

int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		   gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_new);

int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
		     gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_new);

int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
			      mask);
}
EXPORT_SYMBOL(set_extent_uptodate);

int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
			  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
}
EXPORT_SYMBOL(clear_extent_uptodate);

int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			 gfp_t mask)
{
	return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
			      0, NULL, mask);
}
EXPORT_SYMBOL(set_extent_writeback);

int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
			   gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
}
EXPORT_SYMBOL(clear_extent_writeback);

int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
}
EXPORT_SYMBOL(wait_on_extent_writeback);

/*
 * locks a range in ascending order, waiting for any locked regions
 * it hits on the way. [start, end] are inclusive, and this will sleep.
 */
int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
{
	int err;
	u64 failed_start;
	while (1) {
		err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
				     &failed_start, mask);
		if (err == -EEXIST && (mask & __GFP_WAIT)) {
			wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
			start = failed_start;
		} else {
			break;
		}
		WARN_ON(start > end);
	}
	return err;
}
EXPORT_SYMBOL(lock_extent);

int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
		  gfp_t mask)
{
	return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
}
EXPORT_SYMBOL(unlock_extent);
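
/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * for serializing IO against a byte range. lock_extent sleeps until any
 * conflicting EXTENT_LOCKED record clears, so this must not be called
 * from a context that cannot sleep when passing a GFP_NOFS-style mask.
 */
static int example_locked_region(struct extent_map_tree *tree,
				 u64 start, u64 end)
{
	lock_extent(tree, start, end, GFP_NOFS);
	/* ... operate on [start, end] while it is protected ... */
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}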

/*
 * helper function to set pages and extents in the tree dirty
 */
int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		__set_page_dirty_nobuffers(page);
		page_cache_release(page);
		index++;
	}
	set_extent_dirty(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_dirty);

/*
 * helper function to set both pages and extents in the tree writeback
 */
int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		BUG_ON(!page);
		set_page_writeback(page);
		page_cache_release(page);
		index++;
	}
	set_extent_writeback(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(set_range_writeback);

int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
			  u64 *start_ret, u64 *end_ret, int bits)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 1;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while(1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->end >= start && (state->state & bits)) {
			*start_ret = state->start;
			*end_ret = state->end;
			ret = 0;
			break;
		}
		node = rb_next(node);
		if (!node)
			break;
	}
out:
	read_unlock_irq(&tree->lock);
	return ret;
}
EXPORT_SYMBOL(find_first_extent_bit);
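
/*
 * Illustrative sketch, not part of the original file: walking every
 * range with a given bit set by repeatedly asking for the first match
 * at or after the previous end. Returns the number of ranges seen.
 */
static int example_count_dirty_ranges(struct extent_map_tree *tree)
{
	u64 start = 0;
	u64 found_start;
	u64 found_end;
	int count = 0;

	while (!find_first_extent_bit(tree, start, &found_start,
				      &found_end, EXTENT_DIRTY)) {
		count++;
		start = found_end + 1;	/* resume past this range */
	}
	return count;
}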

u64 find_lock_delalloc_range(struct extent_map_tree *tree,
			     u64 start, u64 lock_start, u64 *end,
			     u64 max_bytes)
{
	struct rb_node *node;
	struct extent_state *state;
	u64 cur_start = start;
	u64 found = 0;
	u64 total_bytes = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
search_again:
	node = tree_search(&tree->state, cur_start);
	if (!node || IS_ERR(node)) {
		goto out;
	}

	while(1) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start != cur_start) {
			goto out;
		}
		if (!(state->state & EXTENT_DELALLOC)) {
			goto out;
		}
		if (state->start >= lock_start) {
			if (state->state & EXTENT_LOCKED) {
				DEFINE_WAIT(wait);
				atomic_inc(&state->refs);
				prepare_to_wait(&state->wq, &wait,
						TASK_UNINTERRUPTIBLE);
				write_unlock_irq(&tree->lock);
				schedule();
				write_lock_irq(&tree->lock);
				finish_wait(&state->wq, &wait);
				free_extent_state(state);
				goto search_again;
			}
			state->state |= EXTENT_LOCKED;
		}
		found++;
		*end = state->end;
		cur_start = state->end + 1;
		node = rb_next(node);
		if (!node)
			break;
		total_bytes += state->end - state->start + 1;
		if (total_bytes >= max_bytes)
			break;
	}
out:
	write_unlock_irq(&tree->lock);
	return found;
}

/*
 * helper function to lock both pages and extents in the tree.
 * pages must be locked first.
 */
int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;
	int err;

	while (index <= end_index) {
		page = grab_cache_page(tree->mapping, index);
		if (!page) {
			err = -ENOMEM;
			goto failed;
		}
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed;
		}
		index++;
	}
	lock_extent(tree, start, end, GFP_NOFS);
	return 0;

failed:
	/*
	 * we failed above in getting the page at 'index', so we undo here
	 * up to but not including the page at 'index'
	 */
	end_index = index;
	index = start >> PAGE_CACHE_SHIFT;
	while (index < end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	return err;
}
EXPORT_SYMBOL(lock_range);

/*
 * helper function to unlock both pages and extents in the tree.
 */
int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
{
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	unsigned long end_index = end >> PAGE_CACHE_SHIFT;
	struct page *page;

	while (index <= end_index) {
		page = find_get_page(tree->mapping, index);
		unlock_page(page);
		page_cache_release(page);
		index++;
	}
	unlock_extent(tree, start, end, GFP_NOFS);
	return 0;
}
EXPORT_SYMBOL(unlock_range);

int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	write_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	state->private = private;
out:
	write_unlock_irq(&tree->lock);
	return ret;
}

int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
{
	struct rb_node *node;
	struct extent_state *state;
	int ret = 0;

	read_lock_irq(&tree->lock);
	/*
	 * this search will find all the extents that end after
	 * our range starts.
	 */
	node = tree_search(&tree->state, start);
	if (!node || IS_ERR(node)) {
		ret = -ENOENT;
		goto out;
	}
	state = rb_entry(node, struct extent_state, rb_node);
	if (state->start != start) {
		ret = -ENOENT;
		goto out;
	}
	*private = state->private;
out:
	read_unlock_irq(&tree->lock);
	return ret;
}

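/*
 * Illustrative sketch, not part of the original file: the private value
 * is an opaque u64 keyed by a range's exact start offset; btrfs-style
 * callers use it to stash per-range data such as a checksum. The value
 * here is a placeholder.
 */
static int example_stash_private(struct extent_map_tree *tree, u64 start)
{
	u64 stored;
	int ret;

	ret = set_state_private(tree, start, 0xabcdef);
	if (ret)
		return ret;	/* no state record starts exactly here */
	ret = get_state_private(tree, start, &stored);
	WARN_ON(!ret && stored != 0xabcdef);
	return ret;
}
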
/*
 * searches a range in the state tree for a given mask.
 * If 'filled' == 1, this returns 1 only if every extent in the range
 * has the bits set. Otherwise, 1 is returned if any bit in the
 * range is found set.
 */
int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
		   int bits, int filled)
{
	struct extent_state *state = NULL;
	struct rb_node *node;
	int bitset = 0;

	read_lock_irq(&tree->lock);
	node = tree_search(&tree->state, start);
	while (node && start <= end) {
		state = rb_entry(node, struct extent_state, rb_node);
		if (state->start > end)
			break;

		if (filled && state->start > start) {
			bitset = 0;
			break;
		}
		if (state->state & bits) {
			bitset = 1;
			if (!filled)
				break;
		} else if (filled) {
			bitset = 0;
			break;
		}
		start = state->end + 1;
		if (start > end)
			break;
		node = rb_next(node);
	}
	read_unlock_irq(&tree->lock);
	return bitset;
}
EXPORT_SYMBOL(test_range_bit);

/*
 * helper function to set a given page up to date if all the
 * extents in the tree for that page are up to date
 */
static int check_page_uptodate(struct extent_map_tree *tree,
			       struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
		SetPageUptodate(page);
	return 0;
}

/*
 * helper function to unlock a page if all the extents in the tree
 * for that page are unlocked
 */
static int check_page_locked(struct extent_map_tree *tree,
			     struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
		unlock_page(page);
	return 0;
}

/*
 * helper function to end page writeback if all the extents
 * in the tree for that page are done with writeback
 */
static int check_page_writeback(struct extent_map_tree *tree,
				struct page *page)
{
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
		end_page_writeback(page);
	return 0;
}

/* lots and lots of room for performance fixes in the end_bio funcs */

/*
 * after a writepage IO is done, we need to:
 * clear the uptodate bits on error
 * clear the writeback bits in the extent tree for this IO
 * end_page_writeback if the page has no more pending IO
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_writepage(struct bio *bio, int err)
#else
static int end_bio_extent_writepage(struct bio *bio,
				    unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (!uptodate) {
			clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
			ClearPageUptodate(page);
			SetPageError(page);
		}
		clear_extent_writeback(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			end_page_writeback(page);
		else
			check_page_writeback(tree, page);
		if (tree->ops && tree->ops->writepage_end_io_hook)
			tree->ops->writepage_end_io_hook(page, start, end);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * after a readpage IO is done, we need to:
 * clear the uptodate bits on error
 * set the uptodate bits if things worked
 * set the page up to date if all extents in the tree are uptodate
 * clear the lock bit in the extent tree
 * unlock the page if there are no other extents locked for it
 *
 * Scheduling is not allowed, so the extent state tree is expected
 * to have one and only one object corresponding to this IO.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_readpage(struct bio *bio, int err)
#else
static int end_bio_extent_readpage(struct bio *bio,
				   unsigned int bytes_done, int err)
#endif
{
	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;
	int whole_page;
	int ret;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
			whole_page = 1;
		else
			whole_page = 0;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
			ret = tree->ops->readpage_end_io_hook(page, start, end);
			if (ret)
				uptodate = 0;
		}
		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
			if (whole_page)
				SetPageUptodate(page);
			else
				check_page_uptodate(tree, page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

		if (whole_page)
			unlock_page(page);
		else
			check_page_locked(tree, page);
	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

/*
 * IO done from prepare_write is pretty simple, we just unlock
 * the structs in the extent tree when done, and set the uptodate bits
 * as appropriate.
 */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
static void end_bio_extent_preparewrite(struct bio *bio, int err)
#else
static int end_bio_extent_preparewrite(struct bio *bio,
				       unsigned int bytes_done, int err)
#endif
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
	struct extent_map_tree *tree = bio->bi_private;
	u64 start;
	u64 end;

#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	if (bio->bi_size)
		return 1;
#endif

	do {
		struct page *page = bvec->bv_page;
		start = ((u64)page->index << PAGE_CACHE_SHIFT) +
			bvec->bv_offset;
		end = start + bvec->bv_len - 1;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			set_extent_uptodate(tree, start, end, GFP_ATOMIC);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}

		unlock_extent(tree, start, end, GFP_ATOMIC);

	} while (bvec >= bio->bi_io_vec);

	bio_put(bio);
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
	return 0;
#endif
}

static struct bio *
extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
		 gfp_t gfp_flags)
{
	struct bio *bio;

	bio = bio_alloc(gfp_flags, nr_vecs);

	if (bio == NULL && (current->flags & PF_MEMALLOC)) {
		while (!bio && (nr_vecs /= 2))
			bio = bio_alloc(gfp_flags, nr_vecs);
	}

	if (bio) {
		bio->bi_bdev = bdev;
		bio->bi_sector = first_sector;
	}
	return bio;
}

static int submit_one_bio(int rw, struct bio *bio)
{
	int ret = 0;
	bio_get(bio);
	submit_bio(rw, bio);
	if (bio_flagged(bio, BIO_EOPNOTSUPP))
		ret = -EOPNOTSUPP;
	bio_put(bio);
	return ret;
}

static int submit_extent_page(int rw, struct extent_map_tree *tree,
			      struct page *page, sector_t sector,
			      size_t size, unsigned long offset,
			      struct block_device *bdev,
			      struct bio **bio_ret,
			      unsigned long max_pages,
			      bio_end_io_t end_io_func)
{
	int ret = 0;
	struct bio *bio;
	int nr;

	if (bio_ret && *bio_ret) {
		bio = *bio_ret;
		if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
		    bio_add_page(bio, page, size, offset) < size) {
			ret = submit_one_bio(rw, bio);
			bio = NULL;
		} else {
			return 0;
		}
	}
	nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
	bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
	if (!bio) {
		printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
	}
	bio_add_page(bio, page, size, offset);
	bio->bi_end_io = end_io_func;
	bio->bi_private = tree;
	if (bio_ret) {
		*bio_ret = bio;
	} else {
		ret = submit_one_bio(rw, bio);
	}

	return ret;
}

void set_page_extent_mapped(struct page *page)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		WARN_ON(!page->mapping->a_ops->invalidatepage);
		set_page_private(page, EXTENT_PAGE_PRIVATE);
		page_cache_get(page);
	}
}

/*
 * basic readpage implementation. Locked extent state structs are inserted
 * into the tree that are removed when the IO is done (by the end_io
 * handlers)
 */
static int __extent_read_full_page(struct extent_map_tree *tree,
				   struct page *page,
				   get_extent_t *get_extent,
				   struct bio **bio)
{
	struct inode *inode = page->mapping->host;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 cur_end;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t iosize;
	size_t blocksize = inode->i_sb->s_blocksize;

	set_page_extent_mapped(page);

	end = page_end;
	lock_extent(tree, start, end, GFP_NOFS);

	while (cur <= end) {
		if (cur >= last_byte) {
			iosize = PAGE_CACHE_SIZE - page_offset;
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			break;
		}
		em = get_extent(inode, page, page_offset, cur, end, 0);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			unlock_extent(tree, cur, end, GFP_NOFS);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);

		iosize = min(em->end - cur, end - cur) + 1;
		cur_end = min(em->end, end);
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		/* we've found a hole, just zero and go on */
		if (block_start == EXTENT_MAP_HOLE) {
			zero_user_page(page, page_offset, iosize, KM_USER0);
			set_extent_uptodate(tree, cur, cur + iosize - 1,
					    GFP_NOFS);
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		/* the get_extent function already copied into the page */
		if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
			unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		ret = 0;
		if (tree->ops && tree->ops->readpage_io_hook) {
			ret = tree->ops->readpage_io_hook(page, cur,
							  cur + iosize - 1);
		}
		if (!ret) {
			unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
			nr -= page->index;
			ret = submit_extent_page(READ, tree, page,
						 sector, iosize, page_offset,
						 bdev, bio, nr,
						 end_bio_extent_readpage);
		}
		if (ret)
			SetPageError(page);
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
	if (!nr) {
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	return 0;
}

int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
			  get_extent_t *get_extent)
{
	struct bio *bio = NULL;
	int ret;

	ret = __extent_read_full_page(tree, page, get_extent, &bio);
	if (bio)
		submit_one_bio(READ, bio);
	return ret;
}
EXPORT_SYMBOL(extent_read_full_page);

/*
 * the writepage semantics are similar to regular writepage. extent
 * records are inserted to lock ranges in the tree, and as dirty areas
 * are found, they are marked writeback. Then the lock bits are removed
 * and the end_io handler clears the writeback ranges
 */
static int __extent_writepage(struct page *page, struct writeback_control *wbc,
			      void *data)
{
	struct inode *inode = page->mapping->host;
	struct extent_page_data *epd = data;
	struct extent_map_tree *tree = epd->tree;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = start + PAGE_CACHE_SIZE - 1;
	u64 end;
	u64 cur = start;
	u64 extent_offset;
	u64 last_byte = i_size_read(inode);
	u64 block_start;
	u64 iosize;
	sector_t sector;
	struct extent_map *em;
	struct block_device *bdev;
	int ret;
	int nr = 0;
	size_t page_offset = 0;
	size_t blocksize;
	loff_t i_size = i_size_read(inode);
	unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
	u64 nr_delalloc;
	u64 delalloc_end;

	WARN_ON(!PageLocked(page));
	if (page->index > end_index) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		unlock_page(page);
		return 0;
	}

	if (page->index == end_index) {
		size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
		zero_user_page(page, offset,
			       PAGE_CACHE_SIZE - offset, KM_USER0);
	}

	set_page_extent_mapped(page);

	lock_extent(tree, start, page_end, GFP_NOFS);
	nr_delalloc = find_lock_delalloc_range(tree, start, page_end + 1,
					       &delalloc_end,
					       128 * 1024 * 1024);
	if (nr_delalloc) {
		tree->ops->fill_delalloc(inode, start, delalloc_end);
		if (delalloc_end >= page_end + 1) {
			clear_extent_bit(tree, page_end + 1, delalloc_end,
					 EXTENT_LOCKED | EXTENT_DELALLOC,
					 1, 0, GFP_NOFS);
		}
		clear_extent_bit(tree, start, page_end, EXTENT_DELALLOC,
				 0, 0, GFP_NOFS);
		if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
			printk("found delalloc bits after clear extent_bit\n");
		}
	} else if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after find_delalloc_range returns 0\n");
	}

	end = page_end;
	if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
		printk("found delalloc bits after lock_extent\n");
	}

	if (last_byte <= start) {
		clear_extent_dirty(tree, start, page_end, GFP_NOFS);
		goto done;
	}

	set_extent_uptodate(tree, start, page_end, GFP_NOFS);
	blocksize = inode->i_sb->s_blocksize;

	while (cur <= end) {
		if (cur >= last_byte) {
			clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
			break;
		}
		em = epd->get_extent(inode, page, page_offset, cur, end, 1);
		if (IS_ERR(em) || !em) {
			SetPageError(page);
			break;
		}

		extent_offset = cur - em->start;
		BUG_ON(em->end < cur);
		BUG_ON(end < cur);
		iosize = min(em->end - cur, end - cur) + 1;
		iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
		sector = (em->block_start + extent_offset) >> 9;
		bdev = em->bdev;
		block_start = em->block_start;
		free_extent_map(em);
		em = NULL;

		if (block_start == EXTENT_MAP_HOLE ||
		    block_start == EXTENT_MAP_INLINE) {
			clear_extent_dirty(tree, cur,
					   cur + iosize - 1, GFP_NOFS);
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}

		/* leave this out until we have a page_mkwrite call */
		if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
					 EXTENT_DIRTY, 0)) {
			cur = cur + iosize;
			page_offset += iosize;
			continue;
		}
		clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
		if (tree->ops && tree->ops->writepage_io_hook) {
			ret = tree->ops->writepage_io_hook(page, cur,
							   cur + iosize - 1);
		} else {
			ret = 0;
		}
		if (ret)
			SetPageError(page);
		else {
			unsigned long nr = end_index + 1;
			set_range_writeback(tree, cur, cur + iosize - 1);

			ret = submit_extent_page(WRITE, tree, page, sector,
						 iosize, page_offset, bdev,
						 &epd->bio, nr,
						 end_bio_extent_writepage);
			if (ret)
				SetPageError(page);
		}
		cur = cur + iosize;
		page_offset += iosize;
		nr++;
	}
done:
	unlock_extent(tree, start, page_end, GFP_NOFS);
	unlock_page(page);
	return 0;
}

int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
			   get_extent_t *get_extent,
			   struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = __extent_writepage(page, wbc, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_write_full_page);

int extent_writepages(struct extent_map_tree *tree,
		      struct address_space *mapping,
		      get_extent_t *get_extent,
		      struct writeback_control *wbc)
{
	int ret;
	struct extent_page_data epd = {
		.bio = NULL,
		.tree = tree,
		.get_extent = get_extent,
	};

	ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
	if (epd.bio)
		submit_one_bio(WRITE, epd.bio);
	return ret;
}
EXPORT_SYMBOL(extent_writepages);

int extent_readpages(struct extent_map_tree *tree,
		     struct address_space *mapping,
		     struct list_head *pages, unsigned nr_pages,
		     get_extent_t get_extent)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	struct pagevec pvec;

	pagevec_init(&pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_entry(pages->prev, struct page, lru);

		prefetchw(&page->flags);
		list_del(&page->lru);
		/*
		 * what we want to do here is call add_to_page_cache_lru,
		 * but that isn't exported, so we reproduce it here
		 */
		if (!add_to_page_cache(page, mapping,
				       page->index, GFP_KERNEL)) {

			/* open coding of lru_cache_add, also not exported */
			page_cache_get(page);
			if (!pagevec_add(&pvec, page))
				__pagevec_lru_add(&pvec);
			__extent_read_full_page(tree, page, get_extent, &bio);
		}
		page_cache_release(page);
	}
	if (pagevec_count(&pvec))
		__pagevec_lru_add(&pvec);
	BUG_ON(!list_empty(pages));
	if (bio)
		submit_one_bio(READ, bio);
	return 0;
}
EXPORT_SYMBOL(extent_readpages);
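
/*
 * Illustrative sketch, not part of the original file: how a filesystem
 * might wire these helpers into its address_space_operations. The
 * example_tree_of() accessor and example_get_extent() mapping callback
 * are hypothetical stand-ins for what a real filesystem (e.g. btrfs)
 * would provide; both are stubs here.
 */
static struct extent_map_tree *example_tree_of(struct inode *inode)
{
	/* a real filesystem returns its per-inode extent_map_tree */
	return NULL;
}

static struct extent_map *example_get_extent(struct inode *inode,
					     struct page *page,
					     size_t page_offset, u64 start,
					     u64 end, int create)
{
	/* a real filesystem maps [start, end] to disk extents here */
	return ERR_PTR(-ENOSYS);
}

static int example_readpage(struct file *file, struct page *page)
{
	struct extent_map_tree *tree = example_tree_of(page->mapping->host);
	return extent_read_full_page(tree, page, example_get_extent);
}

static int example_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct extent_map_tree *tree = example_tree_of(mapping->host);
	return extent_readpages(tree, mapping, pages, nr_pages,
				example_get_extent);
}

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_map_tree *tree = example_tree_of(page->mapping->host);
	return extent_write_full_page(tree, page, example_get_extent, wbc);
}

static const struct address_space_operations example_aops = {
	.readpage	= example_readpage,
	.readpages	= example_readpages,
	.writepage	= example_writepage,
};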
1894
a52d9a80
CM
1895/*
1896 * basic invalidatepage code, this waits on any locked or writeback
1897 * ranges corresponding to the page, and then deletes any extent state
1898 * records from the tree
1899 */
1900int extent_invalidatepage(struct extent_map_tree *tree,
1901 struct page *page, unsigned long offset)
1902{
35ebb934 1903 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
a52d9a80
CM
1904 u64 end = start + PAGE_CACHE_SIZE - 1;
1905 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
1906
1907 start += (offset + blocksize -1) & ~(blocksize - 1);
1908 if (start > end)
1909 return 0;
1910
1911 lock_extent(tree, start, end, GFP_NOFS);
1912 wait_on_extent_writeback(tree, start, end);
2bf5a725
CM
1913 clear_extent_bit(tree, start, end,
1914 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
a52d9a80
CM
1915 1, 1, GFP_NOFS);
1916 return 0;
1917}
1918EXPORT_SYMBOL(extent_invalidatepage);
1919
1920/*
1921 * simple commit_write call, set_range_dirty is used to mark both
1922 * the pages and the extent records as dirty
1923 */
1924int extent_commit_write(struct extent_map_tree *tree,
1925 struct inode *inode, struct page *page,
1926 unsigned from, unsigned to)
1927{
1928 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
1929
b3cfa35a 1930 set_page_extent_mapped(page);
a52d9a80
CM
1931 set_page_dirty(page);
1932
1933 if (pos > inode->i_size) {
1934 i_size_write(inode, pos);
1935 mark_inode_dirty(inode);
1936 }
1937 return 0;
1938}
1939EXPORT_SYMBOL(extent_commit_write);

int extent_prepare_write(struct extent_map_tree *tree,
			 struct inode *inode, struct page *page,
			 unsigned from, unsigned to, get_extent_t *get_extent)
{
	u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
	u64 block_start;
	u64 orig_block_start;
	u64 block_end;
	u64 cur_end;
	struct extent_map *em;
	unsigned blocksize = 1 << inode->i_blkbits;
	size_t page_offset = 0;
	size_t block_off_start;
	size_t block_off_end;
	int err = 0;
	int iocount = 0;
	int ret = 0;
	int isnew;

	set_page_extent_mapped(page);

	block_start = (page_start + from) & ~((u64)blocksize - 1);
	block_end = (page_start + to - 1) | (blocksize - 1);
	orig_block_start = block_start;

	lock_extent(tree, page_start, page_end, GFP_NOFS);
	while (block_start <= block_end) {
		em = get_extent(inode, page, page_offset, block_start,
				block_end, 1);
		if (IS_ERR(em) || !em)
			goto err;

		cur_end = min(block_end, em->end);
		block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
		block_off_end = block_off_start + blocksize;
		isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);

		if (!PageUptodate(page) && isnew &&
		    (block_off_end > to || block_off_start < from)) {
			void *kaddr;

			kaddr = kmap_atomic(page, KM_USER0);
			if (block_off_end > to)
				memset(kaddr + to, 0, block_off_end - to);
			if (block_off_start < from)
				memset(kaddr + block_off_start, 0,
				       from - block_off_start);
			flush_dcache_page(page);
			kunmap_atomic(kaddr, KM_USER0);
		}
		if (!isnew && !PageUptodate(page) &&
		    (block_off_end > to || block_off_start < from) &&
		    !test_range_bit(tree, block_start, cur_end,
				    EXTENT_UPTODATE, 1)) {
			u64 sector;
			u64 extent_offset = block_start - em->start;
			size_t iosize;
			sector = (em->block_start + extent_offset) >> 9;
			iosize = (cur_end - block_start + blocksize - 1) &
				~((u64)blocksize - 1);
			/*
			 * we've already got the extent locked, but we
			 * need to split the state such that our end_bio
			 * handler can clear the lock.
			 */
			set_extent_bit(tree, block_start,
				       block_start + iosize - 1,
				       EXTENT_LOCKED, 0, NULL, GFP_NOFS);
			ret = submit_extent_page(READ, tree, page,
					sector, iosize, page_offset, em->bdev,
					NULL, 1,
					end_bio_extent_preparewrite);
			iocount++;
			block_start = block_start + iosize;
		} else {
			set_extent_uptodate(tree, block_start, cur_end,
					    GFP_NOFS);
			unlock_extent(tree, block_start, cur_end, GFP_NOFS);
			block_start = cur_end + 1;
		}
		page_offset = block_start & (PAGE_CACHE_SIZE - 1);
		free_extent_map(em);
	}
	if (iocount) {
		wait_extent_bit(tree, orig_block_start,
				block_end, EXTENT_LOCKED);
	}
	check_page_uptodate(tree, page);
err:
	/* FIXME, zero out newly allocated blocks on error */
	return err;
}
EXPORT_SYMBOL(extent_prepare_write);
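
/*
 * Illustrative only: extent_prepare_write() and extent_commit_write()
 * are meant to be called as a pair from a filesystem's prepare_write
 * and commit_write address space operations.  A minimal sketch; the
 * btrfs_* names and the extent_tree member are assumptions here.
 */
#if 0
static int btrfs_prepare_write(struct file *file, struct page *page,
			       unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_prepare_write(&BTRFS_I(inode)->extent_tree,
				    inode, page, from, to, btrfs_get_extent);
}

static int btrfs_commit_write(struct file *file, struct page *page,
			      unsigned from, unsigned to)
{
	struct inode *inode = page->mapping->host;

	return extent_commit_write(&BTRFS_I(inode)->extent_tree,
				   inode, page, from, to);
}
#endif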

/*
 * a helper for releasepage.  As long as there are no locked extents
 * in the range corresponding to the page, both state records and extent
 * map records are removed
 */
int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
{
	struct extent_map *em;
	u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
	u64 end = start + PAGE_CACHE_SIZE - 1;
	u64 orig_start = start;
	int ret = 1;

	while (start <= end) {
		em = lookup_extent_mapping(tree, start, end);
		if (!em || IS_ERR(em))
			break;
		if (!test_range_bit(tree, em->start, em->end,
				    EXTENT_LOCKED, 0)) {
			remove_extent_mapping(tree, em);
			/* once for the rb tree */
			free_extent_map(em);
		}
		start = em->end + 1;
		/* once for us */
		free_extent_map(em);
	}
	if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
		ret = 0;
	else
		clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
				 1, 1, GFP_NOFS);
	return ret;
}
EXPORT_SYMBOL(try_release_extent_mapping);
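
/*
 * Illustrative only: a sketch of a releasepage method built on
 * try_release_extent_mapping().  The btrfs_releasepage() name and the
 * extra writeback/dirty guard are assumptions; the return convention
 * (nonzero means the page may be released) matches the helper above.
 */
#if 0
static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_map_tree *tree;

	if (PageWriteback(page) || PageDirty(page))
		return 0;
	tree = &BTRFS_I(page->mapping->host)->extent_tree;
	return try_release_extent_mapping(tree, page);
}
#endif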

sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
		     get_extent_t *get_extent)
{
	struct inode *inode = mapping->host;
	u64 start = iblock << inode->i_blkbits;
	u64 end = start + (1 << inode->i_blkbits) - 1;
	sector_t sector = 0;
	struct extent_map *em;

	em = get_extent(inode, NULL, 0, start, end, 0);
	if (!em || IS_ERR(em))
		return 0;

	if (em->block_start == EXTENT_MAP_INLINE ||
	    em->block_start == EXTENT_MAP_HOLE)
		goto out;

	sector = (em->block_start + start - em->start) >> inode->i_blkbits;
out:
	free_extent_map(em);
	return sector;
}

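/*
 * Illustrative only: extent_bmap() is shaped to slot straight into the
 * ->bmap address space operation; a sketch, again assuming a
 * btrfs_get_extent() callback:
 */
#if 0
static sector_t btrfs_bmap(struct address_space *mapping, sector_t iblock)
{
	return extent_bmap(mapping, iblock, btrfs_get_extent);
}
#endif
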
static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
{
	if (list_empty(&eb->lru)) {
		extent_buffer_get(eb);
		list_add(&eb->lru, &tree->buffer_lru);
		tree->lru_size++;
		if (tree->lru_size >= BUFFER_LRU_MAX) {
			struct extent_buffer *rm;
			rm = list_entry(tree->buffer_lru.prev,
					struct extent_buffer, lru);
			tree->lru_size--;
			list_del_init(&rm->lru);
			free_extent_buffer(rm);
		}
	} else
		list_move(&eb->lru, &tree->buffer_lru);
	return 0;
}

static struct extent_buffer *find_lru(struct extent_map_tree *tree,
				      u64 start, unsigned long len)
{
	struct list_head *lru = &tree->buffer_lru;
	struct list_head *cur = lru->next;
	struct extent_buffer *eb;

	if (list_empty(lru))
		return NULL;

	do {
		eb = list_entry(cur, struct extent_buffer, lru);
		if (eb->start == start && eb->len == len) {
			extent_buffer_get(eb);
			return eb;
		}
		cur = cur->next;
	} while (cur != lru);
	return NULL;
}

static inline unsigned long num_extent_pages(u64 start, u64 len)
{
	return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
		(start >> PAGE_CACHE_SHIFT);
}

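/*
 * Worked example of the page-count math above, assuming 4K pages: an
 * aligned buffer at start 8192 with len 4096 gives
 * ((8192 + 4096 + 4095) >> 12) - (8192 >> 12) = 3 - 2 = 1 page, while
 * an unaligned buffer at start 6144 with len 4096 gives
 * ((6144 + 4096 + 4095) >> 12) - (6144 >> 12) = 3 - 1 = 2 pages,
 * since it straddles a page boundary.
 */
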
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
					      unsigned long i)
{
	struct page *p;
	struct address_space *mapping;

	if (i == 0)
		return eb->first_page;
	i += eb->start >> PAGE_CACHE_SHIFT;
	mapping = eb->first_page->mapping;
	read_lock_irq(&mapping->tree_lock);
	p = radix_tree_lookup(&mapping->page_tree, i);
	read_unlock_irq(&mapping->tree_lock);
	return p;
}

static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
						   u64 start,
						   unsigned long len,
						   gfp_t mask)
{
	struct extent_buffer *eb = NULL;

	spin_lock(&tree->lru_lock);
	eb = find_lru(tree, start, len);
	spin_unlock(&tree->lru_lock);
	if (eb)
		return eb;

	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
	INIT_LIST_HEAD(&eb->lru);
	eb->start = start;
	eb->len = len;
	atomic_set(&eb->refs, 1);

	return eb;
}

static void __free_extent_buffer(struct extent_buffer *eb)
{
	kmem_cache_free(extent_buffer_cache, eb);
}

struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
					  u64 start, unsigned long len,
					  struct page *page0,
					  gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	if (page0) {
		eb->first_page = page0;
		i = 1;
		index++;
		page_cache_get(page0);
		mark_page_accessed(page0);
		set_page_extent_mapped(page0);
		set_page_private(page0, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
				 len << 2);
	} else {
		i = 0;
	}
	for (; i < num_pages; i++, index++) {
		p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
		if (!p) {
			WARN_ON(1);
			goto fail;
		}
		set_page_extent_mapped(p);
		mark_page_accessed(p);
		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}
		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;

fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 0; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(alloc_extent_buffer);

struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
					 u64 start, unsigned long len,
					 gfp_t mask)
{
	unsigned long num_pages = num_extent_pages(start, len);
	unsigned long i;
	unsigned long index = start >> PAGE_CACHE_SHIFT;
	struct extent_buffer *eb;
	struct page *p;
	struct address_space *mapping = tree->mapping;
	int uptodate = 1;

	eb = __alloc_extent_buffer(tree, start, len, mask);
	if (!eb || IS_ERR(eb))
		return NULL;

	if (eb->flags & EXTENT_BUFFER_FILLED)
		goto lru_add;

	for (i = 0; i < num_pages; i++, index++) {
		p = find_lock_page(mapping, index);
		if (!p)
			goto fail;
		set_page_extent_mapped(p);
		mark_page_accessed(p);

		if (i == 0) {
			eb->first_page = p;
			set_page_private(p, EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 len << 2);
		} else {
			set_page_private(p, EXTENT_PAGE_PRIVATE);
		}

		if (!PageUptodate(p))
			uptodate = 0;
		unlock_page(p);
	}
	if (uptodate)
		eb->flags |= EXTENT_UPTODATE;
	eb->flags |= EXTENT_BUFFER_FILLED;

lru_add:
	spin_lock(&tree->lru_lock);
	add_lru(tree, eb);
	spin_unlock(&tree->lru_lock);
	return eb;
fail:
	spin_lock(&tree->lru_lock);
	list_del_init(&eb->lru);
	spin_unlock(&tree->lru_lock);
	if (!atomic_dec_and_test(&eb->refs))
		return NULL;
	for (index = 0; index < i; index++) {
		page_cache_release(extent_buffer_page(eb, index));
	}
	__free_extent_buffer(eb);
	return NULL;
}
EXPORT_SYMBOL(find_extent_buffer);

void free_extent_buffer(struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	if (!eb)
		return;

	if (!atomic_dec_and_test(&eb->refs))
		return;

	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page_cache_release(extent_buffer_page(eb, i));
	}
	__free_extent_buffer(eb);
}
EXPORT_SYMBOL(free_extent_buffer);

int clear_extent_buffer_dirty(struct extent_map_tree *tree,
			      struct extent_buffer *eb)
{
	int set;
	unsigned long i;
	unsigned long num_pages;
	struct page *page;

	u64 start = eb->start;
	u64 end = start + eb->len - 1;

	set = clear_extent_dirty(tree, start, end, GFP_NOFS);
	num_pages = num_extent_pages(eb->start, eb->len);

	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		lock_page(page);
		/*
		 * if we're on the last page or the first page and the
		 * block isn't aligned on a page boundary, do extra checks
		 * to make sure we don't clean a page that is partially dirty
		 */
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			start = (u64)page->index << PAGE_CACHE_SHIFT;
			end = start + PAGE_CACHE_SIZE - 1;
			if (test_range_bit(tree, start, end,
					   EXTENT_DIRTY, 0)) {
				unlock_page(page);
				continue;
			}
		}
		clear_page_dirty_for_io(page);
		unlock_page(page);
	}
	return 0;
}
EXPORT_SYMBOL(clear_extent_buffer_dirty);

int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
				    struct extent_buffer *eb)
{
	return wait_on_extent_writeback(tree, eb->start,
					eb->start + eb->len - 1);
}
EXPORT_SYMBOL(wait_on_extent_buffer_writeback);

int set_extent_buffer_dirty(struct extent_map_tree *tree,
			    struct extent_buffer *eb)
{
	unsigned long i;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = 0; i < num_pages; i++) {
		struct page *page = extent_buffer_page(eb, i);
		/* writepage may need to do something special for the
		 * first page, we have to make sure page->private is
		 * properly set.  releasepage may drop page->private
		 * on us if the page isn't already dirty.
		 */
		if (i == 0) {
			lock_page(page);
			set_page_private(page,
					 EXTENT_PAGE_PRIVATE_FIRST_PAGE |
					 eb->len << 2);
		}
		__set_page_dirty_nobuffers(extent_buffer_page(eb, i));
		if (i == 0)
			unlock_page(page);
	}
	return set_extent_dirty(tree, eb->start,
				eb->start + eb->len - 1, GFP_NOFS);
}
EXPORT_SYMBOL(set_extent_buffer_dirty);

int set_extent_buffer_uptodate(struct extent_map_tree *tree,
			       struct extent_buffer *eb)
{
	unsigned long i;
	struct page *page;
	unsigned long num_pages;

	num_pages = num_extent_pages(eb->start, eb->len);

	set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
			    GFP_NOFS);
	for (i = 0; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
		    ((i == num_pages - 1) &&
		     ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
			check_page_uptodate(tree, page);
			continue;
		}
		SetPageUptodate(page);
	}
	return 0;
}
EXPORT_SYMBOL(set_extent_buffer_uptodate);

int extent_buffer_uptodate(struct extent_map_tree *tree,
			   struct extent_buffer *eb)
{
	if (eb->flags & EXTENT_UPTODATE)
		return 1;
	return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
			      EXTENT_UPTODATE, 1);
}
EXPORT_SYMBOL(extent_buffer_uptodate);

int read_extent_buffer_pages(struct extent_map_tree *tree,
			     struct extent_buffer *eb,
			     u64 start,
			     int wait)
{
	unsigned long i;
	unsigned long start_i;
	struct page *page;
	int err;
	int ret = 0;
	unsigned long num_pages;

	if (eb->flags & EXTENT_UPTODATE)
		return 0;

	/* note: this range-bit shortcut is intentionally compiled out */
	if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
				EXTENT_UPTODATE, 1)) {
		return 0;
	}
	if (start) {
		WARN_ON(start < eb->start);
		start_i = (start >> PAGE_CACHE_SHIFT) -
			(eb->start >> PAGE_CACHE_SHIFT);
	} else {
		start_i = 0;
	}

	num_pages = num_extent_pages(eb->start, eb->len);
	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		if (PageUptodate(page)) {
			continue;
		}
		if (!wait) {
			if (TestSetPageLocked(page)) {
				continue;
			}
		} else {
			lock_page(page);
		}
		if (!PageUptodate(page)) {
			err = page->mapping->a_ops->readpage(NULL, page);
			if (err) {
				ret = err;
			}
		} else {
			unlock_page(page);
		}
	}

	if (ret || !wait) {
		return ret;
	}

	for (i = start_i; i < num_pages; i++) {
		page = extent_buffer_page(eb, i);
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			ret = -EIO;
		}
	}
	if (!ret)
		eb->flags |= EXTENT_UPTODATE;
	return ret;
}
EXPORT_SYMBOL(read_extent_buffer_pages);
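
/*
 * Illustrative only: the usual lifecycle of an extent buffer with the
 * routines above -- allocate (or find) it, make sure its pages are
 * read, use the contents, then drop the reference.  A sketch with a
 * made-up caller; the name example_read_block() is not part of this
 * file.
 */
#if 0
static int example_read_block(struct extent_map_tree *tree, u64 start,
			      unsigned long blocksize, char *out)
{
	struct extent_buffer *eb;
	int ret;

	eb = alloc_extent_buffer(tree, start, blocksize, NULL, GFP_NOFS);
	if (!eb)
		return -ENOMEM;
	/* wait == 1: block until every page is up to date */
	ret = read_extent_buffer_pages(tree, eb, 0, 1);
	if (ret == 0)
		read_extent_buffer(eb, out, 0, blocksize);
	free_extent_buffer(eb);
	return ret;
}
#endif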

void read_extent_buffer(struct extent_buffer *eb, void *dstv,
			unsigned long start,
			unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *dst = (char *)dstv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long num_pages = num_extent_pages(eb->start, eb->len);

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		if (!PageUptodate(page)) {
			printk("page %lu not up to date i %lu, total %lu, len %lu\n",
			       page->index, i, num_pages, eb->len);
			WARN_ON(1);
		}
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(dst, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER1);

		dst += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(read_extent_buffer);

int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
			      unsigned long min_len, char **token, char **map,
			      unsigned long *map_start,
			      unsigned long *map_len, int km)
{
	size_t offset = start & (PAGE_CACHE_SIZE - 1);
	char *kaddr;
	struct page *p;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	unsigned long end_i = (start_offset + start + min_len - 1) >>
		PAGE_CACHE_SHIFT;

	if (i != end_i)
		return -EINVAL;

	if (i == 0) {
		offset = start_offset;
		*map_start = 0;
	} else {
		offset = 0;
		*map_start = (i << PAGE_CACHE_SHIFT) - start_offset;
	}
	if (start + min_len > eb->len) {
		printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n",
		       eb->start, eb->len, start, min_len);
		WARN_ON(1);
	}

	p = extent_buffer_page(eb, i);
	WARN_ON(!PageUptodate(p));
	kaddr = kmap_atomic(p, km);
	*token = kaddr;
	*map = kaddr + offset;
	*map_len = PAGE_CACHE_SIZE - offset;
	return 0;
}
EXPORT_SYMBOL(map_private_extent_buffer);

int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
		      unsigned long min_len,
		      char **token, char **map,
		      unsigned long *map_start,
		      unsigned long *map_len, int km)
{
	int err;
	int save = 0;

	if (eb->map_token) {
		unmap_extent_buffer(eb, eb->map_token, km);
		eb->map_token = NULL;
		save = 1;
	}
	err = map_private_extent_buffer(eb, start, min_len, token, map,
					map_start, map_len, km);
	if (!err && save) {
		eb->map_token = *token;
		eb->kaddr = *map;
		eb->map_start = *map_start;
		eb->map_len = *map_len;
	}
	return err;
}
EXPORT_SYMBOL(map_extent_buffer);

void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
{
	kunmap_atomic(token, km);
}
EXPORT_SYMBOL(unmap_extent_buffer);
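
/*
 * Illustrative only: the map/unmap pair hands callers a kmapped window
 * onto one page of the buffer; min_len must not cross a page boundary
 * or map_private_extent_buffer() returns -EINVAL.  A sketch with a
 * made-up caller:
 */
#if 0
static int example_peek_byte(struct extent_buffer *eb, unsigned long offset)
{
	char *token;
	char *kaddr;
	unsigned long map_start;
	unsigned long map_len;
	int err;
	int byte;

	err = map_extent_buffer(eb, offset, 1, &token, &kaddr,
				&map_start, &map_len, KM_USER1);
	if (err)
		return err;
	byte = kaddr[0];	/* kaddr points at byte 'offset' of eb */
	unmap_extent_buffer(eb, token, KM_USER1);
	return byte;
}
#endif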

int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
			 unsigned long start,
			 unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *ptr = (char *)ptrv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
	int ret = 0;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		ret = memcmp(ptr, kaddr + offset, cur);
		kunmap_atomic(kaddr, KM_USER0);
		if (ret)
			break;

		ptr += cur;
		len -= cur;
		offset = 0;
		i++;
	}
	return ret;
}
EXPORT_SYMBOL(memcmp_extent_buffer);

void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
			 unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	char *src = (char *)srcv;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER1);
		memcpy(kaddr + offset, src, cur);
		kunmap_atomic(kaddr, KM_USER1);

		src += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(write_extent_buffer);

void memset_extent_buffer(struct extent_buffer *eb, char c,
			  unsigned long start, unsigned long len)
{
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;

	WARN_ON(start > eb->len);
	WARN_ON(start + len > eb->start + eb->len);

	offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(eb, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, PAGE_CACHE_SIZE - offset);
		kaddr = kmap_atomic(page, KM_USER0);
		memset(kaddr + offset, c, cur);
		kunmap_atomic(kaddr, KM_USER0);

		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(memset_extent_buffer);

void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
			unsigned long dst_offset, unsigned long src_offset,
			unsigned long len)
{
	u64 dst_len = dst->len;
	size_t cur;
	size_t offset;
	struct page *page;
	char *kaddr;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;

	WARN_ON(src->len != dst_len);

	offset = (start_offset + dst_offset) &
		((unsigned long)PAGE_CACHE_SIZE - 1);

	while (len > 0) {
		page = extent_buffer_page(dst, i);
		WARN_ON(!PageUptodate(page));

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));

		kaddr = kmap_atomic(page, KM_USER0);
		read_extent_buffer(src, kaddr + offset, src_offset, cur);
		kunmap_atomic(kaddr, KM_USER0);

		src_offset += cur;
		len -= cur;
		offset = 0;
		i++;
	}
}
EXPORT_SYMBOL(copy_extent_buffer);

static void move_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);

	if (dst_page == src_page) {
		memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
	} else {
		char *src_kaddr = kmap_atomic(src_page, KM_USER1);
		char *p = dst_kaddr + dst_off + len;
		char *s = src_kaddr + src_off + len;

		/* copy backwards so overlapping ranges stay safe */
		while (len--)
			*--p = *--s;

		kunmap_atomic(src_kaddr, KM_USER1);
	}
	kunmap_atomic(dst_kaddr, KM_USER0);
}

static void copy_pages(struct page *dst_page, struct page *src_page,
		       unsigned long dst_off, unsigned long src_off,
		       unsigned long len)
{
	char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
	char *src_kaddr;

	if (dst_page != src_page)
		src_kaddr = kmap_atomic(src_page, KM_USER1);
	else
		src_kaddr = dst_kaddr;

	memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
	kunmap_atomic(dst_kaddr, KM_USER0);
	if (dst_page != src_page)
		kunmap_atomic(src_kaddr, KM_USER1);
}

void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			  unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}

	while (len > 0) {
		dst_off_in_page = (start_offset + dst_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_offset) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;

		cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
					       src_off_in_page));
		cur = min_t(unsigned long, cur,
			    (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));

		copy_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page, src_off_in_page, cur);

		src_offset += cur;
		dst_offset += cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memcpy_extent_buffer);

void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
			   unsigned long src_offset, unsigned long len)
{
	size_t cur;
	size_t dst_off_in_page;
	size_t src_off_in_page;
	unsigned long dst_end = dst_offset + len - 1;
	unsigned long src_end = src_offset + len - 1;
	size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
	unsigned long dst_i;
	unsigned long src_i;

	if (src_offset + len > dst->len) {
		printk("memmove bogus src_offset %lu move len %lu len %lu\n",
		       src_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset + len > dst->len) {
		printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
		       dst_offset, len, dst->len);
		BUG_ON(1);
	}
	if (dst_offset < src_offset) {
		memcpy_extent_buffer(dst, dst_offset, src_offset, len);
		return;
	}
	while (len > 0) {
		dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
		src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;

		dst_off_in_page = (start_offset + dst_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);
		src_off_in_page = (start_offset + src_end) &
			((unsigned long)PAGE_CACHE_SIZE - 1);

		cur = min_t(unsigned long, len, src_off_in_page + 1);
		cur = min(cur, dst_off_in_page + 1);
		move_pages(extent_buffer_page(dst, dst_i),
			   extent_buffer_page(dst, src_i),
			   dst_off_in_page - cur + 1,
			   src_off_in_page - cur + 1, cur);

		dst_end -= cur;
		src_end -= cur;
		len -= cur;
	}
}
EXPORT_SYMBOL(memmove_extent_buffer);
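
/*
 * Worked example of the overlap handling above: with dst_offset 10,
 * src_offset 0 and len 100 the ranges overlap and dst > src, so the
 * copy runs back-to-front through move_pages(); with dst_offset 0 and
 * src_offset 10 the routine falls through to memcpy_extent_buffer(),
 * where a front-to-back copy is safe.
 */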