Btrfs: Fix extent_buffer usage when nodesize != leafsize
[deliverable/linux.git] / fs/btrfs/extent_map.c
1 #include <linux/bitops.h>
2 #include <linux/slab.h>
3 #include <linux/bio.h>
4 #include <linux/mm.h>
5 #include <linux/gfp.h>
6 #include <linux/pagemap.h>
7 #include <linux/page-flags.h>
8 #include <linux/module.h>
9 #include <linux/spinlock.h>
10 #include <linux/blkdev.h>
11 #include <linux/swap.h>
12 #include <linux/version.h>
13 #include <linux/writeback.h>
14 #include <linux/pagevec.h>
15 #include "extent_map.h"
16
17 /* temporary prototype until extent_map moves out of btrfs */
18 struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
19 unsigned long extra_flags,
20 void (*ctor)(void *, struct kmem_cache *,
21 unsigned long));
22
23 static struct kmem_cache *extent_map_cache;
24 static struct kmem_cache *extent_state_cache;
25 static struct kmem_cache *extent_buffer_cache;
26
27 static LIST_HEAD(buffers);
28 static LIST_HEAD(states);
29
30 static DEFINE_SPINLOCK(state_lock);
31 #define BUFFER_LRU_MAX 64
32
33 struct tree_entry {
34 u64 start;
35 u64 end;
36 int in_tree;
37 struct rb_node rb_node;
38 };
39
40 struct extent_page_data {
41 struct bio *bio;
42 struct extent_map_tree *tree;
43 get_extent_t *get_extent;
44 };
45
46 int __init extent_map_init(void)
47 {
48 extent_map_cache = btrfs_cache_create("extent_map",
49 sizeof(struct extent_map), 0,
50 NULL);
51 if (!extent_map_cache)
52 return -ENOMEM;
53 extent_state_cache = btrfs_cache_create("extent_state",
54 sizeof(struct extent_state), 0,
55 NULL);
56 if (!extent_state_cache)
57 goto free_map_cache;
58 extent_buffer_cache = btrfs_cache_create("extent_buffers",
59 sizeof(struct extent_buffer), 0,
60 NULL);
61 if (!extent_buffer_cache)
62 goto free_state_cache;
63 return 0;
64
65 free_state_cache:
66 kmem_cache_destroy(extent_state_cache);
67 free_map_cache:
68 kmem_cache_destroy(extent_map_cache);
69 return -ENOMEM;
70 }
71
72 void extent_map_exit(void)
73 {
74 struct extent_state *state;
75
76 while (!list_empty(&states)) {
77 state = list_entry(states.next, struct extent_state, list);
78 printk("state leak: start %Lu end %Lu state %lu in tree %d refs %d\n",
       state->start, state->end, state->state, state->in_tree,
       atomic_read(&state->refs));
79 list_del(&state->list);
80 kmem_cache_free(extent_state_cache, state);
81
82 }
83
84 if (extent_map_cache)
85 kmem_cache_destroy(extent_map_cache);
86 if (extent_state_cache)
87 kmem_cache_destroy(extent_state_cache);
88 if (extent_buffer_cache)
89 kmem_cache_destroy(extent_buffer_cache);
90 }
91
92 void extent_map_tree_init(struct extent_map_tree *tree,
93 struct address_space *mapping, gfp_t mask)
94 {
95 tree->map.rb_node = NULL;
96 tree->state.rb_node = NULL;
97 tree->ops = NULL;
98 tree->dirty_bytes = 0;
99 rwlock_init(&tree->lock);
100 spin_lock_init(&tree->lru_lock);
101 tree->mapping = mapping;
102 INIT_LIST_HEAD(&tree->buffer_lru);
103 tree->lru_size = 0;
104 }
105 EXPORT_SYMBOL(extent_map_tree_init);
106
107 void extent_map_tree_empty_lru(struct extent_map_tree *tree)
108 {
109 struct extent_buffer *eb;
110 while(!list_empty(&tree->buffer_lru)) {
111 eb = list_entry(tree->buffer_lru.next, struct extent_buffer,
112 lru);
113 list_del_init(&eb->lru);
114 free_extent_buffer(eb);
115 }
116 }
117 EXPORT_SYMBOL(extent_map_tree_empty_lru);
118
119 struct extent_map *alloc_extent_map(gfp_t mask)
120 {
121 struct extent_map *em;
122 em = kmem_cache_alloc(extent_map_cache, mask);
123 if (!em || IS_ERR(em))
124 return em;
125 em->in_tree = 0;
126 atomic_set(&em->refs, 1);
127 return em;
128 }
129 EXPORT_SYMBOL(alloc_extent_map);
130
131 void free_extent_map(struct extent_map *em)
132 {
133 if (!em)
134 return;
135 if (atomic_dec_and_test(&em->refs)) {
136 WARN_ON(em->in_tree);
137 kmem_cache_free(extent_map_cache, em);
138 }
139 }
140 EXPORT_SYMBOL(free_extent_map);
141
142
143 struct extent_state *alloc_extent_state(gfp_t mask)
144 {
145 struct extent_state *state;
146 unsigned long flags;
147
148 state = kmem_cache_alloc(extent_state_cache, mask);
149 if (!state || IS_ERR(state))
150 return state;
151 state->state = 0;
152 state->in_tree = 0;
153 state->private = 0;
154
155 spin_lock_irqsave(&state_lock, flags);
156 list_add(&state->list, &states);
157 spin_unlock_irqrestore(&state_lock, flags);
158
159 atomic_set(&state->refs, 1);
160 init_waitqueue_head(&state->wq);
161 return state;
162 }
163 EXPORT_SYMBOL(alloc_extent_state);
164
165 void free_extent_state(struct extent_state *state)
166 {
167 unsigned long flags;
168 if (!state)
169 return;
170 if (atomic_dec_and_test(&state->refs)) {
171 WARN_ON(state->in_tree);
172 spin_lock_irqsave(&state_lock, flags);
173 list_del(&state->list);
174 spin_unlock_irqrestore(&state_lock, flags);
175 kmem_cache_free(extent_state_cache, state);
176 }
177 }
178 EXPORT_SYMBOL(free_extent_state);
179
180 static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
181 struct rb_node *node)
182 {
183 struct rb_node ** p = &root->rb_node;
184 struct rb_node * parent = NULL;
185 struct tree_entry *entry;
186
187 while(*p) {
188 parent = *p;
189 entry = rb_entry(parent, struct tree_entry, rb_node);
190
191 if (offset < entry->start)
192 p = &(*p)->rb_left;
193 else if (offset > entry->end)
194 p = &(*p)->rb_right;
195 else
196 return parent;
197 }
198
199 entry = rb_entry(node, struct tree_entry, rb_node);
200 entry->in_tree = 1;
201 rb_link_node(node, parent, p);
202 rb_insert_color(node, root);
203 return NULL;
204 }
205
206 static struct rb_node *__tree_search(struct rb_root *root, u64 offset,
207 struct rb_node **prev_ret)
208 {
209 struct rb_node * n = root->rb_node;
210 struct rb_node *prev = NULL;
211 struct tree_entry *entry;
212 struct tree_entry *prev_entry = NULL;
213
214 while(n) {
215 entry = rb_entry(n, struct tree_entry, rb_node);
216 prev = n;
217 prev_entry = entry;
218
219 if (offset < entry->start)
220 n = n->rb_left;
221 else if (offset > entry->end)
222 n = n->rb_right;
223 else
224 return n;
225 }
226 if (!prev_ret)
227 return NULL;
228 while(prev && offset > prev_entry->end) {
229 prev = rb_next(prev);
230 prev_entry = rb_entry(prev, struct tree_entry, rb_node);
231 }
232 *prev_ret = prev;
233 return NULL;
234 }
235
236 static inline struct rb_node *tree_search(struct rb_root *root, u64 offset)
237 {
238 struct rb_node *prev;
239 struct rb_node *ret;
240 ret = __tree_search(root, offset, &prev);
241 if (!ret)
242 return prev;
243 return ret;
244 }
245
246 static int tree_delete(struct rb_root *root, u64 offset)
247 {
248 struct rb_node *node;
249 struct tree_entry *entry;
250
251 node = __tree_search(root, offset, NULL);
252 if (!node)
253 return -ENOENT;
254 entry = rb_entry(node, struct tree_entry, rb_node);
255 entry->in_tree = 0;
256 rb_erase(node, root);
257 return 0;
258 }
259
260 /*
261 * add_extent_mapping tries a simple backward merge with existing
262 * mappings. The extent_map struct passed in will be inserted into
263 * the tree directly (no copies made, just a reference taken).
264 */
265 int add_extent_mapping(struct extent_map_tree *tree,
266 struct extent_map *em)
267 {
268 int ret = 0;
269 struct extent_map *prev = NULL;
270 struct rb_node *rb;
271
272 write_lock_irq(&tree->lock);
273 rb = tree_insert(&tree->map, em->end, &em->rb_node);
274 if (rb) {
275 prev = rb_entry(rb, struct extent_map, rb_node);
276 ret = -EEXIST;
277 goto out;
278 }
279 atomic_inc(&em->refs);
280 if (em->start != 0) {
281 rb = rb_prev(&em->rb_node);
282 if (rb)
283 prev = rb_entry(rb, struct extent_map, rb_node);
284 if (prev && prev->end + 1 == em->start &&
285 ((em->block_start == EXTENT_MAP_HOLE &&
286 prev->block_start == EXTENT_MAP_HOLE) ||
287 (em->block_start == EXTENT_MAP_INLINE &&
288 prev->block_start == EXTENT_MAP_INLINE) ||
289 (em->block_start == EXTENT_MAP_DELALLOC &&
290 prev->block_start == EXTENT_MAP_DELALLOC) ||
291 (em->block_start < EXTENT_MAP_DELALLOC - 1 &&
292 em->block_start == prev->block_end + 1))) {
293 em->start = prev->start;
294 em->block_start = prev->block_start;
295 rb_erase(&prev->rb_node, &tree->map);
296 prev->in_tree = 0;
297 free_extent_map(prev);
298 }
299 }
300 out:
301 write_unlock_irq(&tree->lock);
302 return ret;
303 }
304 EXPORT_SYMBOL(add_extent_mapping);
305
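/*
 * Illustrative sketch only (not part of the original file, never called by
 * btrfs): a typical caller allocates an extent_map, fills in the range and
 * block mapping, and lets add_extent_mapping() take its own reference on
 * success.  The helper name and field values here are made up for the
 * example; a real caller also supplies the block device.
 */
static inline int example_insert_mapping(struct extent_map_tree *tree,
					 u64 start, u64 end, u64 block_start)
{
	struct extent_map *em;
	int ret;

	em = alloc_extent_map(GFP_NOFS);
	if (!em)
		return -ENOMEM;
	em->start = start;
	em->end = end;
	em->block_start = block_start;
	em->block_end = block_start + (end - start);
	em->bdev = NULL;	/* a real caller supplies the bdev */

	ret = add_extent_mapping(tree, em);
	/* -EEXIST means an overlapping mapping is already in the tree */

	/* drop the allocation reference; the tree holds its own on success */
	free_extent_map(em);
	return ret;
}
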
306 /*
307 * lookup_extent_mapping returns the first extent_map struct in the
308 * tree that intersects the [start, end] (inclusive) range. There may
309 * be additional objects in the tree that intersect, so check the object
310 * returned carefully to make sure you don't need additional lookups.
311 */
312 struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree,
313 u64 start, u64 end)
314 {
315 struct extent_map *em;
316 struct rb_node *rb_node;
317
318 read_lock_irq(&tree->lock);
319 rb_node = tree_search(&tree->map, start);
320 if (!rb_node) {
321 em = NULL;
322 goto out;
323 }
324 if (IS_ERR(rb_node)) {
325 em = ERR_PTR(PTR_ERR(rb_node));
326 goto out;
327 }
328 em = rb_entry(rb_node, struct extent_map, rb_node);
329 if (em->end < start || em->start > end) {
330 em = NULL;
331 goto out;
332 }
333 atomic_inc(&em->refs);
334 out:
335 read_unlock_irq(&tree->lock);
336 return em;
337 }
338 EXPORT_SYMBOL(lookup_extent_mapping);
339
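/*
 * Illustrative sketch only (not part of the original file): because
 * lookup_extent_mapping() hands back just the first overlapping mapping, a
 * caller that needs the whole [start, end] range walks it piece by piece,
 * dropping the reference it was given each time.  The helper name is made
 * up for the example.
 */
static inline void example_walk_mappings(struct extent_map_tree *tree,
					 u64 start, u64 end)
{
	struct extent_map *em;
	u64 cur = start;

	while (cur <= end) {
		em = lookup_extent_mapping(tree, cur, end);
		if (!em || IS_ERR(em))
			break;
		/* [em->start, em->end] overlaps [cur, end]; use it here */
		cur = em->end + 1;
		free_extent_map(em);
	}
}
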
340 /*
341 * removes an extent_map struct from the tree. No reference counts are
342 * dropped, and no checks are done to see if the range is in use
343 */
344 int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
345 {
346 int ret;
347
348 write_lock_irq(&tree->lock);
349 ret = tree_delete(&tree->map, em->end);
350 write_unlock_irq(&tree->lock);
351 return ret;
352 }
353 EXPORT_SYMBOL(remove_extent_mapping);
354
355 /*
356 * utility function to look for merge candidates inside a given range.
357 * Any extents with matching state are merged together into a single
358  * extent in the tree. Extents with EXTENT_IOBITS set in their state field
359 * are not merged because the end_io handlers need to be able to do
360 * operations on them without sleeping (or doing allocations/splits).
361 *
362 * This should be called with the tree lock held.
363 */
364 static int merge_state(struct extent_map_tree *tree,
365 struct extent_state *state)
366 {
367 struct extent_state *other;
368 struct rb_node *other_node;
369
370 if (state->state & EXTENT_IOBITS)
371 return 0;
372
373 other_node = rb_prev(&state->rb_node);
374 if (other_node) {
375 other = rb_entry(other_node, struct extent_state, rb_node);
376 if (other->end == state->start - 1 &&
377 other->state == state->state) {
378 state->start = other->start;
379 other->in_tree = 0;
380 rb_erase(&other->rb_node, &tree->state);
381 free_extent_state(other);
382 }
383 }
384 other_node = rb_next(&state->rb_node);
385 if (other_node) {
386 other = rb_entry(other_node, struct extent_state, rb_node);
387 if (other->start == state->end + 1 &&
388 other->state == state->state) {
389 other->start = state->start;
390 state->in_tree = 0;
391 rb_erase(&state->rb_node, &tree->state);
392 free_extent_state(state);
393 }
394 }
395 return 0;
396 }
397
398 /*
399 * insert an extent_state struct into the tree. 'bits' are set on the
400 * struct before it is inserted.
401 *
402 * This may return -EEXIST if the extent is already there, in which case the
403 * state struct is freed.
404 *
405 * The tree lock is not taken internally. This is a utility function and
406 * probably isn't what you want to call (see set/clear_extent_bit).
407 */
408 static int insert_state(struct extent_map_tree *tree,
409 struct extent_state *state, u64 start, u64 end,
410 int bits)
411 {
412 struct rb_node *node;
413
414 if (end < start) {
415 printk("end < start %Lu %Lu\n", end, start);
416 WARN_ON(1);
417 }
418 if (bits & EXTENT_DIRTY)
419 tree->dirty_bytes += end - start + 1;
420 state->state |= bits;
421 state->start = start;
422 state->end = end;
423 node = tree_insert(&tree->state, end, &state->rb_node);
424 if (node) {
425 struct extent_state *found;
426 found = rb_entry(node, struct extent_state, rb_node);
427 printk("found node %Lu %Lu on insert of %Lu %Lu\n",
       found->start, found->end, start, end);
428 free_extent_state(state);
429 return -EEXIST;
430 }
431 merge_state(tree, state);
432 return 0;
433 }
434
435 /*
436 * split a given extent state struct in two, inserting the preallocated
437 * struct 'prealloc' as the newly created second half. 'split' indicates an
438 * offset inside 'orig' where it should be split.
439 *
440 * Before calling,
441 * the tree has 'orig' at [orig->start, orig->end]. After calling, there
442 * are two extent state structs in the tree:
443 * prealloc: [orig->start, split - 1]
444 * orig: [ split, orig->end ]
445 *
446 * The tree locks are not taken by this function. They need to be held
447 * by the caller.
448 */
449 static int split_state(struct extent_map_tree *tree, struct extent_state *orig,
450 struct extent_state *prealloc, u64 split)
451 {
452 struct rb_node *node;
453 prealloc->start = orig->start;
454 prealloc->end = split - 1;
455 prealloc->state = orig->state;
456 orig->start = split;
457
458 node = tree_insert(&tree->state, prealloc->end, &prealloc->rb_node);
459 if (node) {
460 struct extent_state *found;
461 found = rb_entry(node, struct extent_state, rb_node);
462 printk("found node %Lu %Lu on insert of %Lu %Lu\n",
       found->start, found->end, prealloc->start, prealloc->end);
463 free_extent_state(prealloc);
464 return -EEXIST;
465 }
466 return 0;
467 }
468
469 /*
470 * utility function to clear some bits in an extent state struct.
471  * it will optionally wake up anyone waiting on this state (wake == 1), or
472 * forcibly remove the state from the tree (delete == 1).
473 *
474 * If no bits are set on the state struct after clearing things, the
475 * struct is freed and removed from the tree
476 */
477 static int clear_state_bit(struct extent_map_tree *tree,
478 struct extent_state *state, int bits, int wake,
479 int delete)
480 {
481 int ret = state->state & bits;
482
483 if ((bits & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
484 u64 range = state->end - state->start + 1;
485 WARN_ON(range > tree->dirty_bytes);
486 tree->dirty_bytes -= range;
487 }
488 state->state &= ~bits;
489 if (wake)
490 wake_up(&state->wq);
491 if (delete || state->state == 0) {
492 if (state->in_tree) {
493 rb_erase(&state->rb_node, &tree->state);
494 state->in_tree = 0;
495 free_extent_state(state);
496 } else {
497 WARN_ON(1);
498 }
499 } else {
500 merge_state(tree, state);
501 }
502 return ret;
503 }
504
505 /*
506 * clear some bits on a range in the tree. This may require splitting
507 * or inserting elements in the tree, so the gfp mask is used to
508 * indicate which allocations or sleeping are allowed.
509 *
510 * pass 'wake' == 1 to kick any sleepers, and 'delete' == 1 to remove
511 * the given range from the tree regardless of state (ie for truncate).
512 *
513 * the range [start, end] is inclusive.
514 *
515 * This takes the tree lock, and returns < 0 on error, > 0 if any of the
516 * bits were already set, or zero if none of the bits were already set.
517 */
518 int clear_extent_bit(struct extent_map_tree *tree, u64 start, u64 end,
519 int bits, int wake, int delete, gfp_t mask)
520 {
521 struct extent_state *state;
522 struct extent_state *prealloc = NULL;
523 struct rb_node *node;
524 unsigned long flags;
525 int err;
526 int set = 0;
527
528 again:
529 if (!prealloc && (mask & __GFP_WAIT)) {
530 prealloc = alloc_extent_state(mask);
531 if (!prealloc)
532 return -ENOMEM;
533 }
534
535 write_lock_irqsave(&tree->lock, flags);
536 /*
537 * this search will find the extents that end after
538 * our range starts
539 */
540 node = tree_search(&tree->state, start);
541 if (!node)
542 goto out;
543 state = rb_entry(node, struct extent_state, rb_node);
544 if (state->start > end)
545 goto out;
546 WARN_ON(state->end < start);
547
548 /*
549 * | ---- desired range ---- |
550 * | state | or
551 * | ------------- state -------------- |
552 *
553 * We need to split the extent we found, and may flip
554 * bits on second half.
555 *
556 * If the extent we found extends past our range, we
557 * just split and search again. It'll get split again
558 * the next time though.
559 *
560 * If the extent we found is inside our range, we clear
561 * the desired bit on it.
562 */
563
564 if (state->start < start) {
565 err = split_state(tree, state, prealloc, start);
566 BUG_ON(err == -EEXIST);
567 prealloc = NULL;
568 if (err)
569 goto out;
570 if (state->end <= end) {
571 start = state->end + 1;
572 set |= clear_state_bit(tree, state, bits,
573 wake, delete);
574 } else {
575 start = state->start;
576 }
577 goto search_again;
578 }
579 /*
580 * | ---- desired range ---- |
581 * | state |
582 * We need to split the extent, and clear the bit
583 * on the first half
584 */
585 if (state->start <= end && state->end > end) {
586 err = split_state(tree, state, prealloc, end + 1);
587 BUG_ON(err == -EEXIST);
588
589 if (wake)
590 wake_up(&state->wq);
591 set |= clear_state_bit(tree, prealloc, bits,
592 wake, delete);
593 prealloc = NULL;
594 goto out;
595 }
596
597 start = state->end + 1;
598 set |= clear_state_bit(tree, state, bits, wake, delete);
599 goto search_again;
600
601 out:
602 write_unlock_irqrestore(&tree->lock, flags);
603 if (prealloc)
604 free_extent_state(prealloc);
605
606 return set;
607
608 search_again:
609 if (start > end)
610 goto out;
611 write_unlock_irqrestore(&tree->lock, flags);
612 if (mask & __GFP_WAIT)
613 cond_resched();
614 goto again;
615 }
616 EXPORT_SYMBOL(clear_extent_bit);
617
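/*
 * Illustrative sketch only (not part of the original file): the return value
 * of clear_extent_bit() tells the caller whether any of the bits it asked to
 * clear were actually set, which makes "was this range dirty?" style checks
 * cheap.  The helper name is made up for the example.
 */
static inline int example_range_was_dirty(struct extent_map_tree *tree,
					  u64 start, u64 end)
{
	int ret;

	ret = clear_extent_bit(tree, start, end, EXTENT_DIRTY, 0, 0, GFP_NOFS);
	if (ret < 0)
		return ret;	/* allocation failure while splitting states */
	return ret > 0;		/* 1 if EXTENT_DIRTY was set anywhere in range */
}
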
618 static int wait_on_state(struct extent_map_tree *tree,
619 struct extent_state *state)
620 {
621 DEFINE_WAIT(wait);
622 prepare_to_wait(&state->wq, &wait, TASK_UNINTERRUPTIBLE);
623 read_unlock_irq(&tree->lock);
624 schedule();
625 read_lock_irq(&tree->lock);
626 finish_wait(&state->wq, &wait);
627 return 0;
628 }
629
630 /*
631 * waits for one or more bits to clear on a range in the state tree.
632 * The range [start, end] is inclusive.
633 * The tree lock is taken by this function
634 */
635 int wait_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits)
636 {
637 struct extent_state *state;
638 struct rb_node *node;
639
640 read_lock_irq(&tree->lock);
641 again:
642 while (1) {
643 /*
644 * this search will find all the extents that end after
645 * our range starts
646 */
647 node = tree_search(&tree->state, start);
648 if (!node)
649 break;
650
651 state = rb_entry(node, struct extent_state, rb_node);
652
653 if (state->start > end)
654 goto out;
655
656 if (state->state & bits) {
657 start = state->start;
658 atomic_inc(&state->refs);
659 wait_on_state(tree, state);
660 free_extent_state(state);
661 goto again;
662 }
663 start = state->end + 1;
664
665 if (start > end)
666 break;
667
668 if (need_resched()) {
669 read_unlock_irq(&tree->lock);
670 cond_resched();
671 read_lock_irq(&tree->lock);
672 }
673 }
674 out:
675 read_unlock_irq(&tree->lock);
676 return 0;
677 }
678 EXPORT_SYMBOL(wait_extent_bit);
679
680 static void set_state_bits(struct extent_map_tree *tree,
681 struct extent_state *state,
682 int bits)
683 {
684 if ((bits & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
685 u64 range = state->end - state->start + 1;
686 tree->dirty_bytes += range;
687 }
688 state->state |= bits;
689 }
690
691 /*
692 * set some bits on a range in the tree. This may require allocations
693 * or sleeping, so the gfp mask is used to indicate what is allowed.
694 *
695 * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
696 * range already has the desired bits set. The start of the existing
697 * range is returned in failed_start in this case.
698 *
699 * [start, end] is inclusive
700 * This takes the tree lock.
701 */
702 int set_extent_bit(struct extent_map_tree *tree, u64 start, u64 end, int bits,
703 int exclusive, u64 *failed_start, gfp_t mask)
704 {
705 struct extent_state *state;
706 struct extent_state *prealloc = NULL;
707 struct rb_node *node;
708 unsigned long flags;
709 int err = 0;
710 int set;
711 u64 last_start;
712 u64 last_end;
713 again:
714 if (!prealloc && (mask & __GFP_WAIT)) {
715 prealloc = alloc_extent_state(mask);
716 if (!prealloc)
717 return -ENOMEM;
718 }
719
720 write_lock_irqsave(&tree->lock, flags);
721 /*
722 * this search will find all the extents that end after
723 * our range starts.
724 */
725 node = tree_search(&tree->state, start);
726 if (!node) {
727 err = insert_state(tree, prealloc, start, end, bits);
728 prealloc = NULL;
729 BUG_ON(err == -EEXIST);
730 goto out;
731 }
732
733 state = rb_entry(node, struct extent_state, rb_node);
734 last_start = state->start;
735 last_end = state->end;
736
737 /*
738 * | ---- desired range ---- |
739 * | state |
740 *
741 * Just lock what we found and keep going
742 */
743 if (state->start == start && state->end <= end) {
744 set = state->state & bits;
745 if (set && exclusive) {
746 *failed_start = state->start;
747 err = -EEXIST;
748 goto out;
749 }
750 set_state_bits(tree, state, bits);
751 start = state->end + 1;
752 merge_state(tree, state);
753 goto search_again;
754 }
755
756 /*
757 * | ---- desired range ---- |
758 * | state |
759 * or
760 * | ------------- state -------------- |
761 *
762 * We need to split the extent we found, and may flip bits on
763 * second half.
764 *
765 * If the extent we found extends past our
766 * range, we just split and search again. It'll get split
767 * again the next time though.
768 *
769 * If the extent we found is inside our range, we set the
770 * desired bit on it.
771 */
772 if (state->start < start) {
773 set = state->state & bits;
774 if (exclusive && set) {
775 *failed_start = start;
776 err = -EEXIST;
777 goto out;
778 }
779 err = split_state(tree, state, prealloc, start);
780 BUG_ON(err == -EEXIST);
781 prealloc = NULL;
782 if (err)
783 goto out;
784 if (state->end <= end) {
785 set_state_bits(tree, state, bits);
786 start = state->end + 1;
787 merge_state(tree, state);
788 } else {
789 start = state->start;
790 }
791 goto search_again;
792 }
793 /*
794 * | ---- desired range ---- |
795 * | state | or | state |
796 *
797 * There's a hole, we need to insert something in it and
798 * ignore the extent we found.
799 */
800 if (state->start > start) {
801 u64 this_end;
802 if (end < last_start)
803 this_end = end;
804 else
805 this_end = last_start - 1;
806 err = insert_state(tree, prealloc, start, this_end,
807 bits);
808 prealloc = NULL;
809 BUG_ON(err == -EEXIST);
810 if (err)
811 goto out;
812 start = this_end + 1;
813 goto search_again;
814 }
815 /*
816 * | ---- desired range ---- |
817 * | state |
818 * We need to split the extent, and set the bit
819 * on the first half
820 */
821 if (state->start <= end && state->end > end) {
822 set = state->state & bits;
823 if (exclusive && set) {
824 *failed_start = start;
825 err = -EEXIST;
826 goto out;
827 }
828 err = split_state(tree, state, prealloc, end + 1);
829 BUG_ON(err == -EEXIST);
830
831 set_state_bits(tree, prealloc, bits);
832 merge_state(tree, prealloc);
833 prealloc = NULL;
834 goto out;
835 }
836
837 goto search_again;
838
839 out:
840 write_unlock_irqrestore(&tree->lock, flags);
841 if (prealloc)
842 free_extent_state(prealloc);
843
844 return err;
845
846 search_again:
847 if (start > end)
848 goto out;
849 write_unlock_irqrestore(&tree->lock, flags);
850 if (mask & __GFP_WAIT)
851 cond_resched();
852 goto again;
853 }
854 EXPORT_SYMBOL(set_extent_bit);
855
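/*
 * Illustrative sketch only (not part of the original file): with
 * 'exclusive' == 1, set_extent_bit() refuses to re-set a bit that is already
 * present and reports where the collision starts.  That is enough to build a
 * non-blocking variant of the range lock used below: on -EEXIST, undo the
 * part of the range locked before the collision and give up.  The helper
 * name is made up for the example.
 */
static inline int example_try_lock_extent(struct extent_map_tree *tree,
					  u64 start, u64 end, gfp_t mask)
{
	u64 failed_start;
	int err;

	err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
			     &failed_start, mask);
	if (err == -EEXIST) {
		if (failed_start > start)
			clear_extent_bit(tree, start, failed_start - 1,
					 EXTENT_LOCKED, 1, 0, mask);
		return 0;	/* did not get the lock */
	}
	return 1;		/* range [start, end] is now locked */
}
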
856 /* wrappers around set/clear extent bit */
857 int set_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
858 gfp_t mask)
859 {
860 return set_extent_bit(tree, start, end, EXTENT_DIRTY, 0, NULL,
861 mask);
862 }
863 EXPORT_SYMBOL(set_extent_dirty);
864
865 int set_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
866 int bits, gfp_t mask)
867 {
868 return set_extent_bit(tree, start, end, bits, 0, NULL,
869 mask);
870 }
871 EXPORT_SYMBOL(set_extent_bits);
872
873 int clear_extent_bits(struct extent_map_tree *tree, u64 start, u64 end,
874 int bits, gfp_t mask)
875 {
876 return clear_extent_bit(tree, start, end, bits, 0, 0, mask);
877 }
878 EXPORT_SYMBOL(clear_extent_bits);
879
880 int set_extent_delalloc(struct extent_map_tree *tree, u64 start, u64 end,
881 gfp_t mask)
882 {
883 return set_extent_bit(tree, start, end,
884 EXTENT_DELALLOC | EXTENT_DIRTY, 0, NULL,
885 mask);
886 }
887 EXPORT_SYMBOL(set_extent_delalloc);
888
889 int clear_extent_dirty(struct extent_map_tree *tree, u64 start, u64 end,
890 gfp_t mask)
891 {
892 return clear_extent_bit(tree, start, end,
893 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, mask);
894 }
895 EXPORT_SYMBOL(clear_extent_dirty);
896
897 int set_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
898 gfp_t mask)
899 {
900 return set_extent_bit(tree, start, end, EXTENT_NEW, 0, NULL,
901 mask);
902 }
903 EXPORT_SYMBOL(set_extent_new);
904
905 int clear_extent_new(struct extent_map_tree *tree, u64 start, u64 end,
906 gfp_t mask)
907 {
908 return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, mask);
909 }
910 EXPORT_SYMBOL(clear_extent_new);
911
912 int set_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
913 gfp_t mask)
914 {
915 return set_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, NULL,
916 mask);
917 }
918 EXPORT_SYMBOL(set_extent_uptodate);
919
920 int clear_extent_uptodate(struct extent_map_tree *tree, u64 start, u64 end,
921 gfp_t mask)
922 {
923 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
924 }
925 EXPORT_SYMBOL(clear_extent_uptodate);
926
927 int set_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
928 gfp_t mask)
929 {
930 return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
931 0, NULL, mask);
932 }
933 EXPORT_SYMBOL(set_extent_writeback);
934
935 int clear_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end,
936 gfp_t mask)
937 {
938 return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
939 }
940 EXPORT_SYMBOL(clear_extent_writeback);
941
942 int wait_on_extent_writeback(struct extent_map_tree *tree, u64 start, u64 end)
943 {
944 return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
945 }
946 EXPORT_SYMBOL(wait_on_extent_writeback);
947
948 /*
949 * locks a range in ascending order, waiting for any locked regions
950 * it hits on the way. [start,end] are inclusive, and this will sleep.
951 */
952 int lock_extent(struct extent_map_tree *tree, u64 start, u64 end, gfp_t mask)
953 {
954 int err;
955 u64 failed_start;
956 while (1) {
957 err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
958 &failed_start, mask);
959 if (err == -EEXIST && (mask & __GFP_WAIT)) {
960 wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
961 start = failed_start;
962 } else {
963 break;
964 }
965 WARN_ON(start > end);
966 }
967 return err;
968 }
969 EXPORT_SYMBOL(lock_extent);
970
971 int unlock_extent(struct extent_map_tree *tree, u64 start, u64 end,
972 gfp_t mask)
973 {
974 return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, mask);
975 }
976 EXPORT_SYMBOL(unlock_extent);
977
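/*
 * Illustrative sketch only (not part of the original file): the usual
 * calling pattern is to lock a byte range, update the state bits while the
 * range cannot change underneath us, and then unlock it.  Here the range is
 * marked delalloc the way a buffered write path might.  The helper name is
 * made up for the example.
 */
static inline int example_mark_delalloc(struct extent_map_tree *tree,
					u64 start, u64 end)
{
	int ret;

	lock_extent(tree, start, end, GFP_NOFS);
	ret = set_extent_delalloc(tree, start, end, GFP_NOFS);
	unlock_extent(tree, start, end, GFP_NOFS);
	return ret;
}
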
978 /*
979 * helper function to set pages and extents in the tree dirty
980 */
981 int set_range_dirty(struct extent_map_tree *tree, u64 start, u64 end)
982 {
983 unsigned long index = start >> PAGE_CACHE_SHIFT;
984 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
985 struct page *page;
986
987 while (index <= end_index) {
988 page = find_get_page(tree->mapping, index);
989 BUG_ON(!page);
990 __set_page_dirty_nobuffers(page);
991 page_cache_release(page);
992 index++;
993 }
994 set_extent_dirty(tree, start, end, GFP_NOFS);
995 return 0;
996 }
997 EXPORT_SYMBOL(set_range_dirty);
998
999 /*
1000 * helper function to set both pages and extents in the tree writeback
1001 */
1002 int set_range_writeback(struct extent_map_tree *tree, u64 start, u64 end)
1003 {
1004 unsigned long index = start >> PAGE_CACHE_SHIFT;
1005 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1006 struct page *page;
1007
1008 while (index <= end_index) {
1009 page = find_get_page(tree->mapping, index);
1010 BUG_ON(!page);
1011 set_page_writeback(page);
1012 page_cache_release(page);
1013 index++;
1014 }
1015 set_extent_writeback(tree, start, end, GFP_NOFS);
1016 return 0;
1017 }
1018 EXPORT_SYMBOL(set_range_writeback);
1019
1020 int find_first_extent_bit(struct extent_map_tree *tree, u64 start,
1021 u64 *start_ret, u64 *end_ret, int bits)
1022 {
1023 struct rb_node *node;
1024 struct extent_state *state;
1025 int ret = 1;
1026
1027 read_lock_irq(&tree->lock);
1028 /*
1029 * this search will find all the extents that end after
1030 * our range starts.
1031 */
1032 node = tree_search(&tree->state, start);
1033 if (!node || IS_ERR(node)) {
1034 goto out;
1035 }
1036
1037 while(1) {
1038 state = rb_entry(node, struct extent_state, rb_node);
1039 if (state->end >= start && (state->state & bits)) {
1040 *start_ret = state->start;
1041 *end_ret = state->end;
1042 ret = 0;
1043 break;
1044 }
1045 node = rb_next(node);
1046 if (!node)
1047 break;
1048 }
1049 out:
1050 read_unlock_irq(&tree->lock);
1051 return ret;
1052 }
1053 EXPORT_SYMBOL(find_first_extent_bit);
1054
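/*
 * Illustrative sketch only (not part of the original file):
 * find_first_extent_bit() returns 0 and fills in *start_ret/*end_ret when a
 * matching range exists at or after 'start', so walking every range with a
 * given bit set is a simple loop.  The helper name is made up for the
 * example.
 */
static inline u64 example_count_dirty_ranges(struct extent_map_tree *tree)
{
	u64 start = 0;
	u64 end;
	u64 nr = 0;

	while (!find_first_extent_bit(tree, start, &start, &end,
				      EXTENT_DIRTY)) {
		nr++;
		if (end == (u64)-1)
			break;
		start = end + 1;
	}
	return nr;
}
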
1055 u64 find_lock_delalloc_range(struct extent_map_tree *tree,
1056 u64 *start, u64 *end, u64 max_bytes)
1057 {
1058 struct rb_node *node;
1059 struct extent_state *state;
1060 u64 cur_start = *start;
1061 u64 found = 0;
1062 u64 total_bytes = 0;
1063
1064 write_lock_irq(&tree->lock);
1065 /*
1066 * this search will find all the extents that end after
1067 * our range starts.
1068 */
1069 search_again:
1070 node = tree_search(&tree->state, cur_start);
1071 if (!node || IS_ERR(node)) {
1072 *end = (u64)-1;
1073 goto out;
1074 }
1075
1076 while(1) {
1077 state = rb_entry(node, struct extent_state, rb_node);
1078 if (found && state->start != cur_start) {
1079 goto out;
1080 }
1081 if (!(state->state & EXTENT_DELALLOC)) {
1082 if (!found)
1083 *end = state->end;
1084 goto out;
1085 }
1086 if (!found) {
1087 struct extent_state *prev_state;
1088 struct rb_node *prev_node = node;
1089 while(1) {
1090 prev_node = rb_prev(prev_node);
1091 if (!prev_node)
1092 break;
1093 prev_state = rb_entry(prev_node,
1094 struct extent_state,
1095 rb_node);
1096 if (!(prev_state->state & EXTENT_DELALLOC))
1097 break;
1098 state = prev_state;
1099 node = prev_node;
1100 }
1101 }
1102 if (state->state & EXTENT_LOCKED) {
1103 DEFINE_WAIT(wait);
1104 atomic_inc(&state->refs);
1105 prepare_to_wait(&state->wq, &wait,
1106 TASK_UNINTERRUPTIBLE);
1107 write_unlock_irq(&tree->lock);
1108 schedule();
1109 write_lock_irq(&tree->lock);
1110 finish_wait(&state->wq, &wait);
1111 free_extent_state(state);
1112 goto search_again;
1113 }
1114 state->state |= EXTENT_LOCKED;
1115 if (!found)
1116 *start = state->start;
1117 found++;
1118 *end = state->end;
1119 cur_start = state->end + 1;
1120 node = rb_next(node);
1121 if (!node)
1122 break;
1123 total_bytes += state->end - state->start + 1;
1124 if (total_bytes >= max_bytes)
1125 break;
1126 }
1127 out:
1128 write_unlock_irq(&tree->lock);
1129 return found;
1130 }
1131
1132 u64 count_range_bits(struct extent_map_tree *tree,
1133 u64 *start, u64 search_end, u64 max_bytes,
1134 unsigned long bits)
1135 {
1136 struct rb_node *node;
1137 struct extent_state *state;
1138 u64 cur_start = *start;
1139 u64 total_bytes = 0;
1140 int found = 0;
1141
1142 if (search_end <= cur_start) {
1143 printk("search_end %Lu start %Lu\n", search_end, cur_start);
1144 WARN_ON(1);
1145 return 0;
1146 }
1147
1148 write_lock_irq(&tree->lock);
1149 if (cur_start == 0 && bits == EXTENT_DIRTY) {
1150 total_bytes = tree->dirty_bytes;
1151 goto out;
1152 }
1153 /*
1154 * this search will find all the extents that end after
1155 * our range starts.
1156 */
1157 node = tree_search(&tree->state, cur_start);
1158 if (!node || IS_ERR(node)) {
1159 goto out;
1160 }
1161
1162 while(1) {
1163 state = rb_entry(node, struct extent_state, rb_node);
1164 if (state->start > search_end)
1165 break;
1166 if (state->end >= cur_start && (state->state & bits)) {
1167 total_bytes += min(search_end, state->end) + 1 -
1168 max(cur_start, state->start);
1169 if (total_bytes >= max_bytes)
1170 break;
1171 if (!found) {
1172 *start = state->start;
1173 found = 1;
1174 }
1175 }
1176 node = rb_next(node);
1177 if (!node)
1178 break;
1179 }
1180 out:
1181 write_unlock_irq(&tree->lock);
1182 return total_bytes;
1183 }
1184 /*
1185 * helper function to lock both pages and extents in the tree.
1186 * pages must be locked first.
1187 */
1188 int lock_range(struct extent_map_tree *tree, u64 start, u64 end)
1189 {
1190 unsigned long index = start >> PAGE_CACHE_SHIFT;
1191 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1192 struct page *page;
1193 int err;
1194
1195 while (index <= end_index) {
1196 page = grab_cache_page(tree->mapping, index);
1197 if (!page) {
1198 err = -ENOMEM;
1199 goto failed;
1200 }
1201 if (IS_ERR(page)) {
1202 err = PTR_ERR(page);
1203 goto failed;
1204 }
1205 index++;
1206 }
1207 lock_extent(tree, start, end, GFP_NOFS);
1208 return 0;
1209
1210 failed:
1211 /*
1212 * we failed above in getting the page at 'index', so we undo here
1213 * up to but not including the page at 'index'
1214 */
1215 end_index = index;
1216 index = start >> PAGE_CACHE_SHIFT;
1217 while (index < end_index) {
1218 page = find_get_page(tree->mapping, index);
1219 unlock_page(page);
1220 page_cache_release(page);
1221 index++;
1222 }
1223 return err;
1224 }
1225 EXPORT_SYMBOL(lock_range);
1226
1227 /*
1228 * helper function to unlock both pages and extents in the tree.
1229 */
1230 int unlock_range(struct extent_map_tree *tree, u64 start, u64 end)
1231 {
1232 unsigned long index = start >> PAGE_CACHE_SHIFT;
1233 unsigned long end_index = end >> PAGE_CACHE_SHIFT;
1234 struct page *page;
1235
1236 while (index <= end_index) {
1237 page = find_get_page(tree->mapping, index);
1238 unlock_page(page);
1239 page_cache_release(page);
1240 index++;
1241 }
1242 unlock_extent(tree, start, end, GFP_NOFS);
1243 return 0;
1244 }
1245 EXPORT_SYMBOL(unlock_range);
1246
1247 int set_state_private(struct extent_map_tree *tree, u64 start, u64 private)
1248 {
1249 struct rb_node *node;
1250 struct extent_state *state;
1251 int ret = 0;
1252
1253 write_lock_irq(&tree->lock);
1254 /*
1255 * this search will find all the extents that end after
1256 * our range starts.
1257 */
1258 node = tree_search(&tree->state, start);
1259 if (!node || IS_ERR(node)) {
1260 ret = -ENOENT;
1261 goto out;
1262 }
1263 state = rb_entry(node, struct extent_state, rb_node);
1264 if (state->start != start) {
1265 ret = -ENOENT;
1266 goto out;
1267 }
1268 state->private = private;
1269 out:
1270 write_unlock_irq(&tree->lock);
1271 return ret;
1272 }
1273
1274 int get_state_private(struct extent_map_tree *tree, u64 start, u64 *private)
1275 {
1276 struct rb_node *node;
1277 struct extent_state *state;
1278 int ret = 0;
1279
1280 read_lock_irq(&tree->lock);
1281 /*
1282 * this search will find all the extents that end after
1283 * our range starts.
1284 */
1285 node = tree_search(&tree->state, start);
1286 if (!node || IS_ERR(node)) {
1287 ret = -ENOENT;
1288 goto out;
1289 }
1290 state = rb_entry(node, struct extent_state, rb_node);
1291 if (state->start != start) {
1292 ret = -ENOENT;
1293 goto out;
1294 }
1295 *private = state->private;
1296 out:
1297 read_unlock_irq(&tree->lock);
1298 return ret;
1299 }
1300
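/*
 * Illustrative sketch only (not part of the original file):
 * set_state_private()/get_state_private() only succeed when an extent_state
 * record starts exactly at 'start', so a caller typically locks that exact
 * range first (which inserts such a record) and stashes a value on it, for
 * example a checksum for an in-flight read.  The helper name is made up for
 * the example.
 */
static inline int example_stash_private(struct extent_map_tree *tree,
					u64 start, u64 end, u64 value)
{
	u64 out = 0;
	int ret;

	lock_extent(tree, start, end, GFP_NOFS);
	ret = set_state_private(tree, start, value);
	if (!ret)
		ret = get_state_private(tree, start, &out);
	unlock_extent(tree, start, end, GFP_NOFS);
	return ret;
}
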
1301 /*
1302 * searches a range in the state tree for a given mask.
1303  * If 'filled' == 1, this returns 1 only if every extent in the range
1304 * has the bits set. Otherwise, 1 is returned if any bit in the
1305 * range is found set.
1306 */
1307 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
1308 int bits, int filled)
1309 {
1310 struct extent_state *state = NULL;
1311 struct rb_node *node;
1312 int bitset = 0;
1313
1314 read_lock_irq(&tree->lock);
1315 node = tree_search(&tree->state, start);
1316 while (node && start <= end) {
1317 state = rb_entry(node, struct extent_state, rb_node);
1318
1319 if (filled && state->start > start) {
1320 bitset = 0;
1321 break;
1322 }
1323
1324 if (state->start > end)
1325 break;
1326
1327 if (state->state & bits) {
1328 bitset = 1;
1329 if (!filled)
1330 break;
1331 } else if (filled) {
1332 bitset = 0;
1333 break;
1334 }
1335 start = state->end + 1;
1336 if (start > end)
1337 break;
1338 node = rb_next(node);
1339 }
1340 read_unlock_irq(&tree->lock);
1341 return bitset;
1342 }
1343 EXPORT_SYMBOL(test_range_bit);
1344
1345 /*
1346 * helper function to set a given page up to date if all the
1347 * extents in the tree for that page are up to date
1348 */
1349 static int check_page_uptodate(struct extent_map_tree *tree,
1350 struct page *page)
1351 {
1352 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1353 u64 end = start + PAGE_CACHE_SIZE - 1;
1354 if (test_range_bit(tree, start, end, EXTENT_UPTODATE, 1))
1355 SetPageUptodate(page);
1356 return 0;
1357 }
1358
1359 /*
1360 * helper function to unlock a page if all the extents in the tree
1361 * for that page are unlocked
1362 */
1363 static int check_page_locked(struct extent_map_tree *tree,
1364 struct page *page)
1365 {
1366 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1367 u64 end = start + PAGE_CACHE_SIZE - 1;
1368 if (!test_range_bit(tree, start, end, EXTENT_LOCKED, 0))
1369 unlock_page(page);
1370 return 0;
1371 }
1372
1373 /*
1374 * helper function to end page writeback if all the extents
1375 * in the tree for that page are done with writeback
1376 */
1377 static int check_page_writeback(struct extent_map_tree *tree,
1378 struct page *page)
1379 {
1380 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1381 u64 end = start + PAGE_CACHE_SIZE - 1;
1382 if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
1383 end_page_writeback(page);
1384 return 0;
1385 }
1386
1387 /* lots and lots of room for performance fixes in the end_bio funcs */
1388
1389 /*
1390 * after a writepage IO is done, we need to:
1391 * clear the uptodate bits on error
1392 * clear the writeback bits in the extent tree for this IO
1393 * end_page_writeback if the page has no more pending IO
1394 *
1395 * Scheduling is not allowed, so the extent state tree is expected
1396 * to have one and only one object corresponding to this IO.
1397 */
1398 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1399 static void end_bio_extent_writepage(struct bio *bio, int err)
1400 #else
1401 static int end_bio_extent_writepage(struct bio *bio,
1402 unsigned int bytes_done, int err)
1403 #endif
1404 {
1405 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1406 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1407 struct extent_map_tree *tree = bio->bi_private;
1408 u64 start;
1409 u64 end;
1410 int whole_page;
1411
1412 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1413 if (bio->bi_size)
1414 return 1;
1415 #endif
1416
1417 do {
1418 struct page *page = bvec->bv_page;
1419 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1420 bvec->bv_offset;
1421 end = start + bvec->bv_len - 1;
1422
1423 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1424 whole_page = 1;
1425 else
1426 whole_page = 0;
1427
1428 if (--bvec >= bio->bi_io_vec)
1429 prefetchw(&bvec->bv_page->flags);
1430
1431 if (!uptodate) {
1432 clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
1433 ClearPageUptodate(page);
1434 SetPageError(page);
1435 }
1436 clear_extent_writeback(tree, start, end, GFP_ATOMIC);
1437
1438 if (whole_page)
1439 end_page_writeback(page);
1440 else
1441 check_page_writeback(tree, page);
1442 if (tree->ops && tree->ops->writepage_end_io_hook)
1443 tree->ops->writepage_end_io_hook(page, start, end);
1444 } while (bvec >= bio->bi_io_vec);
1445
1446 bio_put(bio);
1447 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1448 return 0;
1449 #endif
1450 }
1451
1452 /*
1453 * after a readpage IO is done, we need to:
1454 * clear the uptodate bits on error
1455 * set the uptodate bits if things worked
1456 * set the page up to date if all extents in the tree are uptodate
1457 * clear the lock bit in the extent tree
1458 * unlock the page if there are no other extents locked for it
1459 *
1460 * Scheduling is not allowed, so the extent state tree is expected
1461 * to have one and only one object corresponding to this IO.
1462 */
1463 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1464 static void end_bio_extent_readpage(struct bio *bio, int err)
1465 #else
1466 static int end_bio_extent_readpage(struct bio *bio,
1467 unsigned int bytes_done, int err)
1468 #endif
1469 {
1470 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1471 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1472 struct extent_map_tree *tree = bio->bi_private;
1473 u64 start;
1474 u64 end;
1475 int whole_page;
1476 int ret;
1477
1478 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1479 if (bio->bi_size)
1480 return 1;
1481 #endif
1482
1483 do {
1484 struct page *page = bvec->bv_page;
1485 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1486 bvec->bv_offset;
1487 end = start + bvec->bv_len - 1;
1488
1489 if (bvec->bv_offset == 0 && bvec->bv_len == PAGE_CACHE_SIZE)
1490 whole_page = 1;
1491 else
1492 whole_page = 0;
1493
1494 if (--bvec >= bio->bi_io_vec)
1495 prefetchw(&bvec->bv_page->flags);
1496
1497 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
1498 ret = tree->ops->readpage_end_io_hook(page, start, end);
1499 if (ret)
1500 uptodate = 0;
1501 }
1502 if (uptodate) {
1503 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1504 if (whole_page)
1505 SetPageUptodate(page);
1506 else
1507 check_page_uptodate(tree, page);
1508 } else {
1509 ClearPageUptodate(page);
1510 SetPageError(page);
1511 }
1512
1513 unlock_extent(tree, start, end, GFP_ATOMIC);
1514
1515 if (whole_page)
1516 unlock_page(page);
1517 else
1518 check_page_locked(tree, page);
1519 } while (bvec >= bio->bi_io_vec);
1520
1521 bio_put(bio);
1522 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1523 return 0;
1524 #endif
1525 }
1526
1527 /*
1528 * IO done from prepare_write is pretty simple, we just unlock
1529 * the structs in the extent tree when done, and set the uptodate bits
1530 * as appropriate.
1531 */
1532 #if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23)
1533 static void end_bio_extent_preparewrite(struct bio *bio, int err)
1534 #else
1535 static int end_bio_extent_preparewrite(struct bio *bio,
1536 unsigned int bytes_done, int err)
1537 #endif
1538 {
1539 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
1540 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
1541 struct extent_map_tree *tree = bio->bi_private;
1542 u64 start;
1543 u64 end;
1544
1545 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1546 if (bio->bi_size)
1547 return 1;
1548 #endif
1549
1550 do {
1551 struct page *page = bvec->bv_page;
1552 start = ((u64)page->index << PAGE_CACHE_SHIFT) +
1553 bvec->bv_offset;
1554 end = start + bvec->bv_len - 1;
1555
1556 if (--bvec >= bio->bi_io_vec)
1557 prefetchw(&bvec->bv_page->flags);
1558
1559 if (uptodate) {
1560 set_extent_uptodate(tree, start, end, GFP_ATOMIC);
1561 } else {
1562 ClearPageUptodate(page);
1563 SetPageError(page);
1564 }
1565
1566 unlock_extent(tree, start, end, GFP_ATOMIC);
1567
1568 } while (bvec >= bio->bi_io_vec);
1569
1570 bio_put(bio);
1571 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,23)
1572 return 0;
1573 #endif
1574 }
1575
1576 static struct bio *
1577 extent_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
1578 gfp_t gfp_flags)
1579 {
1580 struct bio *bio;
1581
1582 bio = bio_alloc(gfp_flags, nr_vecs);
1583
1584 if (bio == NULL && (current->flags & PF_MEMALLOC)) {
1585 while (!bio && (nr_vecs /= 2))
1586 bio = bio_alloc(gfp_flags, nr_vecs);
1587 }
1588
1589 if (bio) {
1590 bio->bi_bdev = bdev;
1591 bio->bi_sector = first_sector;
1592 }
1593 return bio;
1594 }
1595
1596 static int submit_one_bio(int rw, struct bio *bio)
1597 {
1598 u64 maxsector;
1599 int ret = 0;
1600
1601 bio_get(bio);
1602
1603 maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
1604 if (maxsector < bio->bi_sector) {
1605 printk("sector too large max %Lu got %llu\n", maxsector,
1606 (unsigned long long)bio->bi_sector);
1607 WARN_ON(1);
1608 }
1609
1610 submit_bio(rw, bio);
1611 if (bio_flagged(bio, BIO_EOPNOTSUPP))
1612 ret = -EOPNOTSUPP;
1613 bio_put(bio);
1614 return ret;
1615 }
1616
1617 static int submit_extent_page(int rw, struct extent_map_tree *tree,
1618 struct page *page, sector_t sector,
1619 size_t size, unsigned long offset,
1620 struct block_device *bdev,
1621 struct bio **bio_ret,
1622 unsigned long max_pages,
1623 bio_end_io_t end_io_func)
1624 {
1625 int ret = 0;
1626 struct bio *bio;
1627 int nr;
1628
1629 if (bio_ret && *bio_ret) {
1630 bio = *bio_ret;
1631 if (bio->bi_sector + (bio->bi_size >> 9) != sector ||
1632 bio_add_page(bio, page, size, offset) < size) {
1633 ret = submit_one_bio(rw, bio);
1634 bio = NULL;
1635 } else {
1636 return 0;
1637 }
1638 }
1639 nr = min_t(int, max_pages, bio_get_nr_vecs(bdev));
1640 bio = extent_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
1641 if (!bio) {
1642 printk("failed to allocate bio nr %d\n", nr);
		return -ENOMEM;
1643 }
1644 bio_add_page(bio, page, size, offset);
1645 bio->bi_end_io = end_io_func;
1646 bio->bi_private = tree;
1647 if (bio_ret) {
1648 *bio_ret = bio;
1649 } else {
1650 ret = submit_one_bio(rw, bio);
1651 }
1652
1653 return ret;
1654 }
1655
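/*
 * Illustrative sketch only (not part of the original file): callers of
 * submit_extent_page() keep a bio pointer alive across calls so that
 * physically contiguous pages are batched into a single bio.  Whatever is
 * left in the pointer when the caller is done must still be submitted,
 * exactly as extent_read_full_page() does below with submit_one_bio().
 * The helper name is made up for the example.
 */
static inline int example_read_two_pages(struct extent_map_tree *tree,
					 struct page *pages[2],
					 sector_t sectors[2],
					 struct block_device *bdev)
{
	struct bio *bio = NULL;
	int ret = 0;
	int i;

	for (i = 0; i < 2; i++) {
		ret = submit_extent_page(READ, tree, pages[i], sectors[i],
					 PAGE_CACHE_SIZE, 0, bdev, &bio, 2,
					 end_bio_extent_readpage);
		if (ret)
			break;
	}
	if (bio)
		ret = submit_one_bio(READ, bio);
	return ret;
}
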
1656 void set_page_extent_mapped(struct page *page)
1657 {
1658 if (!PagePrivate(page)) {
1659 SetPagePrivate(page);
1660 WARN_ON(!page->mapping->a_ops->invalidatepage);
1661 set_page_private(page, EXTENT_PAGE_PRIVATE);
1662 page_cache_get(page);
1663 }
1664 }
1665
1666 void set_page_extent_head(struct page *page, unsigned long len)
1667 {
1668 WARN_ON(page->private && page->private == EXTENT_PAGE_PRIVATE &&
1669 PageDirty(page));
1670 set_page_private(page, EXTENT_PAGE_PRIVATE_FIRST_PAGE | len << 2);
1671 }
1672
1673 /*
1674 * basic readpage implementation. Locked extent state structs are inserted
1675 * into the tree that are removed when the IO is done (by the end_io
1676 * handlers)
1677 */
1678 static int __extent_read_full_page(struct extent_map_tree *tree,
1679 struct page *page,
1680 get_extent_t *get_extent,
1681 struct bio **bio)
1682 {
1683 struct inode *inode = page->mapping->host;
1684 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1685 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1686 u64 end;
1687 u64 cur = start;
1688 u64 extent_offset;
1689 u64 last_byte = i_size_read(inode);
1690 u64 block_start;
1691 u64 cur_end;
1692 sector_t sector;
1693 struct extent_map *em;
1694 struct block_device *bdev;
1695 int ret;
1696 int nr = 0;
1697 size_t page_offset = 0;
1698 size_t iosize;
1699 size_t blocksize = inode->i_sb->s_blocksize;
1700
1701 set_page_extent_mapped(page);
1702
1703 end = page_end;
1704 lock_extent(tree, start, end, GFP_NOFS);
1705
1706 while (cur <= end) {
1707 if (cur >= last_byte) {
1708 char *userpage;
1709 iosize = PAGE_CACHE_SIZE - page_offset;
1710 userpage = kmap_atomic(page, KM_USER0);
1711 memset(userpage + page_offset, 0, iosize);
1712 flush_dcache_page(page);
1713 kunmap_atomic(userpage, KM_USER0);
1714 set_extent_uptodate(tree, cur, cur + iosize - 1,
1715 GFP_NOFS);
1716 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1717 break;
1718 }
1719 em = get_extent(inode, page, page_offset, cur, end, 0);
1720 if (IS_ERR(em) || !em) {
1721 SetPageError(page);
1722 unlock_extent(tree, cur, end, GFP_NOFS);
1723 break;
1724 }
1725
1726 extent_offset = cur - em->start;
1727 BUG_ON(em->end < cur);
1728 BUG_ON(end < cur);
1729
1730 iosize = min(em->end - cur, end - cur) + 1;
1731 cur_end = min(em->end, end);
1732 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1733 sector = (em->block_start + extent_offset) >> 9;
1734 bdev = em->bdev;
1735 block_start = em->block_start;
1736 free_extent_map(em);
1737 em = NULL;
1738
1739 /* we've found a hole, just zero and go on */
1740 if (block_start == EXTENT_MAP_HOLE) {
1741 char *userpage;
1742 userpage = kmap_atomic(page, KM_USER0);
1743 memset(userpage + page_offset, 0, iosize);
1744 flush_dcache_page(page);
1745 kunmap_atomic(userpage, KM_USER0);
1746
1747 set_extent_uptodate(tree, cur, cur + iosize - 1,
1748 GFP_NOFS);
1749 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1750 cur = cur + iosize;
1751 page_offset += iosize;
1752 continue;
1753 }
1754 /* the get_extent function already copied into the page */
1755 if (test_range_bit(tree, cur, cur_end, EXTENT_UPTODATE, 1)) {
1756 unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS);
1757 cur = cur + iosize;
1758 page_offset += iosize;
1759 continue;
1760 }
1761
1762 ret = 0;
1763 if (tree->ops && tree->ops->readpage_io_hook) {
1764 ret = tree->ops->readpage_io_hook(page, cur,
1765 cur + iosize - 1);
1766 }
1767 if (!ret) {
1768 unsigned long nr = (last_byte >> PAGE_CACHE_SHIFT) + 1;
1769 nr -= page->index;
1770 ret = submit_extent_page(READ, tree, page,
1771 sector, iosize, page_offset,
1772 bdev, bio, nr,
1773 end_bio_extent_readpage);
1774 }
1775 if (ret)
1776 SetPageError(page);
1777 cur = cur + iosize;
1778 page_offset += iosize;
1779 nr++;
1780 }
1781 if (!nr) {
1782 if (!PageError(page))
1783 SetPageUptodate(page);
1784 unlock_page(page);
1785 }
1786 return 0;
1787 }
1788
1789 int extent_read_full_page(struct extent_map_tree *tree, struct page *page,
1790 get_extent_t *get_extent)
1791 {
1792 struct bio *bio = NULL;
1793 int ret;
1794
1795 ret = __extent_read_full_page(tree, page, get_extent, &bio);
1796 if (bio)
1797 submit_one_bio(READ, bio);
1798 return ret;
1799 }
1800 EXPORT_SYMBOL(extent_read_full_page);
1801
1802 /*
1803 * the writepage semantics are similar to regular writepage. extent
1804 * records are inserted to lock ranges in the tree, and as dirty areas
1805 * are found, they are marked writeback. Then the lock bits are removed
1806 * and the end_io handler clears the writeback ranges
1807 */
1808 static int __extent_writepage(struct page *page, struct writeback_control *wbc,
1809 void *data)
1810 {
1811 struct inode *inode = page->mapping->host;
1812 struct extent_page_data *epd = data;
1813 struct extent_map_tree *tree = epd->tree;
1814 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
1815 u64 delalloc_start;
1816 u64 page_end = start + PAGE_CACHE_SIZE - 1;
1817 u64 end;
1818 u64 cur = start;
1819 u64 extent_offset;
1820 u64 last_byte = i_size_read(inode);
1821 u64 block_start;
1822 u64 iosize;
1823 sector_t sector;
1824 struct extent_map *em;
1825 struct block_device *bdev;
1826 int ret;
1827 int nr = 0;
1828 size_t page_offset = 0;
1829 size_t blocksize;
1830 loff_t i_size = i_size_read(inode);
1831 unsigned long end_index = i_size >> PAGE_CACHE_SHIFT;
1832 u64 nr_delalloc;
1833 u64 delalloc_end;
1834
1835 WARN_ON(!PageLocked(page));
1836 if (page->index > end_index) {
1837 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1838 unlock_page(page);
1839 return 0;
1840 }
1841
1842 if (page->index == end_index) {
1843 char *userpage;
1844
1845 size_t offset = i_size & (PAGE_CACHE_SIZE - 1);
1846
1847 userpage = kmap_atomic(page, KM_USER0);
1848 memset(userpage + offset, 0, PAGE_CACHE_SIZE - offset);
1849 flush_dcache_page(page);
1850 kunmap_atomic(userpage, KM_USER0);
1851 }
1852
1853 set_page_extent_mapped(page);
1854
1855 delalloc_start = start;
1856 delalloc_end = 0;
1857 while(delalloc_end < page_end) {
1858 nr_delalloc = find_lock_delalloc_range(tree, &delalloc_start,
1859 &delalloc_end,
1860 128 * 1024 * 1024);
1861 if (nr_delalloc == 0) {
1862 delalloc_start = delalloc_end + 1;
1863 continue;
1864 }
1865 tree->ops->fill_delalloc(inode, delalloc_start,
1866 delalloc_end);
1867 clear_extent_bit(tree, delalloc_start,
1868 delalloc_end,
1869 EXTENT_LOCKED | EXTENT_DELALLOC,
1870 1, 0, GFP_NOFS);
1871 delalloc_start = delalloc_end + 1;
1872 }
1873 lock_extent(tree, start, page_end, GFP_NOFS);
1874
1875 end = page_end;
1876 if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0)) {
1877 printk("found delalloc bits after lock_extent\n");
1878 }
1879
1880 if (last_byte <= start) {
1881 clear_extent_dirty(tree, start, page_end, GFP_NOFS);
1882 goto done;
1883 }
1884
1885 set_extent_uptodate(tree, start, page_end, GFP_NOFS);
1886 blocksize = inode->i_sb->s_blocksize;
1887
1888 while (cur <= end) {
1889 if (cur >= last_byte) {
1890 clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
1891 break;
1892 }
1893 em = epd->get_extent(inode, page, page_offset, cur, end, 1);
1894 if (IS_ERR(em) || !em) {
1895 SetPageError(page);
1896 break;
1897 }
1898
1899 extent_offset = cur - em->start;
1900 BUG_ON(em->end < cur);
1901 BUG_ON(end < cur);
1902 iosize = min(em->end - cur, end - cur) + 1;
1903 iosize = (iosize + blocksize - 1) & ~((u64)blocksize - 1);
1904 sector = (em->block_start + extent_offset) >> 9;
1905 bdev = em->bdev;
1906 block_start = em->block_start;
1907 free_extent_map(em);
1908 em = NULL;
1909
1910 if (block_start == EXTENT_MAP_HOLE ||
1911 block_start == EXTENT_MAP_INLINE) {
1912 clear_extent_dirty(tree, cur,
1913 cur + iosize - 1, GFP_NOFS);
1914 cur = cur + iosize;
1915 page_offset += iosize;
1916 continue;
1917 }
1918
1919 /* leave this out until we have a page_mkwrite call */
1920 if (0 && !test_range_bit(tree, cur, cur + iosize - 1,
1921 EXTENT_DIRTY, 0)) {
1922 cur = cur + iosize;
1923 page_offset += iosize;
1924 continue;
1925 }
1926 clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
1927 if (tree->ops && tree->ops->writepage_io_hook) {
1928 ret = tree->ops->writepage_io_hook(page, cur,
1929 cur + iosize - 1);
1930 } else {
1931 ret = 0;
1932 }
1933 if (ret)
1934 SetPageError(page);
1935 else {
1936 unsigned long max_nr = end_index + 1;
1937 set_range_writeback(tree, cur, cur + iosize - 1);
1938 if (!PageWriteback(page)) {
1939 printk("warning page %lu not writeback, "
1940 "cur %llu end %llu\n", page->index,
1941 (unsigned long long)cur,
1942 (unsigned long long)end);
1943 }
1944
1945 ret = submit_extent_page(WRITE, tree, page, sector,
1946 iosize, page_offset, bdev,
1947 &epd->bio, max_nr,
1948 end_bio_extent_writepage);
1949 if (ret)
1950 SetPageError(page);
1951 }
1952 cur = cur + iosize;
1953 page_offset += iosize;
1954 nr++;
1955 }
1956 done:
1957 if (nr == 0) {
1958 /* make sure the mapping tag for page dirty gets cleared */
1959 set_page_writeback(page);
1960 end_page_writeback(page);
1961 }
1962 unlock_extent(tree, start, page_end, GFP_NOFS);
1963 unlock_page(page);
1964 return 0;
1965 }
1966
1967 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,18)
1968
1969 /* Taken directly from 2.6.23 for the 2.6.18 backport */
1970 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
1971 void *data);
1972
1973 /**
1974 * write_cache_pages - walk the list of dirty pages of the given address space
1975 * and write all of them.
1976 * @mapping: address space structure to write
1977 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
1978 * @writepage: function called for each page
1979 * @data: data passed to writepage function
1980 *
1981 * If a page is already under I/O, write_cache_pages() skips it, even
1982 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
1983 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
1984 * and msync() need to guarantee that all the data which was dirty at the time
1985 * the call was made get new I/O started against them. If wbc->sync_mode is
1986 * WB_SYNC_ALL then we were called for data integrity and we must wait for
1987 * existing IO to complete.
1988 */
1989 static int write_cache_pages(struct address_space *mapping,
1990 struct writeback_control *wbc, writepage_t writepage,
1991 void *data)
1992 {
1993 struct backing_dev_info *bdi = mapping->backing_dev_info;
1994 int ret = 0;
1995 int done = 0;
1996 struct pagevec pvec;
1997 int nr_pages;
1998 pgoff_t index;
1999 pgoff_t end; /* Inclusive */
2000 int scanned = 0;
2001 int range_whole = 0;
2002
2003 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2004 wbc->encountered_congestion = 1;
2005 return 0;
2006 }
2007
2008 pagevec_init(&pvec, 0);
2009 if (wbc->range_cyclic) {
2010 index = mapping->writeback_index; /* Start from prev offset */
2011 end = -1;
2012 } else {
2013 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2014 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2015 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2016 range_whole = 1;
2017 scanned = 1;
2018 }
2019 retry:
2020 while (!done && (index <= end) &&
2021 (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
2022 PAGECACHE_TAG_DIRTY,
2023 min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
2024 unsigned i;
2025
2026 scanned = 1;
2027 for (i = 0; i < nr_pages; i++) {
2028 struct page *page = pvec.pages[i];
2029
2030 /*
2031 * At this point we hold neither mapping->tree_lock nor
2032 * lock on the page itself: the page may be truncated or
2033 * invalidated (changing page->mapping to NULL), or even
2034 * swizzled back from swapper_space to tmpfs file
2035 * mapping
2036 */
2037 lock_page(page);
2038
2039 if (unlikely(page->mapping != mapping)) {
2040 unlock_page(page);
2041 continue;
2042 }
2043
2044 if (!wbc->range_cyclic && page->index > end) {
2045 done = 1;
2046 unlock_page(page);
2047 continue;
2048 }
2049
2050 if (wbc->sync_mode != WB_SYNC_NONE)
2051 wait_on_page_writeback(page);
2052
2053 if (PageWriteback(page) ||
2054 !clear_page_dirty_for_io(page)) {
2055 unlock_page(page);
2056 continue;
2057 }
2058
2059 ret = (*writepage)(page, wbc, data);
2060
2061 if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
2062 unlock_page(page);
2063 ret = 0;
2064 }
2065 if (ret || (--(wbc->nr_to_write) <= 0))
2066 done = 1;
2067 if (wbc->nonblocking && bdi_write_congested(bdi)) {
2068 wbc->encountered_congestion = 1;
2069 done = 1;
2070 }
2071 }
2072 pagevec_release(&pvec);
2073 cond_resched();
2074 }
2075 if (!scanned && !done) {
2076 /*
2077 * We hit the last page and there is more work to be done: wrap
2078 * back to the start of the file
2079 */
2080 scanned = 1;
2081 index = 0;
2082 goto retry;
2083 }
2084 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2085 mapping->writeback_index = index;
2086 return ret;
2087 }
2088 #endif
2089
2090 int extent_write_full_page(struct extent_map_tree *tree, struct page *page,
2091 get_extent_t *get_extent,
2092 struct writeback_control *wbc)
2093 {
2094 int ret;
2095 struct address_space *mapping = page->mapping;
2096 struct extent_page_data epd = {
2097 .bio = NULL,
2098 .tree = tree,
2099 .get_extent = get_extent,
2100 };
2101 struct writeback_control wbc_writepages = {
2102 .bdi = wbc->bdi,
2103 .sync_mode = WB_SYNC_NONE,
2104 .older_than_this = NULL,
2105 .nr_to_write = 64,
2106 .range_start = page_offset(page) + PAGE_CACHE_SIZE,
2107 .range_end = (loff_t)-1,
2108 };
2109
2110
2111 ret = __extent_writepage(page, wbc, &epd);
2112
2113 write_cache_pages(mapping, &wbc_writepages, __extent_writepage, &epd);
2114 if (epd.bio) {
2115 submit_one_bio(WRITE, epd.bio);
2116 }
2117 return ret;
2118 }
2119 EXPORT_SYMBOL(extent_write_full_page);
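
/*
 * Editorial usage sketch, not part of the original source: a filesystem
 * would normally reach extent_write_full_page() from its ->writepage
 * address_space operation, passing its own get_extent callback.  BTRFS_I()
 * and btrfs_get_extent are assumed from the surrounding btrfs code and are
 * shown here only for illustration:
 *
 *	static int btrfs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		struct extent_map_tree *tree =
 *			&BTRFS_I(page->mapping->host)->extent_tree;
 *		return extent_write_full_page(tree, page,
 *					      btrfs_get_extent, wbc);
 *	}
 */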
2120
2121
2122 int extent_writepages(struct extent_map_tree *tree,
2123 struct address_space *mapping,
2124 get_extent_t *get_extent,
2125 struct writeback_control *wbc)
2126 {
2127 int ret = 0;
2128 struct extent_page_data epd = {
2129 .bio = NULL,
2130 .tree = tree,
2131 .get_extent = get_extent,
2132 };
2133
2134 ret = write_cache_pages(mapping, wbc, __extent_writepage, &epd);
2135 if (epd.bio) {
2136 submit_one_bio(WRITE, epd.bio);
2137 }
2138 return ret;
2139 }
2140 EXPORT_SYMBOL(extent_writepages);
2141
2142 int extent_readpages(struct extent_map_tree *tree,
2143 struct address_space *mapping,
2144 struct list_head *pages, unsigned nr_pages,
2145 get_extent_t get_extent)
2146 {
2147 struct bio *bio = NULL;
2148 unsigned page_idx;
2149 struct pagevec pvec;
2150
2151 pagevec_init(&pvec, 0);
2152 for (page_idx = 0; page_idx < nr_pages; page_idx++) {
2153 struct page *page = list_entry(pages->prev, struct page, lru);
2154
2155 prefetchw(&page->flags);
2156 list_del(&page->lru);
2157 /*
2158 * what we want to do here is call add_to_page_cache_lru,
2159 * but that isn't exported, so we reproduce it here
2160 */
2161 if (!add_to_page_cache(page, mapping,
2162 page->index, GFP_KERNEL)) {
2163
2164 /* open coding of lru_cache_add, also not exported */
2165 page_cache_get(page);
2166 if (!pagevec_add(&pvec, page))
2167 __pagevec_lru_add(&pvec);
2168 __extent_read_full_page(tree, page, get_extent, &bio);
2169 }
2170 page_cache_release(page);
2171 }
2172 if (pagevec_count(&pvec))
2173 __pagevec_lru_add(&pvec);
2174 BUG_ON(!list_empty(pages));
2175 if (bio)
2176 submit_one_bio(READ, bio);
2177 return 0;
2178 }
2179 EXPORT_SYMBOL(extent_readpages);
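
/*
 * Editorial usage sketch, not part of the original source: extent_readpages()
 * is meant to back the ->readpages address_space operation of this kernel
 * generation.  Assuming the same illustrative helpers as above:
 *
 *	static int btrfs_readpages(struct file *file,
 *				   struct address_space *mapping,
 *				   struct list_head *pages, unsigned nr_pages)
 *	{
 *		struct extent_map_tree *tree =
 *			&BTRFS_I(mapping->host)->extent_tree;
 *		return extent_readpages(tree, mapping, pages, nr_pages,
 *					btrfs_get_extent);
 *	}
 */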
2180
2181 /*
2182  * basic invalidatepage code: this waits on any locked or writeback
2183 * ranges corresponding to the page, and then deletes any extent state
2184 * records from the tree
2185 */
2186 int extent_invalidatepage(struct extent_map_tree *tree,
2187 struct page *page, unsigned long offset)
2188 {
2189 u64 start = ((u64)page->index << PAGE_CACHE_SHIFT);
2190 u64 end = start + PAGE_CACHE_SIZE - 1;
2191 size_t blocksize = page->mapping->host->i_sb->s_blocksize;
2192
2193 	start += (offset + blocksize - 1) & ~(blocksize - 1);
2194 if (start > end)
2195 return 0;
2196
2197 lock_extent(tree, start, end, GFP_NOFS);
2198 wait_on_extent_writeback(tree, start, end);
2199 clear_extent_bit(tree, start, end,
2200 EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
2201 1, 1, GFP_NOFS);
2202 return 0;
2203 }
2204 EXPORT_SYMBOL(extent_invalidatepage);
2205
2206 /*
2207  * simple commit_write call: set_page_dirty is used to mark the page
2208  * dirty, and i_size is updated if the write extends past the old end of file
2209 */
2210 int extent_commit_write(struct extent_map_tree *tree,
2211 struct inode *inode, struct page *page,
2212 unsigned from, unsigned to)
2213 {
2214 loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to;
2215
2216 set_page_extent_mapped(page);
2217 set_page_dirty(page);
2218
2219 if (pos > inode->i_size) {
2220 i_size_write(inode, pos);
2221 mark_inode_dirty(inode);
2222 }
2223 return 0;
2224 }
2225 EXPORT_SYMBOL(extent_commit_write);
2226
2227 int extent_prepare_write(struct extent_map_tree *tree,
2228 struct inode *inode, struct page *page,
2229 unsigned from, unsigned to, get_extent_t *get_extent)
2230 {
2231 u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
2232 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
2233 u64 block_start;
2234 u64 orig_block_start;
2235 u64 block_end;
2236 u64 cur_end;
2237 struct extent_map *em;
2238 unsigned blocksize = 1 << inode->i_blkbits;
2239 size_t page_offset = 0;
2240 size_t block_off_start;
2241 size_t block_off_end;
2242 int err = 0;
2243 int iocount = 0;
2244 int ret = 0;
2245 int isnew;
2246
2247 set_page_extent_mapped(page);
2248
2249 block_start = (page_start + from) & ~((u64)blocksize - 1);
2250 block_end = (page_start + to - 1) | (blocksize - 1);
2251 orig_block_start = block_start;
2252
2253 lock_extent(tree, page_start, page_end, GFP_NOFS);
2254 while(block_start <= block_end) {
2255 em = get_extent(inode, page, page_offset, block_start,
2256 block_end, 1);
2257 if (IS_ERR(em) || !em) {
2258 goto err;
2259 }
2260 cur_end = min(block_end, em->end);
2261 block_off_start = block_start & (PAGE_CACHE_SIZE - 1);
2262 block_off_end = block_off_start + blocksize;
2263 isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS);
2264
2265 if (!PageUptodate(page) && isnew &&
2266 (block_off_end > to || block_off_start < from)) {
2267 void *kaddr;
2268
2269 kaddr = kmap_atomic(page, KM_USER0);
2270 if (block_off_end > to)
2271 memset(kaddr + to, 0, block_off_end - to);
2272 if (block_off_start < from)
2273 memset(kaddr + block_off_start, 0,
2274 from - block_off_start);
2275 flush_dcache_page(page);
2276 kunmap_atomic(kaddr, KM_USER0);
2277 }
2278 if ((em->block_start != EXTENT_MAP_HOLE &&
2279 em->block_start != EXTENT_MAP_INLINE) &&
2280 !isnew && !PageUptodate(page) &&
2281 (block_off_end > to || block_off_start < from) &&
2282 !test_range_bit(tree, block_start, cur_end,
2283 EXTENT_UPTODATE, 1)) {
2284 u64 sector;
2285 u64 extent_offset = block_start - em->start;
2286 size_t iosize;
2287 sector = (em->block_start + extent_offset) >> 9;
2288 iosize = (cur_end - block_start + blocksize) &
2289 ~((u64)blocksize - 1);
2290 /*
2291 * we've already got the extent locked, but we
2292 * need to split the state such that our end_bio
2293 * handler can clear the lock.
2294 */
2295 set_extent_bit(tree, block_start,
2296 block_start + iosize - 1,
2297 EXTENT_LOCKED, 0, NULL, GFP_NOFS);
2298 ret = submit_extent_page(READ, tree, page,
2299 sector, iosize, page_offset, em->bdev,
2300 NULL, 1,
2301 end_bio_extent_preparewrite);
2302 iocount++;
2303 block_start = block_start + iosize;
2304 } else {
2305 set_extent_uptodate(tree, block_start, cur_end,
2306 GFP_NOFS);
2307 unlock_extent(tree, block_start, cur_end, GFP_NOFS);
2308 block_start = cur_end + 1;
2309 }
2310 page_offset = block_start & (PAGE_CACHE_SIZE - 1);
2311 free_extent_map(em);
2312 }
2313 if (iocount) {
2314 wait_extent_bit(tree, orig_block_start,
2315 block_end, EXTENT_LOCKED);
2316 }
2317 check_page_uptodate(tree, page);
2318 err:
2319 /* FIXME, zero out newly allocated blocks on error */
2320 return err;
2321 }
2322 EXPORT_SYMBOL(extent_prepare_write);
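
/*
 * Editorial usage sketch, not part of the original source: these two helpers
 * pair up behind the old ->prepare_write/->commit_write address_space
 * operations used by the generic write path in this kernel generation.
 * Hypothetical wiring, with BTRFS_I() and btrfs_get_extent assumed:
 *
 *	static int btrfs_prepare_write(struct file *file, struct page *page,
 *				       unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
 *		return extent_prepare_write(tree, inode, page, from, to,
 *					    btrfs_get_extent);
 *	}
 *
 *	static int btrfs_commit_write(struct file *file, struct page *page,
 *				      unsigned from, unsigned to)
 *	{
 *		struct inode *inode = page->mapping->host;
 *		struct extent_map_tree *tree = &BTRFS_I(inode)->extent_tree;
 *		return extent_commit_write(tree, inode, page, from, to);
 *	}
 */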
2323
2324 /*
2325 * a helper for releasepage. As long as there are no locked extents
2326 * in the range corresponding to the page, both state records and extent
2327 * map records are removed
2328 */
2329 int try_release_extent_mapping(struct extent_map_tree *tree, struct page *page)
2330 {
2331 struct extent_map *em;
2332 u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
2333 u64 end = start + PAGE_CACHE_SIZE - 1;
2334 u64 orig_start = start;
2335 int ret = 1;
2336
2337 while (start <= end) {
2338 em = lookup_extent_mapping(tree, start, end);
2339 if (!em || IS_ERR(em))
2340 break;
2341 if (!test_range_bit(tree, em->start, em->end,
2342 EXTENT_LOCKED, 0)) {
2343 remove_extent_mapping(tree, em);
2344 /* once for the rb tree */
2345 free_extent_map(em);
2346 }
2347 start = em->end + 1;
2348 /* once for us */
2349 free_extent_map(em);
2350 }
2351 if (test_range_bit(tree, orig_start, end, EXTENT_LOCKED, 0))
2352 ret = 0;
2353 else
2354 clear_extent_bit(tree, orig_start, end, EXTENT_UPTODATE,
2355 1, 1, GFP_NOFS);
2356 return ret;
2357 }
2358 EXPORT_SYMBOL(try_release_extent_mapping);
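
/*
 * Editorial usage sketch, not part of the original source: the helper above
 * is intended for the ->releasepage address_space operation.  A plausible
 * caller, ignoring whatever extra checks a real filesystem adds:
 *
 *	static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
 *	{
 *		struct extent_map_tree *tree =
 *			&BTRFS_I(page->mapping->host)->extent_tree;
 *		if (PageWriteback(page) || PageDirty(page))
 *			return 0;
 *		return try_release_extent_mapping(tree, page);
 *	}
 */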
2359
2360 sector_t extent_bmap(struct address_space *mapping, sector_t iblock,
2361 get_extent_t *get_extent)
2362 {
2363 struct inode *inode = mapping->host;
2364 u64 start = iblock << inode->i_blkbits;
2365 u64 end = start + (1 << inode->i_blkbits) - 1;
2366 sector_t sector = 0;
2367 struct extent_map *em;
2368
2369 em = get_extent(inode, NULL, 0, start, end, 0);
2370 if (!em || IS_ERR(em))
2371 return 0;
2372
2373 if (em->block_start == EXTENT_MAP_INLINE ||
2374 em->block_start == EXTENT_MAP_HOLE)
2375 goto out;
2376
2377 sector = (em->block_start + start - em->start) >> inode->i_blkbits;
2378 out:
2379 free_extent_map(em);
2380 return sector;
2381 }
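
/*
 * Editorial usage sketch, not part of the original source: extent_bmap() maps
 * straight onto the ->bmap address_space operation, e.g.:
 *
 *	static sector_t btrfs_bmap(struct address_space *mapping, sector_t block)
 *	{
 *		return extent_bmap(mapping, block, btrfs_get_extent);
 *	}
 */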
2382
2383 static int add_lru(struct extent_map_tree *tree, struct extent_buffer *eb)
2384 {
2385 if (list_empty(&eb->lru)) {
2386 extent_buffer_get(eb);
2387 list_add(&eb->lru, &tree->buffer_lru);
2388 tree->lru_size++;
2389 if (tree->lru_size >= BUFFER_LRU_MAX) {
2390 struct extent_buffer *rm;
2391 rm = list_entry(tree->buffer_lru.prev,
2392 struct extent_buffer, lru);
2393 tree->lru_size--;
2394 list_del_init(&rm->lru);
2395 free_extent_buffer(rm);
2396 }
2397 } else
2398 list_move(&eb->lru, &tree->buffer_lru);
2399 return 0;
2400 }
2401 static struct extent_buffer *find_lru(struct extent_map_tree *tree,
2402 u64 start, unsigned long len)
2403 {
2404 struct list_head *lru = &tree->buffer_lru;
2405 struct list_head *cur = lru->next;
2406 struct extent_buffer *eb;
2407
2408 if (list_empty(lru))
2409 return NULL;
2410
2411 do {
2412 eb = list_entry(cur, struct extent_buffer, lru);
2413 if (eb->start == start && eb->len == len) {
2414 extent_buffer_get(eb);
2415 return eb;
2416 }
2417 cur = cur->next;
2418 } while (cur != lru);
2419 return NULL;
2420 }
2421
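/*
 * Editorial note, not part of the original source: num_extent_pages() below
 * counts how many pages the byte range [start, start + len) touches.  For
 * example, with 4K pages, start = 8190 and len = 8 cover bytes 8190..8197,
 * which touch pages 1 and 2:
 *
 *	((8190 + 8 + 4095) >> 12) - (8190 >> 12) = 3 - 1 = 2
 */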
2422 static inline unsigned long num_extent_pages(u64 start, u64 len)
2423 {
2424 return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
2425 (start >> PAGE_CACHE_SHIFT);
2426 }
2427
2428 static inline struct page *extent_buffer_page(struct extent_buffer *eb,
2429 unsigned long i)
2430 {
2431 struct page *p;
2432 struct address_space *mapping;
2433
2434 if (i == 0)
2435 return eb->first_page;
2436 i += eb->start >> PAGE_CACHE_SHIFT;
2437 mapping = eb->first_page->mapping;
2438 read_lock_irq(&mapping->tree_lock);
2439 p = radix_tree_lookup(&mapping->page_tree, i);
2440 read_unlock_irq(&mapping->tree_lock);
2441 return p;
2442 }
2443
2444 static struct extent_buffer *__alloc_extent_buffer(struct extent_map_tree *tree,
2445 u64 start,
2446 unsigned long len,
2447 gfp_t mask)
2448 {
2449 struct extent_buffer *eb = NULL;
2450
2451 spin_lock(&tree->lru_lock);
2452 eb = find_lru(tree, start, len);
2453 spin_unlock(&tree->lru_lock);
2454 if (eb) {
2455 return eb;
2456 }
2457
2458 	eb = kmem_cache_zalloc(extent_buffer_cache, mask);
	if (!eb)
		return NULL;
2459 	INIT_LIST_HEAD(&eb->lru);
2460 eb->start = start;
2461 eb->len = len;
2462 atomic_set(&eb->refs, 1);
2463
2464 return eb;
2465 }
2466
2467 static void __free_extent_buffer(struct extent_buffer *eb)
2468 {
2469 kmem_cache_free(extent_buffer_cache, eb);
2470 }
2471
2472 struct extent_buffer *alloc_extent_buffer(struct extent_map_tree *tree,
2473 u64 start, unsigned long len,
2474 struct page *page0,
2475 gfp_t mask)
2476 {
2477 unsigned long num_pages = num_extent_pages(start, len);
2478 unsigned long i;
2479 unsigned long index = start >> PAGE_CACHE_SHIFT;
2480 struct extent_buffer *eb;
2481 struct page *p;
2482 struct address_space *mapping = tree->mapping;
2483 int uptodate = 1;
2484
2485 eb = __alloc_extent_buffer(tree, start, len, mask);
2486 if (!eb || IS_ERR(eb))
2487 return NULL;
2488
2489 if (eb->flags & EXTENT_BUFFER_FILLED)
2490 goto lru_add;
2491
2492 if (page0) {
2493 eb->first_page = page0;
2494 i = 1;
2495 index++;
2496 page_cache_get(page0);
2497 mark_page_accessed(page0);
2498 set_page_extent_mapped(page0);
2499 WARN_ON(!PageUptodate(page0));
2500 set_page_extent_head(page0, len);
2501 } else {
2502 i = 0;
2503 }
2504 for (; i < num_pages; i++, index++) {
2505 p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM);
2506 if (!p) {
2507 WARN_ON(1);
2508 goto fail;
2509 }
2510 set_page_extent_mapped(p);
2511 mark_page_accessed(p);
2512 if (i == 0) {
2513 eb->first_page = p;
2514 set_page_extent_head(p, len);
2515 } else {
2516 set_page_private(p, EXTENT_PAGE_PRIVATE);
2517 }
2518 if (!PageUptodate(p))
2519 uptodate = 0;
2520 unlock_page(p);
2521 }
2522 if (uptodate)
2523 eb->flags |= EXTENT_UPTODATE;
2524 eb->flags |= EXTENT_BUFFER_FILLED;
2525
2526 lru_add:
2527 spin_lock(&tree->lru_lock);
2528 add_lru(tree, eb);
2529 spin_unlock(&tree->lru_lock);
2530 return eb;
2531
2532 fail:
2533 spin_lock(&tree->lru_lock);
2534 list_del_init(&eb->lru);
2535 spin_unlock(&tree->lru_lock);
2536 if (!atomic_dec_and_test(&eb->refs))
2537 return NULL;
2538 for (index = 1; index < i; index++) {
2539 page_cache_release(extent_buffer_page(eb, index));
2540 }
2541 if (i > 0)
2542 page_cache_release(extent_buffer_page(eb, 0));
2543 __free_extent_buffer(eb);
2544 return NULL;
2545 }
2546 EXPORT_SYMBOL(alloc_extent_buffer);
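
/*
 * Editorial usage sketch, not part of the original source: a typical extent
 * buffer life cycle as a metadata reader might drive it.  "blocknr" and
 * "blocksize" are illustrative names only:
 *
 *	struct extent_buffer *eb;
 *
 *	eb = alloc_extent_buffer(tree, blocknr, blocksize, NULL, GFP_NOFS);
 *	if (!eb)
 *		return -ENOMEM;
 *	if (read_extent_buffer_pages(tree, eb, 0, 1)) {
 *		free_extent_buffer(eb);
 *		return -EIO;
 *	}
 *	... use read_extent_buffer()/write_extent_buffer() on eb ...
 *	free_extent_buffer(eb);
 */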
2547
2548 struct extent_buffer *find_extent_buffer(struct extent_map_tree *tree,
2549 u64 start, unsigned long len,
2550 gfp_t mask)
2551 {
2552 unsigned long num_pages = num_extent_pages(start, len);
2553 unsigned long i;
2554 unsigned long index = start >> PAGE_CACHE_SHIFT;
2555 struct extent_buffer *eb;
2556 struct page *p;
2557 struct address_space *mapping = tree->mapping;
2558 int uptodate = 1;
2559
2560 eb = __alloc_extent_buffer(tree, start, len, mask);
2561 if (!eb || IS_ERR(eb))
2562 return NULL;
2563
2564 if (eb->flags & EXTENT_BUFFER_FILLED)
2565 goto lru_add;
2566
2567 for (i = 0; i < num_pages; i++, index++) {
2568 p = find_lock_page(mapping, index);
2569 if (!p) {
2570 goto fail;
2571 }
2572 set_page_extent_mapped(p);
2573 mark_page_accessed(p);
2574
2575 if (i == 0) {
2576 eb->first_page = p;
2577 set_page_extent_head(p, len);
2578 } else {
2579 set_page_private(p, EXTENT_PAGE_PRIVATE);
2580 }
2581
2582 if (!PageUptodate(p))
2583 uptodate = 0;
2584 unlock_page(p);
2585 }
2586 if (uptodate)
2587 eb->flags |= EXTENT_UPTODATE;
2588 eb->flags |= EXTENT_BUFFER_FILLED;
2589
2590 lru_add:
2591 spin_lock(&tree->lru_lock);
2592 add_lru(tree, eb);
2593 spin_unlock(&tree->lru_lock);
2594 return eb;
2595 fail:
2596 spin_lock(&tree->lru_lock);
2597 list_del_init(&eb->lru);
2598 spin_unlock(&tree->lru_lock);
2599 if (!atomic_dec_and_test(&eb->refs))
2600 return NULL;
2601 for (index = 1; index < i; index++) {
2602 page_cache_release(extent_buffer_page(eb, index));
2603 }
2604 if (i > 0)
2605 page_cache_release(extent_buffer_page(eb, 0));
2606 __free_extent_buffer(eb);
2607 return NULL;
2608 }
2609 EXPORT_SYMBOL(find_extent_buffer);
2610
2611 void free_extent_buffer(struct extent_buffer *eb)
2612 {
2613 unsigned long i;
2614 unsigned long num_pages;
2615
2616 if (!eb)
2617 return;
2618
2619 if (!atomic_dec_and_test(&eb->refs))
2620 return;
2621
2622 WARN_ON(!list_empty(&eb->lru));
2623 num_pages = num_extent_pages(eb->start, eb->len);
2624
2625 for (i = 1; i < num_pages; i++) {
2626 page_cache_release(extent_buffer_page(eb, i));
2627 }
2628 page_cache_release(extent_buffer_page(eb, 0));
2629 __free_extent_buffer(eb);
2630 }
2631 EXPORT_SYMBOL(free_extent_buffer);
2632
2633 int clear_extent_buffer_dirty(struct extent_map_tree *tree,
2634 struct extent_buffer *eb)
2635 {
2636 int set;
2637 unsigned long i;
2638 unsigned long num_pages;
2639 struct page *page;
2640
2641 u64 start = eb->start;
2642 u64 end = start + eb->len - 1;
2643
2644 set = clear_extent_dirty(tree, start, end, GFP_NOFS);
2645 num_pages = num_extent_pages(eb->start, eb->len);
2646
2647 for (i = 0; i < num_pages; i++) {
2648 page = extent_buffer_page(eb, i);
2649 lock_page(page);
2650 if (i == 0)
2651 set_page_extent_head(page, eb->len);
2652 else
2653 set_page_private(page, EXTENT_PAGE_PRIVATE);
2654
2655 /*
2656 * if we're on the last page or the first page and the
2657 * block isn't aligned on a page boundary, do extra checks
2658 		 * to make sure we don't clean a page that is partially dirty
2659 */
2660 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2661 ((i == num_pages - 1) &&
2662 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2663 start = (u64)page->index << PAGE_CACHE_SHIFT;
2664 end = start + PAGE_CACHE_SIZE - 1;
2665 if (test_range_bit(tree, start, end,
2666 EXTENT_DIRTY, 0)) {
2667 unlock_page(page);
2668 continue;
2669 }
2670 }
2671 clear_page_dirty_for_io(page);
2672 write_lock_irq(&page->mapping->tree_lock);
2673 if (!PageDirty(page)) {
2674 radix_tree_tag_clear(&page->mapping->page_tree,
2675 page_index(page),
2676 PAGECACHE_TAG_DIRTY);
2677 }
2678 write_unlock_irq(&page->mapping->tree_lock);
2679 unlock_page(page);
2680 }
2681 return 0;
2682 }
2683 EXPORT_SYMBOL(clear_extent_buffer_dirty);
2684
2685 int wait_on_extent_buffer_writeback(struct extent_map_tree *tree,
2686 struct extent_buffer *eb)
2687 {
2688 return wait_on_extent_writeback(tree, eb->start,
2689 eb->start + eb->len - 1);
2690 }
2691 EXPORT_SYMBOL(wait_on_extent_buffer_writeback);
2692
2693 int set_extent_buffer_dirty(struct extent_map_tree *tree,
2694 struct extent_buffer *eb)
2695 {
2696 unsigned long i;
2697 unsigned long num_pages;
2698
2699 num_pages = num_extent_pages(eb->start, eb->len);
2700 for (i = 0; i < num_pages; i++) {
2701 struct page *page = extent_buffer_page(eb, i);
2702 /* writepage may need to do something special for the
2703 		 * first page, so we have to make sure page->private is
2704 * properly set. releasepage may drop page->private
2705 * on us if the page isn't already dirty.
2706 */
2707 if (i == 0) {
2708 lock_page(page);
2709 set_page_extent_head(page, eb->len);
2710 } else if (PagePrivate(page) &&
2711 page->private != EXTENT_PAGE_PRIVATE) {
2712 lock_page(page);
2713 set_page_extent_mapped(page);
2714 unlock_page(page);
2715 }
2716 __set_page_dirty_nobuffers(extent_buffer_page(eb, i));
2717 if (i == 0)
2718 unlock_page(page);
2719 }
2720 return set_extent_dirty(tree, eb->start,
2721 eb->start + eb->len - 1, GFP_NOFS);
2722 }
2723 EXPORT_SYMBOL(set_extent_buffer_dirty);
2724
2725 int set_extent_buffer_uptodate(struct extent_map_tree *tree,
2726 struct extent_buffer *eb)
2727 {
2728 unsigned long i;
2729 struct page *page;
2730 unsigned long num_pages;
2731
2732 num_pages = num_extent_pages(eb->start, eb->len);
2733
2734 set_extent_uptodate(tree, eb->start, eb->start + eb->len - 1,
2735 GFP_NOFS);
2736 for (i = 0; i < num_pages; i++) {
2737 page = extent_buffer_page(eb, i);
2738 if ((i == 0 && (eb->start & (PAGE_CACHE_SIZE - 1))) ||
2739 ((i == num_pages - 1) &&
2740 ((eb->start + eb->len) & (PAGE_CACHE_SIZE - 1)))) {
2741 check_page_uptodate(tree, page);
2742 continue;
2743 }
2744 SetPageUptodate(page);
2745 }
2746 return 0;
2747 }
2748 EXPORT_SYMBOL(set_extent_buffer_uptodate);
2749
2750 int extent_buffer_uptodate(struct extent_map_tree *tree,
2751 struct extent_buffer *eb)
2752 {
2753 if (eb->flags & EXTENT_UPTODATE)
2754 return 1;
2755 return test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2756 EXTENT_UPTODATE, 1);
2757 }
2758 EXPORT_SYMBOL(extent_buffer_uptodate);
2759
2760 int read_extent_buffer_pages(struct extent_map_tree *tree,
2761 struct extent_buffer *eb,
2762 u64 start,
2763 int wait)
2764 {
2765 unsigned long i;
2766 unsigned long start_i;
2767 struct page *page;
2768 int err;
2769 int ret = 0;
2770 unsigned long num_pages;
2771
2772 if (eb->flags & EXTENT_UPTODATE)
2773 return 0;
2774
2775 if (0 && test_range_bit(tree, eb->start, eb->start + eb->len - 1,
2776 EXTENT_UPTODATE, 1)) {
2777 return 0;
2778 }
2779
2780 if (start) {
2781 WARN_ON(start < eb->start);
2782 start_i = (start >> PAGE_CACHE_SHIFT) -
2783 (eb->start >> PAGE_CACHE_SHIFT);
2784 } else {
2785 start_i = 0;
2786 }
2787
2788 num_pages = num_extent_pages(eb->start, eb->len);
2789 for (i = start_i; i < num_pages; i++) {
2790 page = extent_buffer_page(eb, i);
2791 if (PageUptodate(page)) {
2792 continue;
2793 }
2794 if (!wait) {
2795 if (TestSetPageLocked(page)) {
2796 continue;
2797 }
2798 } else {
2799 lock_page(page);
2800 }
2801 if (!PageUptodate(page)) {
2802 err = page->mapping->a_ops->readpage(NULL, page);
2803 if (err) {
2804 ret = err;
2805 }
2806 } else {
2807 unlock_page(page);
2808 }
2809 }
2810
2811 if (ret || !wait) {
2812 return ret;
2813 }
2814
2815 for (i = start_i; i < num_pages; i++) {
2816 page = extent_buffer_page(eb, i);
2817 wait_on_page_locked(page);
2818 if (!PageUptodate(page)) {
2819 ret = -EIO;
2820 }
2821 }
2822 if (!ret)
2823 eb->flags |= EXTENT_UPTODATE;
2824 return ret;
2825 }
2826 EXPORT_SYMBOL(read_extent_buffer_pages);
2827
2828 void read_extent_buffer(struct extent_buffer *eb, void *dstv,
2829 unsigned long start,
2830 unsigned long len)
2831 {
2832 size_t cur;
2833 size_t offset;
2834 struct page *page;
2835 char *kaddr;
2836 char *dst = (char *)dstv;
2837 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2838 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2839 unsigned long num_pages = num_extent_pages(eb->start, eb->len);
2840
2841 WARN_ON(start > eb->len);
2842 WARN_ON(start + len > eb->start + eb->len);
2843
2844 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2845
2846 while(len > 0) {
2847 page = extent_buffer_page(eb, i);
2848 if (!PageUptodate(page)) {
2849 printk("page %lu not up to date i %lu, total %lu, len %lu\n", page->index, i, num_pages, eb->len);
2850 WARN_ON(1);
2851 }
2852 WARN_ON(!PageUptodate(page));
2853
2854 cur = min(len, (PAGE_CACHE_SIZE - offset));
2855 kaddr = kmap_atomic(page, KM_USER1);
2856 memcpy(dst, kaddr + offset, cur);
2857 kunmap_atomic(kaddr, KM_USER1);
2858
2859 dst += cur;
2860 len -= cur;
2861 offset = 0;
2862 i++;
2863 }
2864 }
2865 EXPORT_SYMBOL(read_extent_buffer);
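
/*
 * Editorial usage sketch, not part of the original source: read_extent_buffer()
 * and the matching write_extent_buffer() further below copy raw bytes at a
 * byte offset inside the logical buffer, handling page crossings internally.
 * "offset" here is a hypothetical, in-bounds buffer offset:
 *
 *	u64 val;
 *
 *	read_extent_buffer(eb, &val, offset, sizeof(val));
 *	... modify val ...
 *	write_extent_buffer(eb, &val, offset, sizeof(val));
 */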
2866
2867 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
2868 unsigned long min_len, char **token, char **map,
2869 unsigned long *map_start,
2870 unsigned long *map_len, int km)
2871 {
2872 size_t offset = start & (PAGE_CACHE_SIZE - 1);
2873 char *kaddr;
2874 struct page *p;
2875 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2876 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2877 unsigned long end_i = (start_offset + start + min_len - 1) >>
2878 PAGE_CACHE_SHIFT;
2879
2880 if (i != end_i)
2881 return -EINVAL;
2882
2883 if (i == 0) {
2884 offset = start_offset;
2885 *map_start = 0;
2886 } else {
2887 offset = 0;
2888 *map_start = ((u64)i << PAGE_CACHE_SHIFT) - start_offset;
2889 }
2890 if (start + min_len > eb->len) {
2891 printk("bad mapping eb start %Lu len %lu, wanted %lu %lu\n", eb->start, eb->len, start, min_len);
2892 WARN_ON(1);
2893 }
2894
2895 p = extent_buffer_page(eb, i);
2896 WARN_ON(!PageUptodate(p));
2897 kaddr = kmap_atomic(p, km);
2898 *token = kaddr;
2899 *map = kaddr + offset;
2900 *map_len = PAGE_CACHE_SIZE - offset;
2901 return 0;
2902 }
2903 EXPORT_SYMBOL(map_private_extent_buffer);
2904
2905 int map_extent_buffer(struct extent_buffer *eb, unsigned long start,
2906 unsigned long min_len,
2907 char **token, char **map,
2908 unsigned long *map_start,
2909 unsigned long *map_len, int km)
2910 {
2911 int err;
2912 int save = 0;
2913 if (eb->map_token) {
2914 unmap_extent_buffer(eb, eb->map_token, km);
2915 eb->map_token = NULL;
2916 save = 1;
2917 }
2918 err = map_private_extent_buffer(eb, start, min_len, token, map,
2919 map_start, map_len, km);
2920 if (!err && save) {
2921 eb->map_token = *token;
2922 eb->kaddr = *map;
2923 eb->map_start = *map_start;
2924 eb->map_len = *map_len;
2925 }
2926 return err;
2927 }
2928 EXPORT_SYMBOL(map_extent_buffer);
2929
2930 void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km)
2931 {
2932 kunmap_atomic(token, km);
2933 }
2934 EXPORT_SYMBOL(unmap_extent_buffer);
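
/*
 * Editorial usage sketch, not part of the original source: map_extent_buffer()
 * hands back a kmap_atomic()ed window into a single page of the buffer, so
 * min_len must not cross a page boundary, and every successful map should be
 * balanced with unmap_extent_buffer() on the returned token:
 *
 *	char *token, *kaddr;
 *	unsigned long map_start, map_len;
 *
 *	if (!map_extent_buffer(eb, offset, sizeof(u64), &token, &kaddr,
 *			       &map_start, &map_len, KM_USER0)) {
 *		... read or write kaddr[0 .. map_len - 1] ...
 *		unmap_extent_buffer(eb, token, KM_USER0);
 *	}
 */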
2935
2936 int memcmp_extent_buffer(struct extent_buffer *eb, const void *ptrv,
2937 unsigned long start,
2938 unsigned long len)
2939 {
2940 size_t cur;
2941 size_t offset;
2942 struct page *page;
2943 char *kaddr;
2944 char *ptr = (char *)ptrv;
2945 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2946 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2947 int ret = 0;
2948
2949 WARN_ON(start > eb->len);
2950 WARN_ON(start + len > eb->start + eb->len);
2951
2952 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2953
2954 while(len > 0) {
2955 page = extent_buffer_page(eb, i);
2956 WARN_ON(!PageUptodate(page));
2957
2958 cur = min(len, (PAGE_CACHE_SIZE - offset));
2959
2960 kaddr = kmap_atomic(page, KM_USER0);
2961 ret = memcmp(ptr, kaddr + offset, cur);
2962 kunmap_atomic(kaddr, KM_USER0);
2963 if (ret)
2964 break;
2965
2966 ptr += cur;
2967 len -= cur;
2968 offset = 0;
2969 i++;
2970 }
2971 return ret;
2972 }
2973 EXPORT_SYMBOL(memcmp_extent_buffer);
2974
2975 void write_extent_buffer(struct extent_buffer *eb, const void *srcv,
2976 unsigned long start, unsigned long len)
2977 {
2978 size_t cur;
2979 size_t offset;
2980 struct page *page;
2981 char *kaddr;
2982 char *src = (char *)srcv;
2983 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
2984 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
2985
2986 WARN_ON(start > eb->len);
2987 WARN_ON(start + len > eb->start + eb->len);
2988
2989 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
2990
2991 while(len > 0) {
2992 page = extent_buffer_page(eb, i);
2993 WARN_ON(!PageUptodate(page));
2994
2995 cur = min(len, PAGE_CACHE_SIZE - offset);
2996 kaddr = kmap_atomic(page, KM_USER1);
2997 memcpy(kaddr + offset, src, cur);
2998 kunmap_atomic(kaddr, KM_USER1);
2999
3000 src += cur;
3001 len -= cur;
3002 offset = 0;
3003 i++;
3004 }
3005 }
3006 EXPORT_SYMBOL(write_extent_buffer);
3007
3008 void memset_extent_buffer(struct extent_buffer *eb, char c,
3009 unsigned long start, unsigned long len)
3010 {
3011 size_t cur;
3012 size_t offset;
3013 struct page *page;
3014 char *kaddr;
3015 size_t start_offset = eb->start & ((u64)PAGE_CACHE_SIZE - 1);
3016 unsigned long i = (start_offset + start) >> PAGE_CACHE_SHIFT;
3017
3018 WARN_ON(start > eb->len);
3019 WARN_ON(start + len > eb->start + eb->len);
3020
3021 offset = (start_offset + start) & ((unsigned long)PAGE_CACHE_SIZE - 1);
3022
3023 while(len > 0) {
3024 page = extent_buffer_page(eb, i);
3025 WARN_ON(!PageUptodate(page));
3026
3027 cur = min(len, PAGE_CACHE_SIZE - offset);
3028 kaddr = kmap_atomic(page, KM_USER0);
3029 memset(kaddr + offset, c, cur);
3030 kunmap_atomic(kaddr, KM_USER0);
3031
3032 len -= cur;
3033 offset = 0;
3034 i++;
3035 }
3036 }
3037 EXPORT_SYMBOL(memset_extent_buffer);
3038
3039 void copy_extent_buffer(struct extent_buffer *dst, struct extent_buffer *src,
3040 unsigned long dst_offset, unsigned long src_offset,
3041 unsigned long len)
3042 {
3043 u64 dst_len = dst->len;
3044 size_t cur;
3045 size_t offset;
3046 struct page *page;
3047 char *kaddr;
3048 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3049 unsigned long i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3050
3051 WARN_ON(src->len != dst_len);
3052
3053 offset = (start_offset + dst_offset) &
3054 ((unsigned long)PAGE_CACHE_SIZE - 1);
3055
3056 while(len > 0) {
3057 page = extent_buffer_page(dst, i);
3058 WARN_ON(!PageUptodate(page));
3059
3060 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE - offset));
3061
3062 kaddr = kmap_atomic(page, KM_USER0);
3063 read_extent_buffer(src, kaddr + offset, src_offset, cur);
3064 kunmap_atomic(kaddr, KM_USER0);
3065
3066 src_offset += cur;
3067 len -= cur;
3068 offset = 0;
3069 i++;
3070 }
3071 }
3072 EXPORT_SYMBOL(copy_extent_buffer);
3073
3074 static void move_pages(struct page *dst_page, struct page *src_page,
3075 unsigned long dst_off, unsigned long src_off,
3076 unsigned long len)
3077 {
3078 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3079 if (dst_page == src_page) {
3080 memmove(dst_kaddr + dst_off, dst_kaddr + src_off, len);
3081 } else {
3082 char *src_kaddr = kmap_atomic(src_page, KM_USER1);
3083 char *p = dst_kaddr + dst_off + len;
3084 char *s = src_kaddr + src_off + len;
3085
3086 while (len--)
3087 *--p = *--s;
3088
3089 kunmap_atomic(src_kaddr, KM_USER1);
3090 }
3091 kunmap_atomic(dst_kaddr, KM_USER0);
3092 }
3093
3094 static void copy_pages(struct page *dst_page, struct page *src_page,
3095 unsigned long dst_off, unsigned long src_off,
3096 unsigned long len)
3097 {
3098 char *dst_kaddr = kmap_atomic(dst_page, KM_USER0);
3099 char *src_kaddr;
3100
3101 if (dst_page != src_page)
3102 src_kaddr = kmap_atomic(src_page, KM_USER1);
3103 else
3104 src_kaddr = dst_kaddr;
3105
3106 memcpy(dst_kaddr + dst_off, src_kaddr + src_off, len);
3107 kunmap_atomic(dst_kaddr, KM_USER0);
3108 if (dst_page != src_page)
3109 kunmap_atomic(src_kaddr, KM_USER1);
3110 }
3111
3112 void memcpy_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3113 unsigned long src_offset, unsigned long len)
3114 {
3115 size_t cur;
3116 size_t dst_off_in_page;
3117 size_t src_off_in_page;
3118 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3119 unsigned long dst_i;
3120 unsigned long src_i;
3121
3122 if (src_offset + len > dst->len) {
3123 		printk("memcpy bogus src_offset %lu move len %lu len %lu\n",
3124 src_offset, len, dst->len);
3125 BUG_ON(1);
3126 }
3127 if (dst_offset + len > dst->len) {
3128 		printk("memcpy bogus dst_offset %lu move len %lu len %lu\n",
3129 dst_offset, len, dst->len);
3130 BUG_ON(1);
3131 }
3132
3133 while(len > 0) {
3134 dst_off_in_page = (start_offset + dst_offset) &
3135 ((unsigned long)PAGE_CACHE_SIZE - 1);
3136 src_off_in_page = (start_offset + src_offset) &
3137 ((unsigned long)PAGE_CACHE_SIZE - 1);
3138
3139 dst_i = (start_offset + dst_offset) >> PAGE_CACHE_SHIFT;
3140 src_i = (start_offset + src_offset) >> PAGE_CACHE_SHIFT;
3141
3142 cur = min(len, (unsigned long)(PAGE_CACHE_SIZE -
3143 src_off_in_page));
3144 cur = min_t(unsigned long, cur,
3145 (unsigned long)(PAGE_CACHE_SIZE - dst_off_in_page));
3146
3147 copy_pages(extent_buffer_page(dst, dst_i),
3148 extent_buffer_page(dst, src_i),
3149 dst_off_in_page, src_off_in_page, cur);
3150
3151 src_offset += cur;
3152 dst_offset += cur;
3153 len -= cur;
3154 }
3155 }
3156 EXPORT_SYMBOL(memcpy_extent_buffer);
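
/*
 * Editorial note, not part of the original source: memcpy_extent_buffer()
 * assumes the source and destination ranges inside dst do not overlap;
 * memmove_extent_buffer() below handles overlap by copying from the end of
 * the range backwards whenever dst_offset > src_offset.  Shifting data down
 * within one buffer therefore looks like:
 *
 *	memmove_extent_buffer(leaf, slot_offset + size, slot_offset,
 *			      bytes_to_shift);
 *
 * where leaf, slot_offset, size and bytes_to_shift are illustrative names.
 */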
3157
3158 void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset,
3159 unsigned long src_offset, unsigned long len)
3160 {
3161 size_t cur;
3162 size_t dst_off_in_page;
3163 size_t src_off_in_page;
3164 unsigned long dst_end = dst_offset + len - 1;
3165 unsigned long src_end = src_offset + len - 1;
3166 size_t start_offset = dst->start & ((u64)PAGE_CACHE_SIZE - 1);
3167 unsigned long dst_i;
3168 unsigned long src_i;
3169
3170 if (src_offset + len > dst->len) {
3171 printk("memmove bogus src_offset %lu move len %lu len %lu\n",
3172 src_offset, len, dst->len);
3173 BUG_ON(1);
3174 }
3175 if (dst_offset + len > dst->len) {
3176 printk("memmove bogus dst_offset %lu move len %lu len %lu\n",
3177 dst_offset, len, dst->len);
3178 BUG_ON(1);
3179 }
3180 if (dst_offset < src_offset) {
3181 memcpy_extent_buffer(dst, dst_offset, src_offset, len);
3182 return;
3183 }
3184 while(len > 0) {
3185 dst_i = (start_offset + dst_end) >> PAGE_CACHE_SHIFT;
3186 src_i = (start_offset + src_end) >> PAGE_CACHE_SHIFT;
3187
3188 dst_off_in_page = (start_offset + dst_end) &
3189 ((unsigned long)PAGE_CACHE_SIZE - 1);
3190 src_off_in_page = (start_offset + src_end) &
3191 ((unsigned long)PAGE_CACHE_SIZE - 1);
3192
3193 cur = min_t(unsigned long, len, src_off_in_page + 1);
3194 cur = min(cur, dst_off_in_page + 1);
3195 move_pages(extent_buffer_page(dst, dst_i),
3196 extent_buffer_page(dst, src_i),
3197 dst_off_in_page - cur + 1,
3198 src_off_in_page - cur + 1, cur);
3199
3200 dst_end -= cur;
3201 src_end -= cur;
3202 len -= cur;
3203 }
3204 }
3205 EXPORT_SYMBOL(memmove_extent_buffer);