reiserfs: cleanup, remove sb argument from journal_mark_dirty
[deliverable/linux.git] / fs / reiserfs / bitmap.c
1 /*
2 * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
3 */
4 /* Reiserfs block (de)allocator, bitmap-based. */
5
6 #include <linux/time.h>
7 #include "reiserfs.h"
8 #include <linux/errno.h>
9 #include <linux/buffer_head.h>
10 #include <linux/kernel.h>
11 #include <linux/pagemap.h>
12 #include <linux/vmalloc.h>
13 #include <linux/quotaops.h>
14 #include <linux/seq_file.h>
15
16 #define PREALLOCATION_SIZE 9
17
18 /* different reiserfs block allocator options */
19
20 #define SB_ALLOC_OPTS(s) (REISERFS_SB(s)->s_alloc_options.bits)
21
22 #define _ALLOC_concentrating_formatted_nodes 0
23 #define _ALLOC_displacing_large_files 1
24 #define _ALLOC_displacing_new_packing_localities 2
25 #define _ALLOC_old_hashed_relocation 3
26 #define _ALLOC_new_hashed_relocation 4
27 #define _ALLOC_skip_busy 5
28 #define _ALLOC_displace_based_on_dirid 6
29 #define _ALLOC_hashed_formatted_nodes 7
30 #define _ALLOC_old_way 8
31 #define _ALLOC_hundredth_slices 9
32 #define _ALLOC_dirid_groups 10
33 #define _ALLOC_oid_groups 11
34 #define _ALLOC_packing_groups 12
35
36 #define concentrating_formatted_nodes(s) test_bit(_ALLOC_concentrating_formatted_nodes, &SB_ALLOC_OPTS(s))
37 #define displacing_large_files(s) test_bit(_ALLOC_displacing_large_files, &SB_ALLOC_OPTS(s))
38 #define displacing_new_packing_localities(s) test_bit(_ALLOC_displacing_new_packing_localities, &SB_ALLOC_OPTS(s))
39
40 #define SET_OPTION(optname) \
41 do { \
42 reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
43 set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
44 } while(0)
45 #define TEST_OPTION(optname, s) \
46 test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
47
48 static inline void get_bit_address(struct super_block *s,
49 b_blocknr_t block,
50 unsigned int *bmap_nr,
51 unsigned int *offset)
52 {
53 /*
54 * It is in the bitmap block number equal to the block
55 * number divided by the number of bits in a block.
56 */
57 *bmap_nr = block >> (s->s_blocksize_bits + 3);
58 /* Within that bitmap block it is located at bit offset *offset. */
59 *offset = block & ((s->s_blocksize << 3) - 1);
60 }
61
/*
 * Sanity-check that @block may legitimately change allocation state.
 *
 * @s:         superblock of the filesystem
 * @block:     block number being freed (bit_value == 1 expected in bitmap)
 *             or checked for reuse
 * @bit_value: the bitmap state the caller is about to assert
 *
 * Returns 1 if the block is an ordinary data/metadata block, 0 (after
 * logging a reiserfs_error) if it is out of range, a bitmap block, or
 * the root block being freed.
 */
int is_reusable(struct super_block *s, b_blocknr_t block, int bit_value)
{
	unsigned int bmap, offset;
	unsigned int bmap_count = reiserfs_bmap_count(s);

	/* block 0 holds boot/super area; anything past the fs end is bogus */
	if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
		reiserfs_error(s, "vs-4010",
			       "block number is out of range %lu (%u)",
			       block, SB_BLOCK_COUNT(s));
		return 0;
	}

	get_bit_address(s, block, &bmap, &offset);

	/*
	 * Old format filesystem? Unlikely, but the bitmaps are all
	 * up front so we need to account for it.
	 */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(s)->s_properties)))) {
		/* old format: all bitmap blocks sit right after the sb */
		b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
		if (block >= bmap1 &&
		    block <= bmap1 + bmap_count) {
			reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	} else {
		/*
		 * new format: each bitmap block is the first block of its
		 * own group, i.e. bit offset 0 within the group
		 */
		if (offset == 0) {
			reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) "
				       "can't be freed or reused",
				       block, bmap_count);
			return 0;
		}
	}

	if (bmap >= bmap_count) {
		reiserfs_error(s, "vs-4030", "bitmap for requested block "
			       "is out of range: block=%lu, bitmap_nr=%u",
			       block, bmap);
		return 0;
	}

	/* the tree root must never be marked free */
	if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
		reiserfs_error(s, "vs-4050", "this is root block (%u), "
			       "it must be busy", SB_ROOT_BLOCK(s));
		return 0;
	}

	return 1;
}
114
115 /*
116 * Searches in journal structures for a given block number (bmap, off).
117 * If block is found in reiserfs journal it suggests next free block
118 * candidate to test.
119 */
120 static inline int is_block_in_journal(struct super_block *s, unsigned int bmap,
121 int off, int *next)
122 {
123 b_blocknr_t tmp;
124
125 if (reiserfs_in_journal(s, bmap, off, 1, &tmp)) {
126 if (tmp) { /* hint supplied */
127 *next = tmp;
128 PROC_INFO_INC(s, scan_bitmap.in_journal_hint);
129 } else {
130 (*next) = off + 1; /* inc offset to avoid looping. */
131 PROC_INFO_INC(s, scan_bitmap.in_journal_nohint);
132 }
133 PROC_INFO_INC(s, scan_bitmap.retry);
134 return 1;
135 }
136 return 0;
137 }
138
139 /*
140 * Searches for a window of zero bits with given minimum and maximum
141 * lengths in one bitmap block
142 */
/*
 * Scan one bitmap block for a window of at least @min and at most @max
 * consecutive zero bits, starting at *@beg and never crossing @boundary.
 *
 * @th:     running transaction (used for journaling the bitmap update)
 * @bmap_n: index of the bitmap block to scan
 * @beg:    in: first bit to consider; out: first allocated bit on success
 * @unfm:   nonzero when allocating for unformatted nodes, in which case
 *          bits owned by the journal must be skipped
 *
 * On success the found bits are set, free counts are updated and both
 * the bitmap buffer and the superblock buffer are journaled; returns the
 * number of blocks allocated.  Returns 0 when no suitable window exists.
 */
static int scan_bitmap_block(struct reiserfs_transaction_handle *th,
			     unsigned int bmap_n, int *beg, int boundary,
			     int min, int max, int unfm)
{
	struct super_block *s = th->t_super;
	struct reiserfs_bitmap_info *bi = &SB_AP_BITMAP(s)[bmap_n];
	struct buffer_head *bh;
	int end, next;
	int org = *beg;	/* remembered so we can restart after a race */

	BUG_ON(!th->t_trans_id);

	RFALSE(bmap_n >= reiserfs_bmap_count(s), "Bitmap %u is out of "
	       "range (0..%u)", bmap_n, reiserfs_bmap_count(s) - 1);
	PROC_INFO_INC(s, scan_bitmap.bmap);

	if (!bi) {
		reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
			       "for bitmap %d", bmap_n);
		return 0;
	}

	bh = reiserfs_read_bitmap_block(s, bmap_n);
	if (bh == NULL)
		return 0;

	while (1) {
cont:
		/* cheap cut-off using the cached free count */
		if (bi->free_count < min) {
			brelse(bh);
			return 0;	/* No free blocks in this bitmap */
		}

		/* search for a first zero bit -- beginning of a window */
		*beg = reiserfs_find_next_zero_le_bit
		    ((unsigned long *)(bh->b_data), boundary, *beg);

		/*
		 * search for a zero bit fails or the rest of bitmap block
		 * cannot contain a zero window of minimum size
		 */
		if (*beg + min > boundary) {
			brelse(bh);
			return 0;
		}

		/* journal-owned bit: is_block_in_journal advanced *beg */
		if (unfm && is_block_in_journal(s, bmap_n, *beg, beg))
			continue;
		/* first zero bit found; we check next bits */
		for (end = *beg + 1;; end++) {
			if (end >= *beg + max || end >= boundary
			    || reiserfs_test_le_bit(end, bh->b_data)) {
				next = end;
				break;
			}

			/*
			 * finding the other end of zero bit window requires
			 * looking into journal structures (in case of
			 * searching for free blocks for unformatted nodes)
			 */
			if (unfm && is_block_in_journal(s, bmap_n, end, &next))
				break;
		}

		/*
		 * now (*beg) points to beginning of zero bits window,
		 * (end) points to one bit after the window end
		 */

		/* found window of proper size */
		if (end - *beg >= min) {
			int i;
			reiserfs_prepare_for_journal(s, bh, 1);
			/*
			 * try to set all blocks used checking are
			 * they still free
			 */
			for (i = *beg; i < end; i++) {
				/* Don't check in journal again. */
				if (reiserfs_test_and_set_le_bit
				    (i, bh->b_data)) {
					/*
					 * bit was set by another process while
					 * we slept in prepare_for_journal()
					 */
					PROC_INFO_INC(s, scan_bitmap.stolen);

					/*
					 * we can continue with smaller set
					 * of allocated blocks, if length of
					 * this set is more or equal to `min'
					 */
					if (i >= *beg + min) {
						end = i;
						break;
					}

					/*
					 * otherwise we clear all bit
					 * were set ...
					 */
					while (--i >= *beg)
						reiserfs_clear_le_bit
						    (i, bh->b_data);
					reiserfs_restore_prepared_buffer(s, bh);
					*beg = org;

					/*
					 * Search again in current block
					 * from beginning
					 */
					goto cont;
				}
			}
			/* commit the bitmap change and the new free count */
			bi->free_count -= (end - *beg);
			journal_mark_dirty(th, bh);
			brelse(bh);

			/* free block count calculation */
			reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s),
						     1);
			PUT_SB_FREE_BLOCKS(s, SB_FREE_BLOCKS(s) - (end - *beg));
			journal_mark_dirty(th, SB_BUFFER_WITH_SB(s));

			return end - (*beg);
		} else {
			/* window too small: resume scanning past it */
			*beg = next;
		}
	}
}
274
275 static int bmap_hash_id(struct super_block *s, u32 id)
276 {
277 char *hash_in = NULL;
278 unsigned long hash;
279 unsigned bm;
280
281 if (id <= 2) {
282 bm = 1;
283 } else {
284 hash_in = (char *)(&id);
285 hash = keyed_hash(hash_in, 4);
286 bm = hash % reiserfs_bmap_count(s);
287 if (!bm)
288 bm = 1;
289 }
290 /* this can only be true when SB_BMAP_NR = 1 */
291 if (bm >= reiserfs_bmap_count(s))
292 bm = 0;
293 return bm;
294 }
295
296 /*
297 * hashes the id and then returns > 0 if the block group for the
298 * corresponding hash is full
299 */
300 static inline int block_group_used(struct super_block *s, u32 id)
301 {
302 int bm = bmap_hash_id(s, id);
303 struct reiserfs_bitmap_info *info = &SB_AP_BITMAP(s)[bm];
304
305 /*
306 * If we don't have cached information on this bitmap block, we're
307 * going to have to load it later anyway. Loading it here allows us
308 * to make a better decision. This favors long-term performance gain
309 * with a better on-disk layout vs. a short term gain of skipping the
310 * read and potentially having a bad placement.
311 */
312 if (info->free_count == UINT_MAX) {
313 struct buffer_head *bh = reiserfs_read_bitmap_block(s, bm);
314 brelse(bh);
315 }
316
317 if (info->free_count > ((s->s_blocksize << 3) * 60 / 100)) {
318 return 0;
319 }
320 return 1;
321 }
322
323 /*
324 * the packing is returned in disk byte order
325 */
326 __le32 reiserfs_choose_packing(struct inode * dir)
327 {
328 __le32 packing;
329 if (TEST_OPTION(packing_groups, dir->i_sb)) {
330 u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id);
331 /*
332 * some versions of reiserfsck expect packing locality 1 to be
333 * special
334 */
335 if (parent_dir == 1 || block_group_used(dir->i_sb, parent_dir))
336 packing = INODE_PKEY(dir)->k_objectid;
337 else
338 packing = INODE_PKEY(dir)->k_dir_id;
339 } else
340 packing = INODE_PKEY(dir)->k_objectid;
341 return packing;
342 }
343
344 /*
345 * Tries to find contiguous zero bit window (given size) in given region of
346 * bitmap and place new blocks there. Returns number of allocated blocks.
347 */
/*
 * Walk the bitmap blocks between *@start and @finish looking for a run
 * of @min..@max free blocks.  On success *@start is updated to the
 * first allocated block number and the count of allocated blocks is
 * returned; 0 means nothing was found.
 *
 * @unfm:       nonzero when allocating unformatted (data) blocks
 * @file_block: logical block within the file, used by the skip_busy
 *              policy to decide whether mid-bitmap starts are allowed
 */
static int scan_bitmap(struct reiserfs_transaction_handle *th,
		       b_blocknr_t * start, b_blocknr_t finish,
		       int min, int max, int unfm, sector_t file_block)
{
	int nr_allocated = 0;
	struct super_block *s = th->t_super;
	unsigned int bm, off;
	unsigned int end_bm, end_off;
	unsigned int off_max = s->s_blocksize << 3;	/* bits per bitmap block */

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, scan_bitmap.call);

	/* No point in looking for more free blocks */
	if (SB_FREE_BLOCKS(s) <= 0)
		return 0;

	get_bit_address(s, *start, &bm, &off);
	get_bit_address(s, finish, &end_bm, &end_off);
	if (bm > reiserfs_bmap_count(s))
		return 0;
	if (end_bm > reiserfs_bmap_count(s))
		end_bm = reiserfs_bmap_count(s);

	/*
	 * When the bitmap is more than 10% free, anyone can allocate.
	 * When it's less than 10% free, only files that already use the
	 * bitmap are allowed. Once we pass 80% full, this restriction
	 * is lifted.
	 *
	 * We do this so that files that grow later still have space close to
	 * their original allocation. This improves locality, and presumably
	 * performance as a result.
	 *
	 * This is only an allocation policy and does not make up for getting a
	 * bad hint. Decent hinting must be implemented for this to work well.
	 *
	 * NOTE(review): the guard below compares against BLOCK_COUNT/20
	 * (5% free), which does not match the 10%/80% figures above —
	 * confirm which is intended.
	 */
	if (TEST_OPTION(skip_busy, s)
	    && SB_FREE_BLOCKS(s) > SB_BLOCK_COUNT(s) / 20) {
		/* first pass: prefer bitmaps that are still mostly free */
		for (; bm < end_bm; bm++, off = 0) {
			if ((off && (!unfm || (file_block != 0)))
			    || SB_AP_BITMAP(s)[bm].free_count >
			    (s->s_blocksize << 3) / 10)
				nr_allocated =
				    scan_bitmap_block(th, bm, &off, off_max,
						      min, max, unfm);
			if (nr_allocated)
				goto ret;
		}
		/* we know from above that start is a reasonable number */
		get_bit_address(s, *start, &bm, &off);
	}

	/* second pass: take whatever any full bitmap block offers */
	for (; bm < end_bm; bm++, off = 0) {
		nr_allocated =
		    scan_bitmap_block(th, bm, &off, off_max, min, max, unfm);
		if (nr_allocated)
			goto ret;
	}

	/* finally, the (partial) last bitmap block up to end_off */
	nr_allocated =
	    scan_bitmap_block(th, bm, &off, end_off + 1, min, max, unfm);

ret:
	/* convert (bitmap index, bit offset) back to a block number */
	*start = bm * off_max + off;
	return nr_allocated;

}
417
/*
 * Clear @block's bit in the on-disk bitmap, bump the free-block
 * counters and journal both the bitmap buffer and the superblock
 * buffer.  For unformatted (data) blocks the inode's quota is credited
 * as well.  Callers must have validated the block (see
 * reiserfs_free_block) and hold a running transaction.
 */
static void _reiserfs_free_block(struct reiserfs_transaction_handle *th,
				 struct inode *inode, b_blocknr_t block,
				 int for_unformatted)
{
	struct super_block *s = th->t_super;
	struct reiserfs_super_block *rs;
	struct buffer_head *sbh, *bmbh;
	struct reiserfs_bitmap_info *apbi;
	unsigned int nr, offset;

	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(s, free_block);

	rs = SB_DISK_SUPER_BLOCK(s);
	sbh = SB_BUFFER_WITH_SB(s);
	apbi = SB_AP_BITMAP(s);

	get_bit_address(s, block, &nr, &offset);

	if (nr >= reiserfs_bmap_count(s)) {
		reiserfs_error(s, "vs-4075", "block %lu is out of range",
			       block);
		return;
	}

	bmbh = reiserfs_read_bitmap_block(s, nr);
	if (!bmbh)
		return;

	/* must prepare before modifying a journaled buffer */
	reiserfs_prepare_for_journal(s, bmbh, 1);

	/* clear bit for the given block in bit map */
	if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
		/* double free / corruption: report but keep going */
		reiserfs_error(s, "vs-4080",
			       "block %lu: bit already cleared", block);
	}
	apbi[nr].free_count++;
	journal_mark_dirty(th, bmbh);
	brelse(bmbh);

	reiserfs_prepare_for_journal(s, sbh, 1);
	/* update super block */
	set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);

	journal_mark_dirty(th, sbh);
	if (for_unformatted) {
		/* quota code may sleep; drop the write lock around it */
		int depth = reiserfs_write_unlock_nested(s);
		dquot_free_block_nodirty(inode, 1);
		reiserfs_write_lock_nested(s, depth);
	}
}
470
471 void reiserfs_free_block(struct reiserfs_transaction_handle *th,
472 struct inode *inode, b_blocknr_t block,
473 int for_unformatted)
474 {
475 struct super_block *s = th->t_super;
476 BUG_ON(!th->t_trans_id);
477
478 RFALSE(!s, "vs-4061: trying to free block on nonexistent device");
479 if (!is_reusable(s, block, 1))
480 return;
481
482 if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
483 reiserfs_error(th->t_super, "bitmap-4072",
484 "Trying to free block outside file system "
485 "boundaries (%lu > %lu)",
486 block, sb_block_count(REISERFS_SB(s)->s_rs));
487 return;
488 }
489 /* mark it before we clear it, just in case */
490 journal_mark_freed(th, s, block);
491 _reiserfs_free_block(th, inode, block, for_unformatted);
492 }
493
494 /* preallocated blocks don't need to be run through journal_mark_freed */
495 static void reiserfs_free_prealloc_block(struct reiserfs_transaction_handle *th,
496 struct inode *inode, b_blocknr_t block)
497 {
498 BUG_ON(!th->t_trans_id);
499 RFALSE(!th->t_super,
500 "vs-4060: trying to free block on nonexistent device");
501 if (!is_reusable(th->t_super, block, 1))
502 return;
503 _reiserfs_free_block(th, inode, block, 1);
504 }
505
/*
 * Return every block in @ei's preallocation window to the free pool and
 * take the inode off the journal's prealloc list.  i_prealloc_block is
 * advanced while freeing and then restored to its original value so the
 * allocator hint ("search forward from the last prealloc") survives.
 */
static void __discard_prealloc(struct reiserfs_transaction_handle *th,
			       struct reiserfs_inode_info *ei)
{
	unsigned long save = ei->i_prealloc_block;	/* restored below */
	int dirty = 0;
	struct inode *inode = &ei->vfs_inode;
	BUG_ON(!th->t_trans_id);
#ifdef CONFIG_REISERFS_CHECK
	/* NOTE(review): id "zam-4001" is also used elsewhere in this file */
	if (ei->i_prealloc_count < 0)
		reiserfs_error(th->t_super, "zam-4001",
			       "inode has negative prealloc blocks count.");
#endif
	/* free the window front-to-back, one block per iteration */
	while (ei->i_prealloc_count > 0) {
		reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
		ei->i_prealloc_block++;
		ei->i_prealloc_count--;
		dirty = 1;
	}
	/* stat data changed only if we actually freed something */
	if (dirty)
		reiserfs_update_sd(th, inode);
	ei->i_prealloc_block = save;
	list_del_init(&(ei->i_prealloc_list));
}
529
530 /* FIXME: It should be inline function */
531 void reiserfs_discard_prealloc(struct reiserfs_transaction_handle *th,
532 struct inode *inode)
533 {
534 struct reiserfs_inode_info *ei = REISERFS_I(inode);
535 BUG_ON(!th->t_trans_id);
536 if (ei->i_prealloc_count)
537 __discard_prealloc(th, ei);
538 }
539
540 void reiserfs_discard_all_prealloc(struct reiserfs_transaction_handle *th)
541 {
542 struct list_head *plist = &SB_JOURNAL(th->t_super)->j_prealloc_list;
543
544 BUG_ON(!th->t_trans_id);
545
546 while (!list_empty(plist)) {
547 struct reiserfs_inode_info *ei;
548 ei = list_entry(plist->next, struct reiserfs_inode_info,
549 i_prealloc_list);
550 #ifdef CONFIG_REISERFS_CHECK
551 if (!ei->i_prealloc_count) {
552 reiserfs_error(th->t_super, "zam-4001",
553 "inode is in prealloc list but has "
554 "no preallocated blocks.");
555 }
556 #endif
557 __discard_prealloc(th, ei);
558 }
559 }
560
561 void reiserfs_init_alloc_options(struct super_block *s)
562 {
563 set_bit(_ALLOC_skip_busy, &SB_ALLOC_OPTS(s));
564 set_bit(_ALLOC_dirid_groups, &SB_ALLOC_OPTS(s));
565 set_bit(_ALLOC_packing_groups, &SB_ALLOC_OPTS(s));
566 }
567
568 /* block allocator related options are parsed here */
569 int reiserfs_parse_alloc_options(struct super_block *s, char *options)
570 {
571 char *this_char, *value;
572
573 /* clear default settings */
574 REISERFS_SB(s)->s_alloc_options.bits = 0;
575
576 while ((this_char = strsep(&options, ":")) != NULL) {
577 if ((value = strchr(this_char, '=')) != NULL)
578 *value++ = 0;
579
580 if (!strcmp(this_char, "concentrating_formatted_nodes")) {
581 int temp;
582 SET_OPTION(concentrating_formatted_nodes);
583 temp = (value
584 && *value) ? simple_strtoul(value, &value,
585 0) : 10;
586 if (temp <= 0 || temp > 100) {
587 REISERFS_SB(s)->s_alloc_options.border = 10;
588 } else {
589 REISERFS_SB(s)->s_alloc_options.border =
590 100 / temp;
591 }
592 continue;
593 }
594 if (!strcmp(this_char, "displacing_large_files")) {
595 SET_OPTION(displacing_large_files);
596 REISERFS_SB(s)->s_alloc_options.large_file_size =
597 (value
598 && *value) ? simple_strtoul(value, &value, 0) : 16;
599 continue;
600 }
601 if (!strcmp(this_char, "displacing_new_packing_localities")) {
602 SET_OPTION(displacing_new_packing_localities);
603 continue;
604 };
605
606 if (!strcmp(this_char, "old_hashed_relocation")) {
607 SET_OPTION(old_hashed_relocation);
608 continue;
609 }
610
611 if (!strcmp(this_char, "new_hashed_relocation")) {
612 SET_OPTION(new_hashed_relocation);
613 continue;
614 }
615
616 if (!strcmp(this_char, "dirid_groups")) {
617 SET_OPTION(dirid_groups);
618 continue;
619 }
620 if (!strcmp(this_char, "oid_groups")) {
621 SET_OPTION(oid_groups);
622 continue;
623 }
624 if (!strcmp(this_char, "packing_groups")) {
625 SET_OPTION(packing_groups);
626 continue;
627 }
628 if (!strcmp(this_char, "hashed_formatted_nodes")) {
629 SET_OPTION(hashed_formatted_nodes);
630 continue;
631 }
632
633 if (!strcmp(this_char, "skip_busy")) {
634 SET_OPTION(skip_busy);
635 continue;
636 }
637
638 if (!strcmp(this_char, "hundredth_slices")) {
639 SET_OPTION(hundredth_slices);
640 continue;
641 }
642
643 if (!strcmp(this_char, "old_way")) {
644 SET_OPTION(old_way);
645 continue;
646 }
647
648 if (!strcmp(this_char, "displace_based_on_dirid")) {
649 SET_OPTION(displace_based_on_dirid);
650 continue;
651 }
652
653 if (!strcmp(this_char, "preallocmin")) {
654 REISERFS_SB(s)->s_alloc_options.preallocmin =
655 (value
656 && *value) ? simple_strtoul(value, &value, 0) : 4;
657 continue;
658 }
659
660 if (!strcmp(this_char, "preallocsize")) {
661 REISERFS_SB(s)->s_alloc_options.preallocsize =
662 (value
663 && *value) ? simple_strtoul(value, &value,
664 0) :
665 PREALLOCATION_SIZE;
666 continue;
667 }
668
669 reiserfs_warning(s, "zam-4001", "unknown option - %s",
670 this_char);
671 return 1;
672 }
673
674 reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
675 return 0;
676 }
677
/* Emit a ":" separator before every option except the first one. */
static void print_sep(struct seq_file *seq, int *first)
{
	if (*first)
		*first = 0;
	else
		seq_puts(seq, ":");
}
685
/*
 * Emit the current allocator options as a ",alloc=..." mount-option
 * string into @seq (for /proc/mounts et al.).  Nothing is printed when
 * the options equal the defaults set by reiserfs_init_alloc_options().
 */
void show_alloc_options(struct seq_file *seq, struct super_block *s)
{
	int first = 1;

	/* exactly the default bits -> show nothing */
	if (SB_ALLOC_OPTS(s) == ((1 << _ALLOC_skip_busy) |
		(1 << _ALLOC_dirid_groups) | (1 << _ALLOC_packing_groups)))
		return;

	seq_puts(seq, ",alloc=");

	if (TEST_OPTION(concentrating_formatted_nodes, s)) {
		print_sep(seq, &first);
		/* border stores 100/percent; print the percent back */
		if (REISERFS_SB(s)->s_alloc_options.border != 10) {
			seq_printf(seq, "concentrating_formatted_nodes=%d",
				100 / REISERFS_SB(s)->s_alloc_options.border);
		} else
			seq_puts(seq, "concentrating_formatted_nodes");
	}
	if (TEST_OPTION(displacing_large_files, s)) {
		print_sep(seq, &first);
		if (REISERFS_SB(s)->s_alloc_options.large_file_size != 16) {
			seq_printf(seq, "displacing_large_files=%lu",
			    REISERFS_SB(s)->s_alloc_options.large_file_size);
		} else
			seq_puts(seq, "displacing_large_files");
	}
	if (TEST_OPTION(displacing_new_packing_localities, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displacing_new_packing_localities");
	}
	if (TEST_OPTION(old_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_hashed_relocation");
	}
	if (TEST_OPTION(new_hashed_relocation, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "new_hashed_relocation");
	}
	if (TEST_OPTION(dirid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "dirid_groups");
	}
	if (TEST_OPTION(oid_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "oid_groups");
	}
	if (TEST_OPTION(packing_groups, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "packing_groups");
	}
	if (TEST_OPTION(hashed_formatted_nodes, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hashed_formatted_nodes");
	}
	if (TEST_OPTION(skip_busy, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "skip_busy");
	}
	if (TEST_OPTION(hundredth_slices, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "hundredth_slices");
	}
	if (TEST_OPTION(old_way, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "old_way");
	}
	if (TEST_OPTION(displace_based_on_dirid, s)) {
		print_sep(seq, &first);
		seq_puts(seq, "displace_based_on_dirid");
	}
	if (REISERFS_SB(s)->s_alloc_options.preallocmin != 0) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocmin=%d",
			   REISERFS_SB(s)->s_alloc_options.preallocmin);
	}
	/*
	 * NOTE(review): parse uses PREALLOCATION_SIZE (9) as the default
	 * but this compares against 17 — looks inconsistent; confirm the
	 * intended default before changing either side.
	 */
	if (REISERFS_SB(s)->s_alloc_options.preallocsize != 17) {
		print_sep(seq, &first);
		seq_printf(seq, "preallocsize=%d",
			   REISERFS_SB(s)->s_alloc_options.preallocsize);
	}
}
767
768 static inline void new_hashed_relocation(reiserfs_blocknr_hint_t * hint)
769 {
770 char *hash_in;
771 if (hint->formatted_node) {
772 hash_in = (char *)&hint->key.k_dir_id;
773 } else {
774 if (!hint->inode) {
775 /*hint->search_start = hint->beg;*/
776 hash_in = (char *)&hint->key.k_dir_id;
777 } else
778 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
779 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
780 else
781 hash_in =
782 (char *)(&INODE_PKEY(hint->inode)->k_objectid);
783 }
784
785 hint->search_start =
786 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
787 }
788
789 /*
790 * Relocation based on dirid, hashing them into a given bitmap block
791 * files. Formatted nodes are unaffected, a separate policy covers them
792 */
793 static void dirid_groups(reiserfs_blocknr_hint_t * hint)
794 {
795 unsigned long hash;
796 __u32 dirid = 0;
797 int bm = 0;
798 struct super_block *sb = hint->th->t_super;
799 if (hint->inode)
800 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
801 else if (hint->formatted_node)
802 dirid = hint->key.k_dir_id;
803
804 if (dirid) {
805 bm = bmap_hash_id(sb, dirid);
806 hash = bm * (sb->s_blocksize << 3);
807 /* give a portion of the block group to metadata */
808 if (hint->inode)
809 hash += sb->s_blocksize / 2;
810 hint->search_start = hash;
811 }
812 }
813
814 /*
815 * Relocation based on oid, hashing them into a given bitmap block
816 * files. Formatted nodes are unaffected, a separate policy covers them
817 */
818 static void oid_groups(reiserfs_blocknr_hint_t * hint)
819 {
820 if (hint->inode) {
821 unsigned long hash;
822 __u32 oid;
823 __u32 dirid;
824 int bm;
825
826 dirid = le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id);
827
828 /*
829 * keep the root dir and it's first set of subdirs close to
830 * the start of the disk
831 */
832 if (dirid <= 2)
833 hash = (hint->inode->i_sb->s_blocksize << 3);
834 else {
835 oid = le32_to_cpu(INODE_PKEY(hint->inode)->k_objectid);
836 bm = bmap_hash_id(hint->inode->i_sb, oid);
837 hash = bm * (hint->inode->i_sb->s_blocksize << 3);
838 }
839 hint->search_start = hash;
840 }
841 }
842
843 /*
844 * returns 1 if it finds an indirect item and gets valid hint info
845 * from it, otherwise 0
846 */
/*
 * returns 1 if it finds an indirect item and gets valid hint info
 * from it, otherwise 0
 */
static int get_left_neighbor(reiserfs_blocknr_hint_t * hint)
{
	struct treepath *path;
	struct buffer_head *bh;
	struct item_head *ih;
	int pos_in_item;
	__le32 *item;
	int ret = 0;

	/*
	 * reiserfs code can call this function w/o pointer to path
	 * structure supplied; then we rely on supplied search_start
	 */
	if (!hint->path)
		return 0;

	path = hint->path;
	bh = get_last_bh(path);
	RFALSE(!bh, "green-4002: Illegal path specified to get_left_neighbor");
	ih = tp_item_head(path);
	pos_in_item = path->pos_in_item;
	item = tp_item_body(path);

	/* default hint: the leaf we are positioned in */
	hint->search_start = bh->b_blocknr;

	/*
	 * for indirect item: go to left and look for the first non-hole entry
	 * in the indirect item
	 */
	if (!hint->formatted_node && is_indirect_le_ih(ih)) {
		/* clamp position when the path points one past the end */
		if (pos_in_item == I_UNFM_NUM(ih))
			pos_in_item--;
		/* scan leftwards for the first non-zero (non-hole) pointer */
		while (pos_in_item >= 0) {
			int t = get_block_num(item, pos_in_item);
			if (t) {
				hint->search_start = t;
				ret = 1;
				break;
			}
			pos_in_item--;
		}
	}

	/* does result value fit into specified region? */
	return ret;
}
893
894 /*
895 * should be, if formatted node, then try to put on first part of the device
896 * specified as number of percent with mount option device, else try to put
897 * on last of device. This is not to say it is good code to do so,
898 * but the effect should be measured.
899 */
900 static inline void set_border_in_hint(struct super_block *s,
901 reiserfs_blocknr_hint_t * hint)
902 {
903 b_blocknr_t border =
904 SB_BLOCK_COUNT(s) / REISERFS_SB(s)->s_alloc_options.border;
905
906 if (hint->formatted_node)
907 hint->end = border - 1;
908 else
909 hint->beg = border;
910 }
911
912 static inline void displace_large_file(reiserfs_blocknr_hint_t * hint)
913 {
914 if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
915 hint->search_start =
916 hint->beg +
917 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_dir_id),
918 4) % (hint->end - hint->beg);
919 else
920 hint->search_start =
921 hint->beg +
922 keyed_hash((char *)(&INODE_PKEY(hint->inode)->k_objectid),
923 4) % (hint->end - hint->beg);
924 }
925
926 static inline void hash_formatted_node(reiserfs_blocknr_hint_t * hint)
927 {
928 char *hash_in;
929
930 if (!hint->inode)
931 hash_in = (char *)&hint->key.k_dir_id;
932 else if (TEST_OPTION(displace_based_on_dirid, hint->th->t_super))
933 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_dir_id);
934 else
935 hash_in = (char *)(&INODE_PKEY(hint->inode)->k_objectid);
936
937 hint->search_start =
938 hint->beg + keyed_hash(hash_in, 4) % (hint->end - hint->beg);
939 }
940
941 static inline int
942 this_blocknr_allocation_would_make_it_a_large_file(reiserfs_blocknr_hint_t *
943 hint)
944 {
945 return hint->block ==
946 REISERFS_SB(hint->th->t_super)->s_alloc_options.large_file_size;
947 }
948
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
/* Hash a brand-new packing locality's objectid into the search window. */
static inline void displace_new_packing_locality(reiserfs_blocknr_hint_t * hint)
{
	b_blocknr_t span = hint->end - hint->beg;

	/* displace only once per new locality */
	hint->th->displace_new_blocks = 0;
	hint->search_start =
	    hint->beg + keyed_hash((char *)(&hint->key.k_objectid), 4) % span;
}
#endif
960
961 static inline int old_hashed_relocation(reiserfs_blocknr_hint_t * hint)
962 {
963 b_blocknr_t border;
964 u32 hash_in;
965
966 if (hint->formatted_node || hint->inode == NULL) {
967 return 0;
968 }
969
970 hash_in = le32_to_cpu((INODE_PKEY(hint->inode))->k_dir_id);
971 border =
972 hint->beg + (u32) keyed_hash(((char *)(&hash_in)),
973 4) % (hint->end - hint->beg - 1);
974 if (border > hint->search_start)
975 hint->search_start = border;
976
977 return 1;
978 }
979
980 static inline int old_way(reiserfs_blocknr_hint_t * hint)
981 {
982 b_blocknr_t border;
983
984 if (hint->formatted_node || hint->inode == NULL) {
985 return 0;
986 }
987
988 border =
989 hint->beg +
990 le32_to_cpu(INODE_PKEY(hint->inode)->k_dir_id) % (hint->end -
991 hint->beg);
992 if (border > hint->search_start)
993 hint->search_start = border;
994
995 return 1;
996 }
997
998 static inline void hundredth_slices(reiserfs_blocknr_hint_t * hint)
999 {
1000 struct in_core_key *key = &hint->key;
1001 b_blocknr_t slice_start;
1002
1003 slice_start =
1004 (keyed_hash((char *)(&key->k_dir_id), 4) % 100) * (hint->end / 100);
1005 if (slice_start > hint->search_start
1006 || slice_start + (hint->end / 100) <= hint->search_start) {
1007 hint->search_start = slice_start;
1008 }
1009 }
1010
/*
 * Apply the configured placement policies, in priority order, to set
 * hint->search_start for an allocation of @amount_needed blocks.
 * Exclusive policies (border displacement, large files, hashed
 * formatted nodes, old_way, hundredth_slices) return immediately;
 * the remaining hash/grouping policies are layered, later ones
 * overriding earlier ones.
 */
static void determine_search_start(reiserfs_blocknr_hint_t * hint,
				   int amount_needed)
{
	struct super_block *s = hint->th->t_super;
	int unfm_hint;

	/* start from the whole device, then let policies narrow it */
	hint->beg = 0;
	hint->end = SB_BLOCK_COUNT(s) - 1;

	/* This is former border algorithm. Now with tunable border offset */
	if (concentrating_formatted_nodes(s))
		set_border_in_hint(s, hint);

#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	/*
	 * whenever we create a new directory, we displace it. At first
	 * we will hash for location, later we might look for a moderately
	 * empty place for it
	 */
	if (displacing_new_packing_localities(s)
	    && hint->th->displace_new_blocks) {
		displace_new_packing_locality(hint);

		/*
		 * we do not continue determine_search_start,
		 * if new packing locality is being displaced
		 */
		return;
	}
#endif

	/*
	 * all persons should feel encouraged to add more special cases
	 * here and test them
	 */

	if (displacing_large_files(s) && !hint->formatted_node
	    && this_blocknr_allocation_would_make_it_a_large_file(hint)) {
		displace_large_file(hint);
		return;
	}

	/*
	 * if none of our special cases is relevant, use the left
	 * neighbor in the tree order of the new node we are allocating for
	 */
	if (hint->formatted_node && TEST_OPTION(hashed_formatted_nodes, s)) {
		hash_formatted_node(hint);
		return;
	}

	/* 1 when a real left-neighbor block was found in the tree */
	unfm_hint = get_left_neighbor(hint);

	/*
	 * Mimic old block allocator behaviour, that is if VFS allowed for
	 * preallocation, new blocks are displaced based on directory ID.
	 * Also, if suggested search_start is less than last preallocated
	 * block, we start searching from it, assuming that HDD dataflow
	 * is faster in forward direction
	 */
	if (TEST_OPTION(old_way, s)) {
		if (!hint->formatted_node) {
			if (!reiserfs_hashed_relocation(s))
				old_way(hint);
			else if (!reiserfs_no_unhashed_relocation(s))
				old_hashed_relocation(hint);

			if (hint->inode
			    && hint->search_start <
			    REISERFS_I(hint->inode)->i_prealloc_block)
				hint->search_start =
				    REISERFS_I(hint->inode)->i_prealloc_block;
		}
		return;
	}

	/* This is an approach proposed by Hans */
	if (TEST_OPTION(hundredth_slices, s)
	    && !(displacing_large_files(s) && !hint->formatted_node)) {
		hundredth_slices(hint);
		return;
	}

	/* old_hashed_relocation only works on unformatted */
	if (!unfm_hint && !hint->formatted_node &&
	    TEST_OPTION(old_hashed_relocation, s)) {
		old_hashed_relocation(hint);
	}

	/* new_hashed_relocation works with both formatted/unformatted nodes */
	if ((!unfm_hint || hint->formatted_node) &&
	    TEST_OPTION(new_hashed_relocation, s)) {
		new_hashed_relocation(hint);
	}

	/* dirid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
	if (hint->formatted_node && TEST_OPTION(dirid_groups, s)) {
		dirid_groups(hint);
	}
#endif

	/* oid grouping works only on unformatted nodes */
	if (!unfm_hint && !hint->formatted_node && TEST_OPTION(oid_groups, s)) {
		oid_groups(hint);
	}
	return;
}
1122
1123 static int determine_prealloc_size(reiserfs_blocknr_hint_t * hint)
1124 {
1125 /* make minimum size a mount option and benchmark both ways */
1126 /* we preallocate blocks only for regular files, specific size */
1127 /* benchmark preallocating always and see what happens */
1128
1129 hint->prealloc_size = 0;
1130
1131 if (!hint->formatted_node && hint->preallocate) {
1132 if (S_ISREG(hint->inode->i_mode)
1133 && hint->inode->i_size >=
1134 REISERFS_SB(hint->th->t_super)->s_alloc_options.
1135 preallocmin * hint->inode->i_sb->s_blocksize)
1136 hint->prealloc_size =
1137 REISERFS_SB(hint->th->t_super)->s_alloc_options.
1138 preallocsize - 1;
1139 }
1140 return CARRY_ON;
1141 }
1142
1143 static inline int allocate_without_wrapping_disk(reiserfs_blocknr_hint_t * hint,
1144 b_blocknr_t * new_blocknrs,
1145 b_blocknr_t start,
1146 b_blocknr_t finish, int min,
1147 int amount_needed,
1148 int prealloc_size)
1149 {
1150 int rest = amount_needed;
1151 int nr_allocated;
1152
1153 while (rest > 0 && start <= finish) {
1154 nr_allocated = scan_bitmap(hint->th, &start, finish, min,
1155 rest + prealloc_size,
1156 !hint->formatted_node, hint->block);
1157
1158 if (nr_allocated == 0) /* no new blocks allocated, return */
1159 break;
1160
1161 /* fill free_blocknrs array first */
1162 while (rest > 0 && nr_allocated > 0) {
1163 *new_blocknrs++ = start++;
1164 rest--;
1165 nr_allocated--;
1166 }
1167
1168 /* do we have something to fill prealloc. array also ? */
1169 if (nr_allocated > 0) {
1170 /*
1171 * it means prealloc_size was greater that 0 and
1172 * we do preallocation
1173 */
1174 list_add(&REISERFS_I(hint->inode)->i_prealloc_list,
1175 &SB_JOURNAL(hint->th->t_super)->
1176 j_prealloc_list);
1177 REISERFS_I(hint->inode)->i_prealloc_block = start;
1178 REISERFS_I(hint->inode)->i_prealloc_count =
1179 nr_allocated;
1180 break;
1181 }
1182 }
1183
1184 return (amount_needed - rest);
1185 }
1186
/*
 * Allocate @amount_needed blocks (plus any preallocation amount set up
 * by determine_prealloc_size()) and store their numbers in
 * @new_blocknrs.
 *
 * For unformatted (data) blocks, quota for the whole request including
 * preallocation is charged up front, and whatever could not actually
 * be allocated is refunded afterwards.  The bitmap is searched in up
 * to three passes: [search_start, end of disk], [beg, search_start],
 * and finally [0, beg].
 *
 * Returns CARRY_ON on success, QUOTA_EXCEEDED if the quota charge
 * fails, or NO_DISK_SPACE if all three passes come up short (any
 * partially allocated blocks and quota are released first).
 */
static inline int blocknrs_and_prealloc_arrays_from_search_start
    (reiserfs_blocknr_hint_t * hint, b_blocknr_t * new_blocknrs,
     int amount_needed) {
	struct super_block *s = hint->th->t_super;
	b_blocknr_t start = hint->search_start;
	b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
	int passno = 0;
	int nr_allocated = 0;
	int depth;

	determine_prealloc_size(hint);
	if (!hint->formatted_node) {
		int quota_ret;
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: allocating %d blocks id=%u",
			       amount_needed, hint->inode->i_uid);
#endif
		/* quota calls may sleep; drop the write lock around them */
		depth = reiserfs_write_unlock_nested(s);
		quota_ret =
		    dquot_alloc_block_nodirty(hint->inode, amount_needed);
		if (quota_ret) {	/* Quota exceeded? */
			reiserfs_write_lock_nested(s, depth);
			return QUOTA_EXCEEDED;
		}
		if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
			reiserfs_debug(s, REISERFS_DEBUG_CODE,
				       "reiserquota: allocating (prealloc) %d blocks id=%u",
				       hint->prealloc_size, hint->inode->i_uid);
#endif
			quota_ret = dquot_prealloc_block_nodirty(hint->inode,
							 hint->prealloc_size);
			/* prealloc quota denied: just skip preallocation */
			if (quota_ret)
				hint->preallocate = hint->prealloc_size = 0;
		}
		/* for unformatted nodes, force large allocations */
		reiserfs_write_lock_nested(s, depth);
	}

	do {
		switch (passno++) {
		case 0:	/* Search from hint->search_start to end of disk */
			start = hint->search_start;
			finish = SB_BLOCK_COUNT(s) - 1;
			break;
		case 1:	/* Search from hint->beg to hint->search_start */
			start = hint->beg;
			finish = hint->search_start;
			break;
		case 2:	/* Last chance: Search from 0 to hint->beg */
			start = 0;
			finish = hint->beg;
			break;
		default:
			/* We've tried searching everywhere, not enough space */
			/* Free the blocks */
			if (!hint->formatted_node) {
#ifdef REISERQUOTA_DEBUG
				reiserfs_debug(s, REISERFS_DEBUG_CODE,
					       "reiserquota: freeing (nospace) %d blocks id=%u",
					       amount_needed +
					       hint->prealloc_size -
					       nr_allocated,
					       hint->inode->i_uid);
#endif
				/* Free not allocated blocks */
				depth = reiserfs_write_unlock_nested(s);
				dquot_free_block_nodirty(hint->inode,
					amount_needed + hint->prealloc_size -
					nr_allocated);
				reiserfs_write_lock_nested(s, depth);
			}
			/* give back the blocks we did manage to grab */
			while (nr_allocated--)
				reiserfs_free_block(hint->th, hint->inode,
						    new_blocknrs[nr_allocated],
						    !hint->formatted_node);

			return NO_DISK_SPACE;
		}
		/*
		 * Each pass appends to new_blocknrs; loop until the full
		 * request is satisfied or the passes are exhausted.
		 */
	} while ((nr_allocated += allocate_without_wrapping_disk(hint,
								 new_blocknrs +
								 nr_allocated,
								 start, finish,
								 1,
								 amount_needed -
								 nr_allocated,
								 hint->
								 prealloc_size))
		 < amount_needed);
	if (!hint->formatted_node &&
	    amount_needed + hint->prealloc_size >
	    nr_allocated + REISERFS_I(hint->inode)->i_prealloc_count) {
		/* Some of preallocation blocks were not allocated */
		/* refund the quota charged up front for them */
#ifdef REISERQUOTA_DEBUG
		reiserfs_debug(s, REISERFS_DEBUG_CODE,
			       "reiserquota: freeing (failed prealloc) %d blocks id=%u",
			       amount_needed + hint->prealloc_size -
			       nr_allocated -
			       REISERFS_I(hint->inode)->i_prealloc_count,
			       hint->inode->i_uid);
#endif

		depth = reiserfs_write_unlock_nested(s);
		dquot_free_block_nodirty(hint->inode, amount_needed +
					 hint->prealloc_size - nr_allocated -
					 REISERFS_I(hint->inode)->
					 i_prealloc_count);
		reiserfs_write_lock_nested(s, depth);
	}

	return CARRY_ON;
}
1300
1301 /* grab new blocknrs from preallocated list */
1302 /* return amount still needed after using them */
1303 static int use_preallocated_list_if_available(reiserfs_blocknr_hint_t * hint,
1304 b_blocknr_t * new_blocknrs,
1305 int amount_needed)
1306 {
1307 struct inode *inode = hint->inode;
1308
1309 if (REISERFS_I(inode)->i_prealloc_count > 0) {
1310 while (amount_needed) {
1311
1312 *new_blocknrs++ = REISERFS_I(inode)->i_prealloc_block++;
1313 REISERFS_I(inode)->i_prealloc_count--;
1314
1315 amount_needed--;
1316
1317 if (REISERFS_I(inode)->i_prealloc_count <= 0) {
1318 list_del(&REISERFS_I(inode)->i_prealloc_list);
1319 break;
1320 }
1321 }
1322 }
1323 /* return amount still needed after using preallocated blocks */
1324 return amount_needed;
1325 }
1326
1327 int reiserfs_allocate_blocknrs(reiserfs_blocknr_hint_t *hint,
1328 b_blocknr_t *new_blocknrs,
1329 int amount_needed,
1330 /* Amount of blocks we have already reserved */
1331 int reserved_by_us)
1332 {
1333 int initial_amount_needed = amount_needed;
1334 int ret;
1335 struct super_block *s = hint->th->t_super;
1336
1337 /* Check if there is enough space, taking into account reserved space */
1338 if (SB_FREE_BLOCKS(s) - REISERFS_SB(s)->reserved_blocks <
1339 amount_needed - reserved_by_us)
1340 return NO_DISK_SPACE;
1341 /* should this be if !hint->inode && hint->preallocate? */
1342 /* do you mean hint->formatted_node can be removed ? - Zam */
1343 /*
1344 * hint->formatted_node cannot be removed because we try to access
1345 * inode information here, and there is often no inode associated with
1346 * metadata allocations - green
1347 */
1348
1349 if (!hint->formatted_node && hint->preallocate) {
1350 amount_needed = use_preallocated_list_if_available
1351 (hint, new_blocknrs, amount_needed);
1352
1353 /*
1354 * We have all the block numbers we need from the
1355 * prealloc list
1356 */
1357 if (amount_needed == 0)
1358 return CARRY_ON;
1359 new_blocknrs += (initial_amount_needed - amount_needed);
1360 }
1361
1362 /* find search start and save it in hint structure */
1363 determine_search_start(hint, amount_needed);
1364 if (hint->search_start >= SB_BLOCK_COUNT(s))
1365 hint->search_start = SB_BLOCK_COUNT(s) - 1;
1366
1367 /* allocation itself; fill new_blocknrs and preallocation arrays */
1368 ret = blocknrs_and_prealloc_arrays_from_search_start
1369 (hint, new_blocknrs, amount_needed);
1370
1371 /*
1372 * We used prealloc. list to fill (partially) new_blocknrs array.
1373 * If final allocation fails we need to return blocks back to
1374 * prealloc. list or just free them. -- Zam (I chose second
1375 * variant)
1376 */
1377 if (ret != CARRY_ON) {
1378 while (amount_needed++ < initial_amount_needed) {
1379 reiserfs_free_block(hint->th, hint->inode,
1380 *(--new_blocknrs), 1);
1381 }
1382 }
1383 return ret;
1384 }
1385
1386 void reiserfs_cache_bitmap_metadata(struct super_block *sb,
1387 struct buffer_head *bh,
1388 struct reiserfs_bitmap_info *info)
1389 {
1390 unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);
1391
1392 /* The first bit must ALWAYS be 1 */
1393 if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data))
1394 reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is "
1395 "corrupted: first bit must be 1", bh->b_blocknr);
1396
1397 info->free_count = 0;
1398
1399 while (--cur >= (unsigned long *)bh->b_data) {
1400 /* 0 and ~0 are special, we can optimize for them */
1401 if (*cur == 0)
1402 info->free_count += BITS_PER_LONG;
1403 else if (*cur != ~0L) /* A mix, investigate */
1404 info->free_count += BITS_PER_LONG - hweight_long(*cur);
1405 }
1406 }
1407
/*
 * Read bitmap block number @bitmap of @sb, computing its on-disk
 * location from the filesystem format, and make sure its cached
 * free-block metadata (SB_AP_BITMAP entry) is populated.
 *
 * Returns the buffer_head on success or NULL if the read failed.
 */
struct buffer_head *reiserfs_read_bitmap_block(struct super_block *sb,
					       unsigned int bitmap)
{
	/*
	 * Current format: one bitmap block at the start of each group
	 * of (blocksize * 8) blocks it maps.
	 */
	b_blocknr_t block = (sb->s_blocksize << 3) * bitmap;
	struct reiserfs_bitmap_info *info = SB_AP_BITMAP(sb) + bitmap;
	struct buffer_head *bh;

	/*
	 * Way old format filesystems had the bitmaps packed up front.
	 * I doubt there are any of these left, but just in case...
	 */
	if (unlikely(test_bit(REISERFS_OLD_FORMAT,
			      &(REISERFS_SB(sb)->s_properties))))
		block = REISERFS_SB(sb)->s_sbh->b_blocknr + 1 + bitmap;
	else if (bitmap == 0)
		/* first bitmap block sits just past the superblock area */
		block = (REISERFS_DISK_OFFSET_IN_BYTES >> sb->s_blocksize_bits) + 1;

	bh = sb_bread(sb, block);
	if (bh == NULL)
		reiserfs_warning(sb, "sh-2029: %s: bitmap block (#%u) "
				 "reading failed", __func__, block);
	else {
		if (buffer_locked(bh)) {
			int depth;
			PROC_INFO_INC(sb, scan_bitmap.wait);
			/* drop the write lock while waiting for the I/O */
			depth = reiserfs_write_unlock_nested(sb);
			__wait_on_buffer(bh);
			reiserfs_write_lock_nested(sb, depth);
		}
		BUG_ON(!buffer_uptodate(bh));
		BUG_ON(atomic_read(&bh->b_count) == 0);

		/* free_count == UINT_MAX means "not cached yet" (0xff fill) */
		if (info->free_count == UINT_MAX)
			reiserfs_cache_bitmap_metadata(sb, bh, info);
	}

	return bh;
}
1446
1447 int reiserfs_init_bitmap_cache(struct super_block *sb)
1448 {
1449 struct reiserfs_bitmap_info *bitmap;
1450 unsigned int bmap_nr = reiserfs_bmap_count(sb);
1451
1452 bitmap = vmalloc(sizeof(*bitmap) * bmap_nr);
1453 if (bitmap == NULL)
1454 return -ENOMEM;
1455
1456 memset(bitmap, 0xff, sizeof(*bitmap) * bmap_nr);
1457
1458 SB_AP_BITMAP(sb) = bitmap;
1459
1460 return 0;
1461 }
1462
1463 void reiserfs_free_bitmap_cache(struct super_block *sb)
1464 {
1465 if (SB_AP_BITMAP(sb)) {
1466 vfree(SB_AP_BITMAP(sb));
1467 SB_AP_BITMAP(sb) = NULL;
1468 }
1469 }
This page took 0.080299 seconds and 5 git commands to generate.