ocfs2: simplify deallocation locking
fs/ocfs2/suballoc.c
1 /* -*- mode: c; c-basic-offset: 8; -*-
2 * vim: noexpandtab sw=8 ts=8 sts=0:
3 *
4 * suballoc.c
5 *
6 * metadata alloc and free
7 * Inspired by ext3 block groups.
8 *
9 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public
13 * License as published by the Free Software Foundation; either
14 * version 2 of the License, or (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public
22 * License along with this program; if not, write to the
23 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
24 * Boston, MA 02111-1307, USA.
25 */
26
27 #include <linux/fs.h>
28 #include <linux/types.h>
29 #include <linux/slab.h>
30 #include <linux/highmem.h>
31
32 #define MLOG_MASK_PREFIX ML_DISK_ALLOC
33 #include <cluster/masklog.h>
34
35 #include "ocfs2.h"
36
37 #include "alloc.h"
38 #include "dlmglue.h"
39 #include "inode.h"
40 #include "journal.h"
41 #include "localalloc.h"
42 #include "suballoc.h"
43 #include "super.h"
44 #include "sysfile.h"
45 #include "uptodate.h"
46
47 #include "buffer_head_io.h"
48
49 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg);
50 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe);
51 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl);
52 static int ocfs2_block_group_fill(handle_t *handle,
53 struct inode *alloc_inode,
54 struct buffer_head *bg_bh,
55 u64 group_blkno,
56 u16 my_chain,
57 struct ocfs2_chain_list *cl);
58 static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
59 struct inode *alloc_inode,
60 struct buffer_head *bh);
61
62 static int ocfs2_cluster_group_search(struct inode *inode,
63 struct buffer_head *group_bh,
64 u32 bits_wanted, u32 min_bits,
65 u16 *bit_off, u16 *bits_found);
66 static int ocfs2_block_group_search(struct inode *inode,
67 struct buffer_head *group_bh,
68 u32 bits_wanted, u32 min_bits,
69 u16 *bit_off, u16 *bits_found);
70 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
71 struct ocfs2_alloc_context *ac,
72 handle_t *handle,
73 u32 bits_wanted,
74 u32 min_bits,
75 u16 *bit_off,
76 unsigned int *num_bits,
77 u64 *bg_blkno);
78 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
79 int nr);
80 static inline int ocfs2_block_group_set_bits(handle_t *handle,
81 struct inode *alloc_inode,
82 struct ocfs2_group_desc *bg,
83 struct buffer_head *group_bh,
84 unsigned int bit_off,
85 unsigned int num_bits);
86 static inline int ocfs2_block_group_clear_bits(handle_t *handle,
87 struct inode *alloc_inode,
88 struct ocfs2_group_desc *bg,
89 struct buffer_head *group_bh,
90 unsigned int bit_off,
91 unsigned int num_bits);
92
93 static int ocfs2_relink_block_group(handle_t *handle,
94 struct inode *alloc_inode,
95 struct buffer_head *fe_bh,
96 struct buffer_head *bg_bh,
97 struct buffer_head *prev_bg_bh,
98 u16 chain);
99 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
100 u32 wanted);
101 static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
102 u64 bg_blkno,
103 u16 bg_bit_off);
104 static inline u64 ocfs2_which_cluster_group(struct inode *inode,
105 u32 cluster);
106 static inline void ocfs2_block_to_cluster_group(struct inode *inode,
107 u64 data_blkno,
108 u64 *bg_blkno,
109 u16 *bg_bit_off);
110
111 void ocfs2_free_alloc_context(struct ocfs2_alloc_context *ac)
112 {
113 struct inode *inode = ac->ac_inode;
114
115 if (inode) {
116 if (ac->ac_which != OCFS2_AC_USE_LOCAL)
117 ocfs2_meta_unlock(inode, 1);
118
119 mutex_unlock(&inode->i_mutex);
120
121 iput(inode);
122 }
123 if (ac->ac_bh)
124 brelse(ac->ac_bh);
125 kfree(ac);
126 }
127
128 static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
129 {
130 return (u32)le16_to_cpu(cl->cl_cpg) * (u32)le16_to_cpu(cl->cl_bpc);
131 }
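/*
 * Illustrative example (numbers made up for this note): with
 * cl_cpg = 64 clusters per group and cl_bpc = 32 bits per cluster,
 * each group descriptor covers 64 * 32 = 2048 bits.
 */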
132
133 /* somewhat more expensive than our other checks, so use sparingly. */
134 static int ocfs2_check_group_descriptor(struct super_block *sb,
135 struct ocfs2_dinode *di,
136 struct ocfs2_group_desc *gd)
137 {
138 unsigned int max_bits;
139
140 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
141 OCFS2_RO_ON_INVALID_GROUP_DESC(sb, gd);
142 return -EIO;
143 }
144
145 if (di->i_blkno != gd->bg_parent_dinode) {
146 ocfs2_error(sb, "Group descriptor # %llu has bad parent "
147 "pointer (%llu, expected %llu)",
148 (unsigned long long)le64_to_cpu(gd->bg_blkno),
149 (unsigned long long)le64_to_cpu(gd->bg_parent_dinode),
150 (unsigned long long)le64_to_cpu(di->i_blkno));
151 return -EIO;
152 }
153
154 max_bits = le16_to_cpu(di->id2.i_chain.cl_cpg) * le16_to_cpu(di->id2.i_chain.cl_bpc);
155 if (le16_to_cpu(gd->bg_bits) > max_bits) {
156 ocfs2_error(sb, "Group descriptor # %llu has bit count of %u",
157 (unsigned long long)le64_to_cpu(gd->bg_blkno),
158 le16_to_cpu(gd->bg_bits));
159 return -EIO;
160 }
161
162 if (le16_to_cpu(gd->bg_chain) >=
163 le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
164 ocfs2_error(sb, "Group descriptor # %llu has bad chain %u",
165 (unsigned long long)le64_to_cpu(gd->bg_blkno),
166 le16_to_cpu(gd->bg_chain));
167 return -EIO;
168 }
169
170 if (le16_to_cpu(gd->bg_free_bits_count) > le16_to_cpu(gd->bg_bits)) {
171 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
172 "claims that %u are free",
173 (unsigned long long)le64_to_cpu(gd->bg_blkno),
174 le16_to_cpu(gd->bg_bits),
175 le16_to_cpu(gd->bg_free_bits_count));
176 return -EIO;
177 }
178
179 if (le16_to_cpu(gd->bg_bits) > (8 * le16_to_cpu(gd->bg_size))) {
180 ocfs2_error(sb, "Group descriptor # %llu has bit count %u but "
181 "max bitmap bits of %u",
182 (unsigned long long)le64_to_cpu(gd->bg_blkno),
183 le16_to_cpu(gd->bg_bits),
184 8 * le16_to_cpu(gd->bg_size));
185 return -EIO;
186 }
187
188 return 0;
189 }
190
191 static int ocfs2_block_group_fill(handle_t *handle,
192 struct inode *alloc_inode,
193 struct buffer_head *bg_bh,
194 u64 group_blkno,
195 u16 my_chain,
196 struct ocfs2_chain_list *cl)
197 {
198 int status = 0;
199 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
200 struct super_block * sb = alloc_inode->i_sb;
201
202 mlog_entry_void();
203
204 if (((unsigned long long) bg_bh->b_blocknr) != group_blkno) {
205 ocfs2_error(alloc_inode->i_sb, "group block (%llu) != "
206 "b_blocknr (%llu)",
207 (unsigned long long)group_blkno,
208 (unsigned long long) bg_bh->b_blocknr);
209 status = -EIO;
210 goto bail;
211 }
212
213 status = ocfs2_journal_access(handle,
214 alloc_inode,
215 bg_bh,
216 OCFS2_JOURNAL_ACCESS_CREATE);
217 if (status < 0) {
218 mlog_errno(status);
219 goto bail;
220 }
221
222 memset(bg, 0, sb->s_blocksize);
223 strcpy(bg->bg_signature, OCFS2_GROUP_DESC_SIGNATURE);
224 bg->bg_generation = cpu_to_le32(OCFS2_SB(sb)->fs_generation);
225 bg->bg_size = cpu_to_le16(ocfs2_group_bitmap_size(sb));
226 bg->bg_bits = cpu_to_le16(ocfs2_bits_per_group(cl));
227 bg->bg_chain = cpu_to_le16(my_chain);
228 bg->bg_next_group = cl->cl_recs[my_chain].c_blkno;
229 bg->bg_parent_dinode = cpu_to_le64(OCFS2_I(alloc_inode)->ip_blkno);
230 bg->bg_blkno = cpu_to_le64(group_blkno);
231 /* set the 1st bit in the bitmap to account for the descriptor block */
232 ocfs2_set_bit(0, (unsigned long *)bg->bg_bitmap);
233 bg->bg_free_bits_count = cpu_to_le16(le16_to_cpu(bg->bg_bits) - 1);
234
235 status = ocfs2_journal_dirty(handle, bg_bh);
236 if (status < 0)
237 mlog_errno(status);
238
239 /* There is no need to zero out or otherwise initialize the
240 * other blocks in a group - All valid FS metadata in a block
241 * group stores the superblock fs_generation value at
242 * allocation time. */
243
244 bail:
245 mlog_exit(status);
246 return status;
247 }
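/*
 * Illustrative example: for a freshly filled group with bg_bits = 2048,
 * bit 0 is consumed by the descriptor block itself, so the group starts
 * life with bg_free_bits_count = 2047.
 */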
248
249 static inline u16 ocfs2_find_smallest_chain(struct ocfs2_chain_list *cl)
250 {
251 u16 curr, best;
252
253 best = curr = 0;
254 while (curr < le16_to_cpu(cl->cl_count)) {
255 if (le32_to_cpu(cl->cl_recs[best].c_total) >
256 le32_to_cpu(cl->cl_recs[curr].c_total))
257 best = curr;
258 curr++;
259 }
260 return best;
261 }
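/*
 * Illustrative example: with per-chain c_total counts of
 * { 4096, 2048, 6144 }, chain 1 is picked to receive the new group.
 */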
262
263 /*
264 * We expect the block group allocator to already be locked.
265 */
266 static int ocfs2_block_group_alloc(struct ocfs2_super *osb,
267 struct inode *alloc_inode,
268 struct buffer_head *bh)
269 {
270 int status, credits;
271 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bh->b_data;
272 struct ocfs2_chain_list *cl;
273 struct ocfs2_alloc_context *ac = NULL;
274 handle_t *handle = NULL;
275 u32 bit_off, num_bits;
276 u16 alloc_rec;
277 u64 bg_blkno;
278 struct buffer_head *bg_bh = NULL;
279 struct ocfs2_group_desc *bg;
280
281 BUG_ON(ocfs2_is_cluster_bitmap(alloc_inode));
282
283 mlog_entry_void();
284
285 cl = &fe->id2.i_chain;
286 status = ocfs2_reserve_clusters(osb,
287 le16_to_cpu(cl->cl_cpg),
288 &ac);
289 if (status < 0) {
290 if (status != -ENOSPC)
291 mlog_errno(status);
292 goto bail;
293 }
294
295 credits = ocfs2_calc_group_alloc_credits(osb->sb,
296 le16_to_cpu(cl->cl_cpg));
297 handle = ocfs2_start_trans(osb, credits);
298 if (IS_ERR(handle)) {
299 status = PTR_ERR(handle);
300 handle = NULL;
301 mlog_errno(status);
302 goto bail;
303 }
304
305 status = ocfs2_claim_clusters(osb,
306 handle,
307 ac,
308 le16_to_cpu(cl->cl_cpg),
309 &bit_off,
310 &num_bits);
311 if (status < 0) {
312 if (status != -ENOSPC)
313 mlog_errno(status);
314 goto bail;
315 }
316
317 alloc_rec = ocfs2_find_smallest_chain(cl);
318
319 /* setup the group */
320 bg_blkno = ocfs2_clusters_to_blocks(osb->sb, bit_off);
321 mlog(0, "new descriptor, record %u, at block %llu\n",
322 alloc_rec, (unsigned long long)bg_blkno);
323
324 bg_bh = sb_getblk(osb->sb, bg_blkno);
325 if (!bg_bh) {
326 status = -EIO;
327 mlog_errno(status);
328 goto bail;
329 }
330 ocfs2_set_new_buffer_uptodate(alloc_inode, bg_bh);
331
332 status = ocfs2_block_group_fill(handle,
333 alloc_inode,
334 bg_bh,
335 bg_blkno,
336 alloc_rec,
337 cl);
338 if (status < 0) {
339 mlog_errno(status);
340 goto bail;
341 }
342
343 bg = (struct ocfs2_group_desc *) bg_bh->b_data;
344
345 status = ocfs2_journal_access(handle, alloc_inode,
346 bh, OCFS2_JOURNAL_ACCESS_WRITE);
347 if (status < 0) {
348 mlog_errno(status);
349 goto bail;
350 }
351
352 le32_add_cpu(&cl->cl_recs[alloc_rec].c_free,
353 le16_to_cpu(bg->bg_free_bits_count));
354 le32_add_cpu(&cl->cl_recs[alloc_rec].c_total, le16_to_cpu(bg->bg_bits));
355 cl->cl_recs[alloc_rec].c_blkno = cpu_to_le64(bg_blkno);
356 if (le16_to_cpu(cl->cl_next_free_rec) < le16_to_cpu(cl->cl_count))
357 le16_add_cpu(&cl->cl_next_free_rec, 1);
358
359 le32_add_cpu(&fe->id1.bitmap1.i_used, le16_to_cpu(bg->bg_bits) -
360 le16_to_cpu(bg->bg_free_bits_count));
361 le32_add_cpu(&fe->id1.bitmap1.i_total, le16_to_cpu(bg->bg_bits));
362 le32_add_cpu(&fe->i_clusters, le16_to_cpu(cl->cl_cpg));
363
364 status = ocfs2_journal_dirty(handle, bh);
365 if (status < 0) {
366 mlog_errno(status);
367 goto bail;
368 }
369
370 spin_lock(&OCFS2_I(alloc_inode)->ip_lock);
371 OCFS2_I(alloc_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
372 fe->i_size = cpu_to_le64(ocfs2_clusters_to_bytes(alloc_inode->i_sb,
373 le32_to_cpu(fe->i_clusters)));
374 spin_unlock(&OCFS2_I(alloc_inode)->ip_lock);
375 i_size_write(alloc_inode, le64_to_cpu(fe->i_size));
376 alloc_inode->i_blocks = ocfs2_inode_sector_count(alloc_inode);
377
378 status = 0;
379 bail:
380 if (handle)
381 ocfs2_commit_trans(osb, handle);
382
383 if (ac)
384 ocfs2_free_alloc_context(ac);
385
386 if (bg_bh)
387 brelse(bg_bh);
388
389 mlog_exit(status);
390 return status;
391 }
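/*
 * Worked example (illustrative numbers): with cl_cpg = 64 and
 * cl_bpc = 32, the new group adds bg_bits = 2048 to the chosen chain's
 * c_total and 2047 to its c_free (bit 0 holds the descriptor), bumps
 * the dinode's i_used by 1 and i_total by 2048, and grows
 * fe->i_clusters by 64.
 */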
392
393 static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
394 struct ocfs2_alloc_context *ac,
395 int type,
396 u32 slot)
397 {
398 int status;
399 u32 bits_wanted = ac->ac_bits_wanted;
400 struct inode *alloc_inode;
401 struct buffer_head *bh = NULL;
402 struct ocfs2_dinode *fe;
403 u32 free_bits;
404
405 mlog_entry_void();
406
407 alloc_inode = ocfs2_get_system_file_inode(osb, type, slot);
408 if (!alloc_inode) {
409 mlog_errno(-EINVAL);
410 return -EINVAL;
411 }
412
413 mutex_lock(&alloc_inode->i_mutex);
414
415 status = ocfs2_meta_lock(alloc_inode, &bh, 1);
416 if (status < 0) {
417 mutex_unlock(&alloc_inode->i_mutex);
418 iput(alloc_inode);
419
420 mlog_errno(status);
421 return status;
422 }
423
424 ac->ac_inode = alloc_inode;
425
426 fe = (struct ocfs2_dinode *) bh->b_data;
427 if (!OCFS2_IS_VALID_DINODE(fe)) {
428 OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
429 status = -EIO;
430 goto bail;
431 }
432 if (!(fe->i_flags & cpu_to_le32(OCFS2_CHAIN_FL))) {
433 ocfs2_error(alloc_inode->i_sb, "Invalid chain allocator %llu",
434 (unsigned long long)le64_to_cpu(fe->i_blkno));
435 status = -EIO;
436 goto bail;
437 }
438
439 free_bits = le32_to_cpu(fe->id1.bitmap1.i_total) -
440 le32_to_cpu(fe->id1.bitmap1.i_used);
441
442 if (bits_wanted > free_bits) {
443 /* cluster bitmap never grows */
444 if (ocfs2_is_cluster_bitmap(alloc_inode)) {
445 mlog(0, "Disk Full: wanted=%u, free_bits=%u\n",
446 bits_wanted, free_bits);
447 status = -ENOSPC;
448 goto bail;
449 }
450
451 status = ocfs2_block_group_alloc(osb, alloc_inode, bh);
452 if (status < 0) {
453 if (status != -ENOSPC)
454 mlog_errno(status);
455 goto bail;
456 }
457 atomic_inc(&osb->alloc_stats.bg_extends);
458
459 /* You should never ask for this much metadata */
460 BUG_ON(bits_wanted >
461 (le32_to_cpu(fe->id1.bitmap1.i_total)
462 - le32_to_cpu(fe->id1.bitmap1.i_used)));
463 }
464
465 get_bh(bh);
466 ac->ac_bh = bh;
467 bail:
468 if (bh)
469 brelse(bh);
470
471 mlog_exit(status);
472 return status;
473 }
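/*
 * Locking note: on success this returns with alloc_inode->i_mutex held,
 * the meta lock taken and a reference held via ac->ac_inode.  For
 * contexts set up here (ac->ac_which != OCFS2_AC_USE_LOCAL),
 * ocfs2_free_alloc_context() above undoes all three:
 *
 *	ocfs2_meta_unlock(inode, 1);
 *	mutex_unlock(&inode->i_mutex);
 *	iput(inode);
 */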
474
475 int ocfs2_reserve_new_metadata(struct ocfs2_super *osb,
476 struct ocfs2_dinode *fe,
477 struct ocfs2_alloc_context **ac)
478 {
479 int status;
480 u32 slot;
481
482 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
483 if (!(*ac)) {
484 status = -ENOMEM;
485 mlog_errno(status);
486 goto bail;
487 }
488
489 (*ac)->ac_bits_wanted = ocfs2_extend_meta_needed(fe);
490 (*ac)->ac_which = OCFS2_AC_USE_META;
491
492 #ifndef OCFS2_USE_ALL_METADATA_SUBALLOCATORS
493 slot = 0;
494 #else
495 slot = osb->slot_num;
496 #endif
497
498 (*ac)->ac_group_search = ocfs2_block_group_search;
499
500 status = ocfs2_reserve_suballoc_bits(osb, (*ac),
501 EXTENT_ALLOC_SYSTEM_INODE, slot);
502 if (status < 0) {
503 if (status != -ENOSPC)
504 mlog_errno(status);
505 goto bail;
506 }
507
508 status = 0;
509 bail:
510 if ((status < 0) && *ac) {
511 ocfs2_free_alloc_context(*ac);
512 *ac = NULL;
513 }
514
515 mlog_exit(status);
516 return status;
517 }
518
519 int ocfs2_reserve_new_inode(struct ocfs2_super *osb,
520 struct ocfs2_alloc_context **ac)
521 {
522 int status;
523
524 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
525 if (!(*ac)) {
526 status = -ENOMEM;
527 mlog_errno(status);
528 goto bail;
529 }
530
531 (*ac)->ac_bits_wanted = 1;
532 (*ac)->ac_which = OCFS2_AC_USE_INODE;
533
534 (*ac)->ac_group_search = ocfs2_block_group_search;
535
536 status = ocfs2_reserve_suballoc_bits(osb, *ac,
537 INODE_ALLOC_SYSTEM_INODE,
538 osb->slot_num);
539 if (status < 0) {
540 if (status != -ENOSPC)
541 mlog_errno(status);
542 goto bail;
543 }
544
545 status = 0;
546 bail:
547 if ((status < 0) && *ac) {
548 ocfs2_free_alloc_context(*ac);
549 *ac = NULL;
550 }
551
552 mlog_exit(status);
553 return status;
554 }
555
556 /* The local alloc code has to reserve from the main bitmap as well, so
557 * rather than duplicate that work we expose this helper for both paths. */
558 int ocfs2_reserve_cluster_bitmap_bits(struct ocfs2_super *osb,
559 struct ocfs2_alloc_context *ac)
560 {
561 int status;
562
563 ac->ac_which = OCFS2_AC_USE_MAIN;
564 ac->ac_group_search = ocfs2_cluster_group_search;
565
566 status = ocfs2_reserve_suballoc_bits(osb, ac,
567 GLOBAL_BITMAP_SYSTEM_INODE,
568 OCFS2_INVALID_SLOT);
569 if (status < 0 && status != -ENOSPC) {
570 mlog_errno(status);
571 goto bail;
572 }
573
574 bail:
575 return status;
576 }
577
578 /* Callers don't need to care which bitmap (local alloc or main) to
579 * use so we figure it out for them, but unfortunately this clutters
580 * things a bit. */
581 int ocfs2_reserve_clusters(struct ocfs2_super *osb,
582 u32 bits_wanted,
583 struct ocfs2_alloc_context **ac)
584 {
585 int status;
586
587 mlog_entry_void();
588
589 *ac = kzalloc(sizeof(struct ocfs2_alloc_context), GFP_KERNEL);
590 if (!(*ac)) {
591 status = -ENOMEM;
592 mlog_errno(status);
593 goto bail;
594 }
595
596 (*ac)->ac_bits_wanted = bits_wanted;
597
598 status = -ENOSPC;
599 if (ocfs2_alloc_should_use_local(osb, bits_wanted)) {
600 status = ocfs2_reserve_local_alloc_bits(osb,
601 bits_wanted,
602 *ac);
603 if ((status < 0) && (status != -ENOSPC)) {
604 mlog_errno(status);
605 goto bail;
606 } else if (status == -ENOSPC) {
607 /* reserve_local_bits will return enospc with
608 * the local alloc inode still locked, so we
609 * can change this safely here. */
610 mlog(0, "Disabling local alloc\n");
611 /* We set to OCFS2_LA_DISABLED so that umount
612 * can clean up what's left of the local
613 * allocation */
614 osb->local_alloc_state = OCFS2_LA_DISABLED;
615 }
616 }
617
618 if (status == -ENOSPC) {
619 status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
620 if (status < 0) {
621 if (status != -ENOSPC)
622 mlog_errno(status);
623 goto bail;
624 }
625 }
626
627 status = 0;
628 bail:
629 if ((status < 0) && *ac) {
630 ocfs2_free_alloc_context(*ac);
631 *ac = NULL;
632 }
633
634 mlog_exit(status);
635 return status;
636 }
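/*
 * Illustrative caller sketch (mirrors ocfs2_block_group_alloc() above;
 * error handling omitted, counts made up):
 *
 *	status = ocfs2_reserve_clusters(osb, 4, &ac);
 *	handle = ocfs2_start_trans(osb, credits);
 *	status = ocfs2_claim_clusters(osb, handle, ac, 1,
 *				      &cluster_start, &num_clusters);
 *	ocfs2_commit_trans(osb, handle);
 *	ocfs2_free_alloc_context(ac);
 */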
637
638 /*
639 * More or less lifted from ext3. I'll leave their description below:
640 *
641 * "For ext3 allocations, we must not reuse any blocks which are
642 * allocated in the bitmap buffer's "last committed data" copy. This
643 * prevents deletes from freeing up the page for reuse until we have
644 * committed the delete transaction.
645 *
646 * If we didn't do this, then deleting something and reallocating it as
647 * data would allow the old block to be overwritten before the
648 * transaction committed (because we force data to disk before commit).
649 * This would lead to corruption if we crashed between overwriting the
650 * data and committing the delete.
651 *
652 * @@@ We may want to make this allocation behaviour conditional on
653 * data-writes at some point, and disable it for metadata allocations or
654 * sync-data inodes."
655 *
656 * Note: OCFS2 already does this differently for metadata vs data
657 * allocations, as those bitmaps are separate and undo access is never
658 * called on a metadata group descriptor.
659 */
660 static int ocfs2_test_bg_bit_allocatable(struct buffer_head *bg_bh,
661 int nr)
662 {
663 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
664
665 if (ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap))
666 return 0;
667 if (!buffer_jbd(bg_bh) || !bh2jh(bg_bh)->b_committed_data)
668 return 1;
669
670 bg = (struct ocfs2_group_desc *) bh2jh(bg_bh)->b_committed_data;
671 return !ocfs2_test_bit(nr, (unsigned long *)bg->bg_bitmap);
672 }
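/*
 * Illustrative example: a delete still pending in the journal has
 * cleared bit N in bg->bg_bitmap, but the "last committed" copy at
 * bh2jh(bg_bh)->b_committed_data still has bit N set.  The test above
 * therefore reports bit N as not allocatable until that transaction
 * commits and the committed copy catches up.
 */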
673
674 static int ocfs2_block_group_find_clear_bits(struct ocfs2_super *osb,
675 struct buffer_head *bg_bh,
676 unsigned int bits_wanted,
677 unsigned int total_bits,
678 u16 *bit_off,
679 u16 *bits_found)
680 {
681 void *bitmap;
682 u16 best_offset, best_size;
683 int offset, start, found, status = 0;
684 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
685
686 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
687 OCFS2_RO_ON_INVALID_GROUP_DESC(osb->sb, bg);
688 return -EIO;
689 }
690
691 found = start = best_offset = best_size = 0;
692 bitmap = bg->bg_bitmap;
693
694 while((offset = ocfs2_find_next_zero_bit(bitmap, total_bits, start)) != -1) {
695 if (offset == total_bits)
696 break;
697
698 if (!ocfs2_test_bg_bit_allocatable(bg_bh, offset)) {
699 /* We found a zero, but we can't use it as it
700 * hasn't been put to disk yet! */
701 found = 0;
702 start = offset + 1;
703 } else if (offset == start) {
704 /* we found a zero */
705 found++;
706 /* move start to the next bit to test */
707 start++;
708 } else {
709 /* got a zero after some ones */
710 found = 1;
711 start = offset + 1;
712 }
713 if (found > best_size) {
714 best_size = found;
715 best_offset = start - found;
716 }
717 /* we got everything we needed */
718 if (found == bits_wanted) {
719 /* mlog(0, "Found it all!\n"); */
720 break;
721 }
722 }
723
724 /* XXX: I think the first clause is equivalent to the second
725 * - jlbec */
726 if (found == bits_wanted) {
727 *bit_off = start - found;
728 *bits_found = found;
729 } else if (best_size) {
730 *bit_off = best_offset;
731 *bits_found = best_size;
732 } else {
733 status = -ENOSPC;
734 /* No error log here -- see the comment above
735 * ocfs2_test_bg_bit_allocatable */
736 }
737
738 return status;
739 }
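/*
 * Worked example (illustrative): with total_bits = 8, bits_wanted = 3
 * and a bitmap of 1 1 0 0 1 0 0 0 (bits 0..7), the scan above first
 * records the run at bits 2-3 (best_size = 2) and then finds the run at
 * bits 5-7, returning *bit_off = 5, *bits_found = 3.  With
 * bits_wanted = 4 the same call would return the largest run seen
 * (*bit_off = 5, *bits_found = 3) and leave it to the caller's min_bits
 * check to decide whether that is enough.
 */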
740
741 static inline int ocfs2_block_group_set_bits(handle_t *handle,
742 struct inode *alloc_inode,
743 struct ocfs2_group_desc *bg,
744 struct buffer_head *group_bh,
745 unsigned int bit_off,
746 unsigned int num_bits)
747 {
748 int status;
749 void *bitmap = bg->bg_bitmap;
750 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
751
752 mlog_entry_void();
753
754 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
755 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
756 status = -EIO;
757 goto bail;
758 }
759 BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits);
760
761 mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off,
762 num_bits);
763
764 if (ocfs2_is_cluster_bitmap(alloc_inode))
765 journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
766
767 status = ocfs2_journal_access(handle,
768 alloc_inode,
769 group_bh,
770 journal_type);
771 if (status < 0) {
772 mlog_errno(status);
773 goto bail;
774 }
775
776 le16_add_cpu(&bg->bg_free_bits_count, -num_bits);
777
778 while(num_bits--)
779 ocfs2_set_bit(bit_off++, bitmap);
780
781 status = ocfs2_journal_dirty(handle,
782 group_bh);
783 if (status < 0) {
784 mlog_errno(status);
785 goto bail;
786 }
787
788 bail:
789 mlog_exit(status);
790 return status;
791 }
792
793 /* find the one with the most empty bits */
794 static inline u16 ocfs2_find_victim_chain(struct ocfs2_chain_list *cl)
795 {
796 u16 curr, best;
797
798 BUG_ON(!cl->cl_next_free_rec);
799
800 best = curr = 0;
801 while (curr < le16_to_cpu(cl->cl_next_free_rec)) {
802 if (le32_to_cpu(cl->cl_recs[curr].c_free) >
803 le32_to_cpu(cl->cl_recs[best].c_free))
804 best = curr;
805 curr++;
806 }
807
808 BUG_ON(best >= le16_to_cpu(cl->cl_next_free_rec));
809 return best;
810 }
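/*
 * Illustrative example: with cl_next_free_rec = 3 and per-chain c_free
 * counts of { 10, 250, 34 }, chain 1 is returned as the victim.
 */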
811
812 static int ocfs2_relink_block_group(handle_t *handle,
813 struct inode *alloc_inode,
814 struct buffer_head *fe_bh,
815 struct buffer_head *bg_bh,
816 struct buffer_head *prev_bg_bh,
817 u16 chain)
818 {
819 int status;
820 /* there is a really tiny chance the journal calls could fail,
821 * but we wouldn't want inconsistent blocks in *any* case. */
822 u64 fe_ptr, bg_ptr, prev_bg_ptr;
823 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
824 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) bg_bh->b_data;
825 struct ocfs2_group_desc *prev_bg = (struct ocfs2_group_desc *) prev_bg_bh->b_data;
826
827 if (!OCFS2_IS_VALID_DINODE(fe)) {
828 OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
829 status = -EIO;
830 goto out;
831 }
832 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
833 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
834 status = -EIO;
835 goto out;
836 }
837 if (!OCFS2_IS_VALID_GROUP_DESC(prev_bg)) {
838 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, prev_bg);
839 status = -EIO;
840 goto out;
841 }
842
843 mlog(0, "Suballoc %llu, chain %u, move group %llu to top, prev = %llu\n",
844 (unsigned long long)le64_to_cpu(fe->i_blkno), chain,
845 (unsigned long long)le64_to_cpu(bg->bg_blkno),
846 (unsigned long long)le64_to_cpu(prev_bg->bg_blkno));
847
848 fe_ptr = le64_to_cpu(fe->id2.i_chain.cl_recs[chain].c_blkno);
849 bg_ptr = le64_to_cpu(bg->bg_next_group);
850 prev_bg_ptr = le64_to_cpu(prev_bg->bg_next_group);
851
852 status = ocfs2_journal_access(handle, alloc_inode, prev_bg_bh,
853 OCFS2_JOURNAL_ACCESS_WRITE);
854 if (status < 0) {
855 mlog_errno(status);
856 goto out_rollback;
857 }
858
859 prev_bg->bg_next_group = bg->bg_next_group;
860
861 status = ocfs2_journal_dirty(handle, prev_bg_bh);
862 if (status < 0) {
863 mlog_errno(status);
864 goto out_rollback;
865 }
866
867 status = ocfs2_journal_access(handle, alloc_inode, bg_bh,
868 OCFS2_JOURNAL_ACCESS_WRITE);
869 if (status < 0) {
870 mlog_errno(status);
871 goto out_rollback;
872 }
873
874 bg->bg_next_group = fe->id2.i_chain.cl_recs[chain].c_blkno;
875
876 status = ocfs2_journal_dirty(handle, bg_bh);
877 if (status < 0) {
878 mlog_errno(status);
879 goto out_rollback;
880 }
881
882 status = ocfs2_journal_access(handle, alloc_inode, fe_bh,
883 OCFS2_JOURNAL_ACCESS_WRITE);
884 if (status < 0) {
885 mlog_errno(status);
886 goto out_rollback;
887 }
888
889 fe->id2.i_chain.cl_recs[chain].c_blkno = bg->bg_blkno;
890
891 status = ocfs2_journal_dirty(handle, fe_bh);
892 if (status < 0) {
893 mlog_errno(status);
894 goto out_rollback;
895 }
896
897 status = 0;
898 out_rollback:
899 if (status < 0) {
900 fe->id2.i_chain.cl_recs[chain].c_blkno = cpu_to_le64(fe_ptr);
901 bg->bg_next_group = cpu_to_le64(bg_ptr);
902 prev_bg->bg_next_group = cpu_to_le64(prev_bg_ptr);
903 }
904 out:
905 mlog_exit(status);
906 return status;
907 }
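/*
 * Illustrative example: if the chain reads  D1 -> D2 -> D3  and the
 * search settled on D3 (so prev_bg is D2), the relink above yields
 * D3 -> D1 -> D2: D2 inherits D3's old bg_next_group, D3 points at the
 * old chain head D1, and cl_recs[chain].c_blkno now names D3.
 */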
908
909 static inline int ocfs2_block_group_reasonably_empty(struct ocfs2_group_desc *bg,
910 u32 wanted)
911 {
912 return le16_to_cpu(bg->bg_free_bits_count) > wanted;
913 }
914
915 /* return 0 on success, -ENOSPC to keep searching and any other < 0
916 * value on error. */
917 static int ocfs2_cluster_group_search(struct inode *inode,
918 struct buffer_head *group_bh,
919 u32 bits_wanted, u32 min_bits,
920 u16 *bit_off, u16 *bits_found)
921 {
922 int search = -ENOSPC;
923 int ret;
924 struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *) group_bh->b_data;
925 u16 tmp_off, tmp_found;
926 unsigned int max_bits, gd_cluster_off;
927
928 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
929
930 if (gd->bg_free_bits_count) {
931 max_bits = le16_to_cpu(gd->bg_bits);
932
933 /* Tail groups in cluster bitmaps which aren't cpg
934 * aligned are prone to partial extension by a failed
935 * fs resize. If the file system resize never got to
936 * update the dinode cluster count, then we don't want
937 * to trust any clusters past it, regardless of what
938 * the group descriptor says. */
939 gd_cluster_off = ocfs2_blocks_to_clusters(inode->i_sb,
940 le64_to_cpu(gd->bg_blkno));
941 if ((gd_cluster_off + max_bits) >
942 OCFS2_I(inode)->ip_clusters) {
943 max_bits = OCFS2_I(inode)->ip_clusters - gd_cluster_off;
944 mlog(0, "Desc %llu, bg_bits %u, clusters %u, use %u\n",
945 (unsigned long long)le64_to_cpu(gd->bg_blkno),
946 le16_to_cpu(gd->bg_bits),
947 OCFS2_I(inode)->ip_clusters, max_bits);
948 }
949
950 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
951 group_bh, bits_wanted,
952 max_bits,
953 &tmp_off, &tmp_found);
954 if (ret)
955 return ret;
956
957 /* ocfs2_block_group_find_clear_bits() might
958 * return success, but we still want to return
959 * -ENOSPC unless it found the minimum number
960 * of bits. */
961 if (min_bits <= tmp_found) {
962 *bit_off = tmp_off;
963 *bits_found = tmp_found;
964 search = 0; /* success */
965 }
966 }
967
968 return search;
969 }
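/*
 * Worked example (illustrative numbers): if a tail group starts at
 * cluster 96768 and claims bg_bits = 32256, but the dinode only records
 * ip_clusters = 100000, max_bits is clamped above to
 * 100000 - 96768 = 3232 and any bits past the trusted end of the
 * bitmap are ignored.
 */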
970
971 static int ocfs2_block_group_search(struct inode *inode,
972 struct buffer_head *group_bh,
973 u32 bits_wanted, u32 min_bits,
974 u16 *bit_off, u16 *bits_found)
975 {
976 int ret = -ENOSPC;
977 struct ocfs2_group_desc *bg = (struct ocfs2_group_desc *) group_bh->b_data;
978
979 BUG_ON(min_bits != 1);
980 BUG_ON(ocfs2_is_cluster_bitmap(inode));
981
982 if (bg->bg_free_bits_count)
983 ret = ocfs2_block_group_find_clear_bits(OCFS2_SB(inode->i_sb),
984 group_bh, bits_wanted,
985 le16_to_cpu(bg->bg_bits),
986 bit_off, bits_found);
987
988 return ret;
989 }
990
991 static int ocfs2_alloc_dinode_update_counts(struct inode *inode,
992 handle_t *handle,
993 struct buffer_head *di_bh,
994 u32 num_bits,
995 u16 chain)
996 {
997 int ret;
998 u32 tmp_used;
999 struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data;
1000 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &di->id2.i_chain;
1001
1002 ret = ocfs2_journal_access(handle, inode, di_bh,
1003 OCFS2_JOURNAL_ACCESS_WRITE);
1004 if (ret < 0) {
1005 mlog_errno(ret);
1006 goto out;
1007 }
1008
1009 tmp_used = le32_to_cpu(di->id1.bitmap1.i_used);
1010 di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used);
1011 le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits);
1012
1013 ret = ocfs2_journal_dirty(handle, di_bh);
1014 if (ret < 0)
1015 mlog_errno(ret);
1016
1017 out:
1018 return ret;
1019 }
1020
1021 static int ocfs2_search_one_group(struct ocfs2_alloc_context *ac,
1022 handle_t *handle,
1023 u32 bits_wanted,
1024 u32 min_bits,
1025 u16 *bit_off,
1026 unsigned int *num_bits,
1027 u64 gd_blkno,
1028 u16 *bits_left)
1029 {
1030 int ret;
1031 u16 found;
1032 struct buffer_head *group_bh = NULL;
1033 struct ocfs2_group_desc *gd;
1034 struct inode *alloc_inode = ac->ac_inode;
1035
1036 ret = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb), gd_blkno,
1037 &group_bh, OCFS2_BH_CACHED, alloc_inode);
1038 if (ret < 0) {
1039 mlog_errno(ret);
1040 return ret;
1041 }
1042
1043 gd = (struct ocfs2_group_desc *) group_bh->b_data;
1044 if (!OCFS2_IS_VALID_GROUP_DESC(gd)) {
1045 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, gd);
1046 ret = -EIO;
1047 goto out;
1048 }
1049
1050 ret = ac->ac_group_search(alloc_inode, group_bh, bits_wanted, min_bits,
1051 bit_off, &found);
1052 if (ret < 0) {
1053 if (ret != -ENOSPC)
1054 mlog_errno(ret);
1055 goto out;
1056 }
1057
1058 *num_bits = found;
1059
1060 ret = ocfs2_alloc_dinode_update_counts(alloc_inode, handle, ac->ac_bh,
1061 *num_bits,
1062 le16_to_cpu(gd->bg_chain));
1063 if (ret < 0) {
1064 mlog_errno(ret);
1065 goto out;
1066 }
1067
1068 ret = ocfs2_block_group_set_bits(handle, alloc_inode, gd, group_bh,
1069 *bit_off, *num_bits);
1070 if (ret < 0)
1071 mlog_errno(ret);
1072
1073 *bits_left = le16_to_cpu(gd->bg_free_bits_count);
1074
1075 out:
1076 brelse(group_bh);
1077
1078 return ret;
1079 }
1080
1081 static int ocfs2_search_chain(struct ocfs2_alloc_context *ac,
1082 handle_t *handle,
1083 u32 bits_wanted,
1084 u32 min_bits,
1085 u16 *bit_off,
1086 unsigned int *num_bits,
1087 u64 *bg_blkno,
1088 u16 *bits_left)
1089 {
1090 int status;
1091 u16 chain, tmp_bits;
1092 u32 tmp_used;
1093 u64 next_group;
1094 struct inode *alloc_inode = ac->ac_inode;
1095 struct buffer_head *group_bh = NULL;
1096 struct buffer_head *prev_group_bh = NULL;
1097 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
1098 struct ocfs2_chain_list *cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
1099 struct ocfs2_group_desc *bg;
1100
1101 chain = ac->ac_chain;
1102 mlog(0, "trying to alloc %u bits from chain %u, inode %llu\n",
1103 bits_wanted, chain,
1104 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno);
1105
1106 status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
1107 le64_to_cpu(cl->cl_recs[chain].c_blkno),
1108 &group_bh, OCFS2_BH_CACHED, alloc_inode);
1109 if (status < 0) {
1110 mlog_errno(status);
1111 goto bail;
1112 }
1113 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1114 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
1115 if (status) {
1116 mlog_errno(status);
1117 goto bail;
1118 }
1119
1120 status = -ENOSPC;
1121 /* for now, the chain search is a bit simplistic. We just use
1122 * the 1st group with any empty bits. */
1123 while ((status = ac->ac_group_search(alloc_inode, group_bh,
1124 bits_wanted, min_bits, bit_off,
1125 &tmp_bits)) == -ENOSPC) {
1126 if (!bg->bg_next_group)
1127 break;
1128
1129 if (prev_group_bh) {
1130 brelse(prev_group_bh);
1131 prev_group_bh = NULL;
1132 }
1133 next_group = le64_to_cpu(bg->bg_next_group);
1134 prev_group_bh = group_bh;
1135 group_bh = NULL;
1136 status = ocfs2_read_block(OCFS2_SB(alloc_inode->i_sb),
1137 next_group, &group_bh,
1138 OCFS2_BH_CACHED, alloc_inode);
1139 if (status < 0) {
1140 mlog_errno(status);
1141 goto bail;
1142 }
1143 bg = (struct ocfs2_group_desc *) group_bh->b_data;
1144 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, bg);
1145 if (status) {
1146 mlog_errno(status);
1147 goto bail;
1148 }
1149 }
1150 if (status < 0) {
1151 if (status != -ENOSPC)
1152 mlog_errno(status);
1153 goto bail;
1154 }
1155
1156 mlog(0, "alloc succeeds: we give %u bits from block group %llu\n",
1157 tmp_bits, (unsigned long long)le64_to_cpu(bg->bg_blkno));
1158
1159 *num_bits = tmp_bits;
1160
1161 BUG_ON(*num_bits == 0);
1162
1163 /*
1164 * Keep track of previous block descriptor read. When
1165 * we find a target, if we have read more than X
1166 * number of descriptors, and the target is reasonably
1167 * empty, relink him to top of his chain.
1168 *
1169 * We've read 0 extra blocks and only send one more to
1170 * the transaction, yet the next guy to search has a
1171 * much easier time.
1172 *
1173 * Do this *after* figuring out how many bits we're taking out
1174 * of our target group.
1175 */
1176 if (ac->ac_allow_chain_relink &&
1177 (prev_group_bh) &&
1178 (ocfs2_block_group_reasonably_empty(bg, *num_bits))) {
1179 status = ocfs2_relink_block_group(handle, alloc_inode,
1180 ac->ac_bh, group_bh,
1181 prev_group_bh, chain);
1182 if (status < 0) {
1183 mlog_errno(status);
1184 goto bail;
1185 }
1186 }
1187
1188 /* Ok, claim our bits now: set the info on dinode, chainlist
1189 * and then the group */
1190 status = ocfs2_journal_access(handle,
1191 alloc_inode,
1192 ac->ac_bh,
1193 OCFS2_JOURNAL_ACCESS_WRITE);
1194 if (status < 0) {
1195 mlog_errno(status);
1196 goto bail;
1197 }
1198
1199 tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
1200 fe->id1.bitmap1.i_used = cpu_to_le32(*num_bits + tmp_used);
1201 le32_add_cpu(&cl->cl_recs[chain].c_free, -(*num_bits));
1202
1203 status = ocfs2_journal_dirty(handle,
1204 ac->ac_bh);
1205 if (status < 0) {
1206 mlog_errno(status);
1207 goto bail;
1208 }
1209
1210 status = ocfs2_block_group_set_bits(handle,
1211 alloc_inode,
1212 bg,
1213 group_bh,
1214 *bit_off,
1215 *num_bits);
1216 if (status < 0) {
1217 mlog_errno(status);
1218 goto bail;
1219 }
1220
1221 mlog(0, "Allocated %u bits from suballocator %llu\n", *num_bits,
1222 (unsigned long long)le64_to_cpu(fe->i_blkno));
1223
1224 *bg_blkno = le64_to_cpu(bg->bg_blkno);
1225 *bits_left = le16_to_cpu(bg->bg_free_bits_count);
1226 bail:
1227 if (group_bh)
1228 brelse(group_bh);
1229 if (prev_group_bh)
1230 brelse(prev_group_bh);
1231
1232 mlog_exit(status);
1233 return status;
1234 }
1235
1236 /* will give out up to bits_wanted contiguous bits. */
1237 static int ocfs2_claim_suballoc_bits(struct ocfs2_super *osb,
1238 struct ocfs2_alloc_context *ac,
1239 handle_t *handle,
1240 u32 bits_wanted,
1241 u32 min_bits,
1242 u16 *bit_off,
1243 unsigned int *num_bits,
1244 u64 *bg_blkno)
1245 {
1246 int status;
1247 u16 victim, i;
1248 u16 bits_left = 0;
1249 u64 hint_blkno = ac->ac_last_group;
1250 struct ocfs2_chain_list *cl;
1251 struct ocfs2_dinode *fe;
1252
1253 mlog_entry_void();
1254
1255 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
1256 BUG_ON(bits_wanted > (ac->ac_bits_wanted - ac->ac_bits_given));
1257 BUG_ON(!ac->ac_bh);
1258
1259 fe = (struct ocfs2_dinode *) ac->ac_bh->b_data;
1260 if (!OCFS2_IS_VALID_DINODE(fe)) {
1261 OCFS2_RO_ON_INVALID_DINODE(osb->sb, fe);
1262 status = -EIO;
1263 goto bail;
1264 }
1265 if (le32_to_cpu(fe->id1.bitmap1.i_used) >=
1266 le32_to_cpu(fe->id1.bitmap1.i_total)) {
1267 ocfs2_error(osb->sb, "Chain allocator dinode %llu has %u used "
1268 "bits but only %u total.",
1269 (unsigned long long)le64_to_cpu(fe->i_blkno),
1270 le32_to_cpu(fe->id1.bitmap1.i_used),
1271 le32_to_cpu(fe->id1.bitmap1.i_total));
1272 status = -EIO;
1273 goto bail;
1274 }
1275
1276 if (hint_blkno) {
1277 /* Attempt to short-circuit the usual search mechanism
1278 * by jumping straight to the most recently used
1279 * allocation group. This helps us maintain some
1280 * contiguousness across allocations. */
1281 status = ocfs2_search_one_group(ac, handle, bits_wanted,
1282 min_bits, bit_off, num_bits,
1283 hint_blkno, &bits_left);
1284 if (!status) {
1285 /* Be careful to update *bg_blkno here as the
1286 * caller is expecting it to be filled in, and
1287 * ocfs2_search_one_group() won't do that for
1288 * us. */
1289 *bg_blkno = hint_blkno;
1290 goto set_hint;
1291 }
1292 if (status < 0 && status != -ENOSPC) {
1293 mlog_errno(status);
1294 goto bail;
1295 }
1296 }
1297
1298 cl = (struct ocfs2_chain_list *) &fe->id2.i_chain;
1299
1300 victim = ocfs2_find_victim_chain(cl);
1301 ac->ac_chain = victim;
1302 ac->ac_allow_chain_relink = 1;
1303
1304 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits, bit_off,
1305 num_bits, bg_blkno, &bits_left);
1306 if (!status)
1307 goto set_hint;
1308 if (status < 0 && status != -ENOSPC) {
1309 mlog_errno(status);
1310 goto bail;
1311 }
1312
1313 mlog(0, "Search of victim chain %u came up with nothing, "
1314 "trying all chains now.\n", victim);
1315
1316 /* If we didn't pick a good victim, then just default to
1317 * searching each chain in order. Don't allow chain relinking
1318 * because we only calculate enough journal credits for one
1319 * relink per alloc. */
1320 ac->ac_allow_chain_relink = 0;
1321 for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i ++) {
1322 if (i == victim)
1323 continue;
1324 if (!cl->cl_recs[i].c_free)
1325 continue;
1326
1327 ac->ac_chain = i;
1328 status = ocfs2_search_chain(ac, handle, bits_wanted, min_bits,
1329 bit_off, num_bits, bg_blkno,
1330 &bits_left);
1331 if (!status)
1332 break;
1333 if (status < 0 && status != -ENOSPC) {
1334 mlog_errno(status);
1335 goto bail;
1336 }
1337 }
1338
1339 set_hint:
1340 if (status != -ENOSPC) {
1341 /* If the next search of this group is not likely to
1342 * yield a suitable extent, then we reset the last
1343 * group hint so as to not waste a disk read */
1344 if (bits_left < min_bits)
1345 ac->ac_last_group = 0;
1346 else
1347 ac->ac_last_group = *bg_blkno;
1348 }
1349
1350 bail:
1351 mlog_exit(status);
1352 return status;
1353 }
1354
1355 int ocfs2_claim_metadata(struct ocfs2_super *osb,
1356 handle_t *handle,
1357 struct ocfs2_alloc_context *ac,
1358 u32 bits_wanted,
1359 u16 *suballoc_bit_start,
1360 unsigned int *num_bits,
1361 u64 *blkno_start)
1362 {
1363 int status;
1364 u64 bg_blkno;
1365
1366 BUG_ON(!ac);
1367 BUG_ON(ac->ac_bits_wanted < (ac->ac_bits_given + bits_wanted));
1368 BUG_ON(ac->ac_which != OCFS2_AC_USE_META);
1369
1370 status = ocfs2_claim_suballoc_bits(osb,
1371 ac,
1372 handle,
1373 bits_wanted,
1374 1,
1375 suballoc_bit_start,
1376 num_bits,
1377 &bg_blkno);
1378 if (status < 0) {
1379 mlog_errno(status);
1380 goto bail;
1381 }
1382 atomic_inc(&osb->alloc_stats.bg_allocs);
1383
1384 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
1385 ac->ac_bits_given += (*num_bits);
1386 status = 0;
1387 bail:
1388 mlog_exit(status);
1389 return status;
1390 }
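/*
 * Illustrative example: suballocator bits map 1:1 to blocks within a
 * group, so a claim landing at *suballoc_bit_start = 12 in the group at
 * block 5000 gives *blkno_start = 5012; a three-bit claim hands back
 * blocks 5012-5014.  (Block numbers made up for this note.)
 */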
1391
1392 int ocfs2_claim_new_inode(struct ocfs2_super *osb,
1393 handle_t *handle,
1394 struct ocfs2_alloc_context *ac,
1395 u16 *suballoc_bit,
1396 u64 *fe_blkno)
1397 {
1398 int status;
1399 unsigned int num_bits;
1400 u64 bg_blkno;
1401
1402 mlog_entry_void();
1403
1404 BUG_ON(!ac);
1405 BUG_ON(ac->ac_bits_given != 0);
1406 BUG_ON(ac->ac_bits_wanted != 1);
1407 BUG_ON(ac->ac_which != OCFS2_AC_USE_INODE);
1408
1409 status = ocfs2_claim_suballoc_bits(osb,
1410 ac,
1411 handle,
1412 1,
1413 1,
1414 suballoc_bit,
1415 &num_bits,
1416 &bg_blkno);
1417 if (status < 0) {
1418 mlog_errno(status);
1419 goto bail;
1420 }
1421 atomic_inc(&osb->alloc_stats.bg_allocs);
1422
1423 BUG_ON(num_bits != 1);
1424
1425 *fe_blkno = bg_blkno + (u64) (*suballoc_bit);
1426 ac->ac_bits_given++;
1427 status = 0;
1428 bail:
1429 mlog_exit(status);
1430 return status;
1431 }
1432
1433 /* translate a group desc. blkno and its bitmap offset into
1434 * disk cluster offset. */
1435 static inline u32 ocfs2_desc_bitmap_to_cluster_off(struct inode *inode,
1436 u64 bg_blkno,
1437 u16 bg_bit_off)
1438 {
1439 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1440 u32 cluster = 0;
1441
1442 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
1443
1444 if (bg_blkno != osb->first_cluster_group_blkno)
1445 cluster = ocfs2_blocks_to_clusters(inode->i_sb, bg_blkno);
1446 cluster += (u32) bg_bit_off;
1447 return cluster;
1448 }
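/*
 * Worked example (illustrative, assuming 32256 clusters per cluster
 * group): the second group starts at cluster 32256, so bit 100 of its
 * bitmap names cluster 32356.  For the first group (which sits at
 * osb->first_cluster_group_blkno) the bit offset is the cluster number
 * itself.
 */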
1449
1450 /* given a cluster offset, calculate which block group it belongs to
1451 * and return that block offset. */
1452 static inline u64 ocfs2_which_cluster_group(struct inode *inode,
1453 u32 cluster)
1454 {
1455 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1456 u32 group_no;
1457
1458 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
1459
1460 group_no = cluster / osb->bitmap_cpg;
1461 if (!group_no)
1462 return osb->first_cluster_group_blkno;
1463 return ocfs2_clusters_to_blocks(inode->i_sb,
1464 group_no * osb->bitmap_cpg);
1465 }
1466
1467 /* given the block number of a cluster start, calculate which cluster
1468 * group and descriptor bitmap offset that corresponds to. */
1469 static inline void ocfs2_block_to_cluster_group(struct inode *inode,
1470 u64 data_blkno,
1471 u64 *bg_blkno,
1472 u16 *bg_bit_off)
1473 {
1474 struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
1475 u32 data_cluster = ocfs2_blocks_to_clusters(osb->sb, data_blkno);
1476
1477 BUG_ON(!ocfs2_is_cluster_bitmap(inode));
1478
1479 *bg_blkno = ocfs2_which_cluster_group(inode,
1480 data_cluster);
1481
1482 if (*bg_blkno == osb->first_cluster_group_blkno)
1483 *bg_bit_off = (u16) data_cluster;
1484 else
1485 *bg_bit_off = (u16) ocfs2_blocks_to_clusters(osb->sb,
1486 data_blkno - *bg_blkno);
1487 }
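/*
 * Worked example (illustrative, osb->bitmap_cpg = 32256): a block whose
 * cluster is 100000 falls in group 100000 / 32256 = 3, so *bg_blkno is
 * the block of cluster 3 * 32256 = 96768 and *bg_bit_off is
 * 100000 - 96768 = 3232.
 */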
1488
1489 /*
1490 * min_bits - the minimum contiguous chunk from this total allocation we
1491 * can handle. Set to what we asked for originally for a fully
1492 * contiguous allocation, or set to '1' to indicate we can deal with
1493 * extents of any size.
1494 */
1495 int ocfs2_claim_clusters(struct ocfs2_super *osb,
1496 handle_t *handle,
1497 struct ocfs2_alloc_context *ac,
1498 u32 min_clusters,
1499 u32 *cluster_start,
1500 u32 *num_clusters)
1501 {
1502 int status;
1503 unsigned int bits_wanted = ac->ac_bits_wanted - ac->ac_bits_given;
1504 u64 bg_blkno = 0;
1505 u16 bg_bit_off;
1506
1507 mlog_entry_void();
1508
1509 BUG_ON(!ac);
1510 BUG_ON(ac->ac_bits_given >= ac->ac_bits_wanted);
1511
1512 BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL
1513 && ac->ac_which != OCFS2_AC_USE_MAIN);
1514
1515 if (ac->ac_which == OCFS2_AC_USE_LOCAL) {
1516 status = ocfs2_claim_local_alloc_bits(osb,
1517 handle,
1518 ac,
1519 bits_wanted,
1520 cluster_start,
1521 num_clusters);
1522 if (!status)
1523 atomic_inc(&osb->alloc_stats.local_data);
1524 } else {
1525 if (min_clusters > (osb->bitmap_cpg - 1)) {
1526 /* The only paths asking for contiguousness
1527 * should know about this already. */
1528 mlog(ML_ERROR, "minimum allocation requested exceeds "
1529 "group bitmap size!");
1530 status = -ENOSPC;
1531 goto bail;
1532 }
1533 /* clamp the current request down to a realistic size. */
1534 if (bits_wanted > (osb->bitmap_cpg - 1))
1535 bits_wanted = osb->bitmap_cpg - 1;
1536
1537 status = ocfs2_claim_suballoc_bits(osb,
1538 ac,
1539 handle,
1540 bits_wanted,
1541 min_clusters,
1542 &bg_bit_off,
1543 num_clusters,
1544 &bg_blkno);
1545 if (!status) {
1546 *cluster_start =
1547 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
1548 bg_blkno,
1549 bg_bit_off);
1550 atomic_inc(&osb->alloc_stats.bitmap_data);
1551 }
1552 }
1553 if (status < 0) {
1554 if (status != -ENOSPC)
1555 mlog_errno(status);
1556 goto bail;
1557 }
1558
1559 ac->ac_bits_given += *num_clusters;
1560
1561 bail:
1562 mlog_exit(status);
1563 return status;
1564 }
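/*
 * Illustrative example: a caller that reserved 8 clusters but tolerates
 * fragmentation passes min_clusters = 1 and may get back a shorter
 * extent (say 3 clusters at *cluster_start); a caller that needs the
 * range in one piece passes min_clusters = 8 and gets either all 8
 * contiguous clusters or -ENOSPC.
 */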
1565
1566 static inline int ocfs2_block_group_clear_bits(handle_t *handle,
1567 struct inode *alloc_inode,
1568 struct ocfs2_group_desc *bg,
1569 struct buffer_head *group_bh,
1570 unsigned int bit_off,
1571 unsigned int num_bits)
1572 {
1573 int status;
1574 unsigned int tmp;
1575 int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
1576 struct ocfs2_group_desc *undo_bg = NULL;
1577
1578 mlog_entry_void();
1579
1580 if (!OCFS2_IS_VALID_GROUP_DESC(bg)) {
1581 OCFS2_RO_ON_INVALID_GROUP_DESC(alloc_inode->i_sb, bg);
1582 status = -EIO;
1583 goto bail;
1584 }
1585
1586 mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
1587
1588 if (ocfs2_is_cluster_bitmap(alloc_inode))
1589 journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
1590
1591 status = ocfs2_journal_access(handle, alloc_inode, group_bh,
1592 journal_type);
1593 if (status < 0) {
1594 mlog_errno(status);
1595 goto bail;
1596 }
1597
1598 if (ocfs2_is_cluster_bitmap(alloc_inode))
1599 undo_bg = (struct ocfs2_group_desc *) bh2jh(group_bh)->b_committed_data;
1600
1601 tmp = num_bits;
1602 while(tmp--) {
1603 ocfs2_clear_bit((bit_off + tmp),
1604 (unsigned long *) bg->bg_bitmap);
1605 if (ocfs2_is_cluster_bitmap(alloc_inode))
1606 ocfs2_set_bit(bit_off + tmp,
1607 (unsigned long *) undo_bg->bg_bitmap);
1608 }
1609 le16_add_cpu(&bg->bg_free_bits_count, num_bits);
1610
1611 status = ocfs2_journal_dirty(handle, group_bh);
1612 if (status < 0)
1613 mlog_errno(status);
1614 bail:
1615 return status;
1616 }
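/*
 * Illustrative example: freeing bits 5-7 of a cluster group clears them
 * in bg->bg_bitmap but re-sets them in the journal's committed copy
 * (undo_bg), so ocfs2_test_bg_bit_allocatable() keeps treating them as
 * in use until the freeing transaction commits.
 */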
1617
1618 /*
1619 * expects the suballoc inode to already be locked.
1620 */
1621 int ocfs2_free_suballoc_bits(handle_t *handle,
1622 struct inode *alloc_inode,
1623 struct buffer_head *alloc_bh,
1624 unsigned int start_bit,
1625 u64 bg_blkno,
1626 unsigned int count)
1627 {
1628 int status = 0;
1629 u32 tmp_used;
1630 struct ocfs2_super *osb = OCFS2_SB(alloc_inode->i_sb);
1631 struct ocfs2_dinode *fe = (struct ocfs2_dinode *) alloc_bh->b_data;
1632 struct ocfs2_chain_list *cl = &fe->id2.i_chain;
1633 struct buffer_head *group_bh = NULL;
1634 struct ocfs2_group_desc *group;
1635
1636 mlog_entry_void();
1637
1638 if (!OCFS2_IS_VALID_DINODE(fe)) {
1639 OCFS2_RO_ON_INVALID_DINODE(alloc_inode->i_sb, fe);
1640 status = -EIO;
1641 goto bail;
1642 }
1643 BUG_ON((count + start_bit) > ocfs2_bits_per_group(cl));
1644
1645 mlog(0, "%llu: freeing %u bits from group %llu, starting at %u\n",
1646 (unsigned long long)OCFS2_I(alloc_inode)->ip_blkno, count,
1647 (unsigned long long)bg_blkno, start_bit);
1648
1649 status = ocfs2_read_block(osb, bg_blkno, &group_bh, OCFS2_BH_CACHED,
1650 alloc_inode);
1651 if (status < 0) {
1652 mlog_errno(status);
1653 goto bail;
1654 }
1655
1656 group = (struct ocfs2_group_desc *) group_bh->b_data;
1657 status = ocfs2_check_group_descriptor(alloc_inode->i_sb, fe, group);
1658 if (status) {
1659 mlog_errno(status);
1660 goto bail;
1661 }
1662 BUG_ON((count + start_bit) > le16_to_cpu(group->bg_bits));
1663
1664 status = ocfs2_block_group_clear_bits(handle, alloc_inode,
1665 group, group_bh,
1666 start_bit, count);
1667 if (status < 0) {
1668 mlog_errno(status);
1669 goto bail;
1670 }
1671
1672 status = ocfs2_journal_access(handle, alloc_inode, alloc_bh,
1673 OCFS2_JOURNAL_ACCESS_WRITE);
1674 if (status < 0) {
1675 mlog_errno(status);
1676 goto bail;
1677 }
1678
1679 le32_add_cpu(&cl->cl_recs[le16_to_cpu(group->bg_chain)].c_free,
1680 count);
1681 tmp_used = le32_to_cpu(fe->id1.bitmap1.i_used);
1682 fe->id1.bitmap1.i_used = cpu_to_le32(tmp_used - count);
1683
1684 status = ocfs2_journal_dirty(handle, alloc_bh);
1685 if (status < 0) {
1686 mlog_errno(status);
1687 goto bail;
1688 }
1689
1690 bail:
1691 if (group_bh)
1692 brelse(group_bh);
1693
1694 mlog_exit(status);
1695 return status;
1696 }
1697
1698 int ocfs2_free_dinode(handle_t *handle,
1699 struct inode *inode_alloc_inode,
1700 struct buffer_head *inode_alloc_bh,
1701 struct ocfs2_dinode *di)
1702 {
1703 u64 blk = le64_to_cpu(di->i_blkno);
1704 u16 bit = le16_to_cpu(di->i_suballoc_bit);
1705 u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
1706
1707 return ocfs2_free_suballoc_bits(handle, inode_alloc_inode,
1708 inode_alloc_bh, bit, bg_blkno, 1);
1709 }
1710
1711 int ocfs2_free_extent_block(handle_t *handle,
1712 struct inode *eb_alloc_inode,
1713 struct buffer_head *eb_alloc_bh,
1714 struct ocfs2_extent_block *eb)
1715 {
1716 u64 blk = le64_to_cpu(eb->h_blkno);
1717 u16 bit = le16_to_cpu(eb->h_suballoc_bit);
1718 u64 bg_blkno = ocfs2_which_suballoc_group(blk, bit);
1719
1720 return ocfs2_free_suballoc_bits(handle, eb_alloc_inode, eb_alloc_bh,
1721 bit, bg_blkno, 1);
1722 }
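/*
 * Illustrative example (block numbers made up, and assuming
 * ocfs2_which_suballoc_group() simply inverts the bg_blkno + bit
 * mapping used at claim time): an inode at block 5012 with
 * i_suballoc_bit = 12 is freed from the group descriptor at block
 * 5012 - 12 = 5000.
 */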
1723
1724 int ocfs2_free_clusters(handle_t *handle,
1725 struct inode *bitmap_inode,
1726 struct buffer_head *bitmap_bh,
1727 u64 start_blk,
1728 unsigned int num_clusters)
1729 {
1730 int status;
1731 u16 bg_start_bit;
1732 u64 bg_blkno;
1733 struct ocfs2_dinode *fe;
1734
1735 /* You can't ever have a contiguous set of clusters
1736 * bigger than a block group bitmap so we never have to worry
1737 * about looping on them. */
1738
1739 mlog_entry_void();
1740
1741 /* This is expensive. We can safely remove once this stuff has
1742 * gotten tested really well. */
1743 BUG_ON(start_blk != ocfs2_clusters_to_blocks(bitmap_inode->i_sb, ocfs2_blocks_to_clusters(bitmap_inode->i_sb, start_blk)));
1744
1745 fe = (struct ocfs2_dinode *) bitmap_bh->b_data;
1746
1747 ocfs2_block_to_cluster_group(bitmap_inode, start_blk, &bg_blkno,
1748 &bg_start_bit);
1749
1750 mlog(0, "want to free %u clusters starting at block %llu\n",
1751 num_clusters, (unsigned long long)start_blk);
1752 mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
1753 (unsigned long long)bg_blkno, bg_start_bit);
1754
1755 status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
1756 bg_start_bit, bg_blkno,
1757 num_clusters);
1758 if (status < 0)
1759 mlog_errno(status);
1760
1761 mlog_exit(status);
1762 return status;
1763 }
1764
1765 static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
1766 {
1767 printk("Block Group:\n");
1768 printk("bg_signature: %s\n", bg->bg_signature);
1769 printk("bg_size: %u\n", le16_to_cpu(bg->bg_size));
1770 printk("bg_bits: %u\n", le16_to_cpu(bg->bg_bits));
1771 printk("bg_free_bits_count: %u\n", le16_to_cpu(bg->bg_free_bits_count));
1772 printk("bg_chain: %u\n", le16_to_cpu(bg->bg_chain));
1773 printk("bg_generation: %u\n", le32_to_cpu(bg->bg_generation));
1774 printk("bg_next_group: %llu\n",
1775 (unsigned long long)le64_to_cpu(bg->bg_next_group));
1776 printk("bg_parent_dinode: %llu\n",
1777 (unsigned long long)le64_to_cpu(bg->bg_parent_dinode));
1778 printk("bg_blkno: %llu\n",
1779 (unsigned long long)le64_to_cpu(bg->bg_blkno));
1780 }
1781
1782 static inline void ocfs2_debug_suballoc_inode(struct ocfs2_dinode *fe)
1783 {
1784 int i;
1785
1786 printk("Suballoc Inode %llu:\n", (unsigned long long)le64_to_cpu(fe->i_blkno));
1787 printk("i_signature: %s\n", fe->i_signature);
1788 printk("i_size: %llu\n",
1789 (unsigned long long)le64_to_cpu(fe->i_size));
1790 printk("i_clusters: %u\n", le32_to_cpu(fe->i_clusters));
1791 printk("i_generation: %u\n",
1792 le32_to_cpu(fe->i_generation));
1793 printk("id1.bitmap1.i_used: %u\n",
1794 le32_to_cpu(fe->id1.bitmap1.i_used));
1795 printk("id1.bitmap1.i_total: %u\n",
1796 le32_to_cpu(fe->id1.bitmap1.i_total));
1797 printk("id2.i_chain.cl_cpg: %u\n", le16_to_cpu(fe->id2.i_chain.cl_cpg));
1798 printk("id2.i_chain.cl_bpc: %u\n", le16_to_cpu(fe->id2.i_chain.cl_bpc));
1799 printk("id2.i_chain.cl_count: %u\n", le16_to_cpu(fe->id2.i_chain.cl_count));
1800 printk("id2.i_chain.cl_next_free_rec: %u\n",
1801 le16_to_cpu(fe->id2.i_chain.cl_next_free_rec));
1802 for(i = 0; i < le16_to_cpu(fe->id2.i_chain.cl_next_free_rec); i++) {
1803 printk("fe->id2.i_chain.cl_recs[%d].c_free: %u\n", i,
1804 le32_to_cpu(fe->id2.i_chain.cl_recs[i].c_free));
1805 printk("fe->id2.i_chain.cl_recs[%d].c_total: %u\n", i,
1806 le32_to_cpu(fe->id2.i_chain.cl_recs[i].c_total));
1807 printk("fe->id2.i_chain.cl_recs[%d].c_blkno: %llu\n", i,
1808 (unsigned long long)le64_to_cpu(fe->id2.i_chain.cl_recs[i].c_blkno));
1809 }
1810 }