/*
 *  linux/fs/ext4/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 * Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */

#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include "ext4.h"
#include "ext4_jbd2.h"
#include "mballoc.h"

#include <trace/events/ext4.h>

static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group);
/*
 * balloc.c contains the block allocation and deallocation routines
 */

/*
 * Calculate block group number for a given block number
 */
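/*
 * A worked example of the STD_GROUP_SIZE fast path below: on a
 * filesystem with 4KiB blocks and no bigalloc (cluster bits == 0),
 * the shift is 12 + 0 + 3 = 15, i.e. each group spans 32768 blocks,
 * matching the number of bits in a single 4KiB bitmap block.
 */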
ext4_group_t ext4_get_group_number(struct super_block *sb,
				   ext4_fsblk_t block)
{
	ext4_group_t group;

	if (test_opt2(sb, STD_GROUP_SIZE))
		group = (block -
			 le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) >>
			(EXT4_BLOCK_SIZE_BITS(sb) + EXT4_CLUSTER_BITS(sb) + 3);
	else
		ext4_get_group_no_and_offset(sb, block, &group, NULL);
	return group;
}

/*
 * Calculate the block group number and offset into the block/cluster
 * allocation bitmap, given a block number
 */
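/*
 * Sketch of the computation below: do_div() leaves the group number in
 * 'blocknr' (the quotient) and returns the block offset within the
 * group (the remainder), which is then shifted down to a cluster
 * offset when bigalloc is in use.
 */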
void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
		ext4_group_t *blockgrpp, ext4_grpblk_t *offsetp)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
	ext4_grpblk_t offset;

	blocknr = blocknr - le32_to_cpu(es->s_first_data_block);
	offset = do_div(blocknr, EXT4_BLOCKS_PER_GROUP(sb)) >>
		EXT4_SB(sb)->s_cluster_bits;
	if (offsetp)
		*offsetp = offset;
	if (blockgrpp)
		*blockgrpp = blocknr;
}

/*
 * Check whether the 'block' lives within the 'block_group'. Returns 1 if so
 * and 0 otherwise.
 */
static inline int ext4_block_in_group(struct super_block *sb,
				      ext4_fsblk_t block,
				      ext4_group_t block_group)
{
	ext4_group_t actual_group;

	actual_group = ext4_get_group_number(sb, block);
	return (actual_group == block_group) ? 1 : 0;
}

/* Return the number of clusters used for file system metadata; this
 * represents the overhead needed by the file system.
 */
static unsigned ext4_num_overhead_clusters(struct super_block *sb,
					   ext4_group_t block_group,
					   struct ext4_group_desc *gdp)
{
	unsigned num_clusters;
	int block_cluster = -1, inode_cluster = -1, itbl_cluster = -1, i, c;
	ext4_fsblk_t start = ext4_group_first_block_no(sb, block_group);
	ext4_fsblk_t itbl_blk;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	/* This is the number of clusters used by the superblock,
	 * block group descriptors, and reserved block group
	 * descriptor blocks */
	num_clusters = ext4_num_base_meta_clusters(sb, block_group);

	/*
	 * For the allocation bitmaps and inode table, we first need
	 * to check to see if the block is in the block group.  If it
	 * is, then check to see if the cluster is already accounted
	 * for in the clusters used for the base metadata cluster, or
	 * if we can increment the base metadata cluster to include
	 * that block.  Otherwise, we will have to track the cluster
	 * used for the allocation bitmap or inode table explicitly.
	 * Normally all of these blocks are contiguous, so the special
	 * case handling shouldn't be necessary except for *very*
	 * unusual file system layouts.
	 */
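	/*
	 * Illustrative common (non-flex_bg) layout assumed by the checks
	 * below: the block bitmap, inode bitmap and inode table usually sit
	 * right after the base metadata, so each check typically just bumps
	 * num_clusters rather than recording a stray cluster.
	 */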
	if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
		block_cluster = EXT4_B2C(sbi,
					 ext4_block_bitmap(sb, gdp) - start);
		if (block_cluster < num_clusters)
			block_cluster = -1;
		else if (block_cluster == num_clusters) {
			num_clusters++;
			block_cluster = -1;
		}
	}

	if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
		inode_cluster = EXT4_B2C(sbi,
					 ext4_inode_bitmap(sb, gdp) - start);
		if (inode_cluster < num_clusters)
			inode_cluster = -1;
		else if (inode_cluster == num_clusters) {
			num_clusters++;
			inode_cluster = -1;
		}
	}

	itbl_blk = ext4_inode_table(sb, gdp);
	for (i = 0; i < sbi->s_itb_per_group; i++) {
		if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
			c = EXT4_B2C(sbi, itbl_blk + i - start);
			if ((c < num_clusters) || (c == inode_cluster) ||
			    (c == block_cluster) || (c == itbl_cluster))
				continue;
			if (c == num_clusters) {
				num_clusters++;
				continue;
			}
			num_clusters++;
			itbl_cluster = c;
		}
	}

	if (block_cluster != -1)
		num_clusters++;
	if (inode_cluster != -1)
		num_clusters++;

	return num_clusters;
}

static unsigned int num_clusters_in_group(struct super_block *sb,
					  ext4_group_t block_group)
{
	unsigned int blocks;

	if (block_group == ext4_get_groups_count(sb) - 1) {
		/*
		 * Even though mke2fs always initializes the first and
		 * last group, just in case some other tool was used,
		 * we need to make sure we calculate the right number
		 * of free blocks.
		 */
		blocks = ext4_blocks_count(EXT4_SB(sb)->s_es) -
			ext4_group_first_block_no(sb, block_group);
	} else
		blocks = EXT4_BLOCKS_PER_GROUP(sb);
	return EXT4_NUM_B2C(EXT4_SB(sb), blocks);
}

/* Initializes an uninitialized block bitmap */
static int ext4_init_block_bitmap(struct super_block *sb,
				  struct buffer_head *bh,
				  ext4_group_t block_group,
				  struct ext4_group_desc *gdp)
{
	unsigned int bit, bit_max;
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_fsblk_t start, tmp;
	int flex_bg = 0;
	struct ext4_group_info *grp;

	J_ASSERT_BH(bh, buffer_locked(bh));

	/* If the checksum is bad, mark all blocks used to prevent allocation,
	 * essentially implementing a per-group read-only flag. */
	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
		grp = ext4_get_group_info(sb, block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
			int count;
			count = ext4_free_inodes_count(sb, gdp);
			percpu_counter_sub(&sbi->s_freeinodes_counter,
					   count);
		}
		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
		return -EIO;
	}
	memset(bh->b_data, 0, sb->s_blocksize);

	bit_max = ext4_num_base_meta_clusters(sb, block_group);
	for (bit = 0; bit < bit_max; bit++)
		ext4_set_bit(bit, bh->b_data);

	start = ext4_group_first_block_no(sb, block_group);

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
		flex_bg = 1;

	/* Set bits for the block and inode bitmaps, and the inode table */
	tmp = ext4_block_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_bitmap(sb, gdp);
	if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
		ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);

	tmp = ext4_inode_table(sb, gdp);
	for (; tmp < ext4_inode_table(sb, gdp) +
		     sbi->s_itb_per_group; tmp++) {
		if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
			ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
	}

	/*
	 * Also, if the number of blocks within the group is less than
	 * blocksize * 8 (which is the size of the bitmap), set the rest
	 * of the block bitmap to 1
	 */
	ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
			     sb->s_blocksize * 8, bh->b_data);
	ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
	ext4_group_desc_csum_set(sb, block_group, gdp);
	return 0;
}

/* Return the number of free clusters in a block group.  It is used when
 * the block bitmap is uninitialized, so we can't just count the bits
 * in the bitmap. */
unsigned ext4_free_clusters_after_init(struct super_block *sb,
				       ext4_group_t block_group,
				       struct ext4_group_desc *gdp)
{
	return num_clusters_in_group(sb, block_group) -
		ext4_num_overhead_clusters(sb, block_group, gdp);
}

/*
 * The free blocks are managed by bitmaps.  A file system contains several
 * block groups.  Each group contains 1 bitmap block for blocks, 1 bitmap
 * block for inodes, N blocks for the inode table and data blocks.
 *
 * The file system contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free block count of the group.  The descriptors are loaded in memory
 * when a file system is mounted (see ext4_fill_super).
 */
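
/*
 * A rough sketch of the per-group on-disk layout described above
 * (non-flex_bg case; superblock/GDT copies appear only in groups that
 * carry backups):
 *
 *   [ super ][ GDT ][ reserved GDT ][ block bitmap ][ inode bitmap ]
 *   [ inode table ... ][ data blocks ... ]
 */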

/**
 * ext4_get_group_desc() -- load group descriptor from disk
 * @sb: super block
 * @block_group: given block group
 * @bh: pointer to the buffer head to store the block group descriptor
 */
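/*
 * Example of the two-level lookup below (values assume 4KiB blocks and
 * the classic 32-byte descriptors, so EXT4_DESC_PER_BLOCK == 128):
 * block_group 300 lives in descriptor block 300 >> 7 = 2, at slot
 * 300 & 127 = 44 within that block.
 */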
struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
					     ext4_group_t block_group,
					     struct buffer_head **bh)
{
	unsigned int group_desc;
	unsigned int offset;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_desc *desc;
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (block_group >= ngroups) {
		ext4_error(sb, "block_group >= groups_count - block_group = %u,"
			   " groups_count = %u", block_group, ngroups);

		return NULL;
	}

	group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
	offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
	if (!sbi->s_group_desc[group_desc]) {
		ext4_error(sb, "Group descriptor not loaded - "
			   "block_group = %u, group_desc = %u, desc = %u",
			   block_group, group_desc, offset);
		return NULL;
	}

	desc = (struct ext4_group_desc *)(
		(__u8 *)sbi->s_group_desc[group_desc]->b_data +
		offset * EXT4_DESC_SIZE(sb));
	if (bh)
		*bh = sbi->s_group_desc[group_desc];
	return desc;
}

/*
 * Return the block number which was discovered to be invalid, or 0 if
 * the block bitmap is valid.
 */
static ext4_fsblk_t ext4_valid_block_bitmap(struct super_block *sb,
					    struct ext4_group_desc *desc,
					    ext4_group_t block_group,
					    struct buffer_head *bh)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t offset;
	ext4_grpblk_t next_zero_bit;
	ext4_fsblk_t blk;
	ext4_fsblk_t group_first_block;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
		/* With FLEX_BG, the inode/block bitmaps and itable
		 * blocks may not be in the group at all, so skip the
		 * bitmap validation for those groups; verifying them
		 * would require also reading the block group where the
		 * bitmaps are actually located.
		 */
		return 0;
	}
	group_first_block = ext4_group_first_block_no(sb, block_group);

	/* check whether block bitmap block number is set */
	blk = ext4_block_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode bitmap block number is set */
	blk = ext4_inode_bitmap(sb, desc);
	offset = blk - group_first_block;
	if (!ext4_test_bit(EXT4_B2C(sbi, offset), bh->b_data))
		/* bad block bitmap */
		return blk;

	/* check whether the inode table block number is set */
	blk = ext4_inode_table(sb, desc);
	offset = blk - group_first_block;
	next_zero_bit = ext4_find_next_zero_bit(bh->b_data,
			EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group),
			EXT4_B2C(sbi, offset));
	if (next_zero_bit <
	    EXT4_B2C(sbi, offset + EXT4_SB(sb)->s_itb_per_group))
		/* bad bitmap for inode tables */
		return blk;
	return 0;
}

static void ext4_validate_block_bitmap(struct super_block *sb,
				       struct ext4_group_desc *desc,
				       ext4_group_t block_group,
				       struct buffer_head *bh)
{
	ext4_fsblk_t blk;
	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (buffer_verified(bh) || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
		return;

	ext4_lock_group(sb, block_group);
	blk = ext4_valid_block_bitmap(sb, desc, block_group, bh);
	if (unlikely(blk != 0)) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
			   block_group, blk);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
						    desc, bh))) {
		ext4_unlock_group(sb, block_group);
		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			percpu_counter_sub(&sbi->s_freeclusters_counter,
					   grp->bb_free);
		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
		return;
	}
	set_buffer_verified(bh);
	ext4_unlock_group(sb, block_group);
}

/**
 * ext4_read_block_bitmap_nowait()
 * @sb: super block
 * @block_group: given block group
 *
 * Read the bitmap for a given block_group, and validate that the bits
 * for the block/inode/inode table are set in the bitmap
 *
 * Return buffer_head on success or NULL in case of failure.
 */
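/*
 * A minimal usage sketch (this mirrors ext4_read_block_bitmap() below):
 *
 *	bh = ext4_read_block_bitmap_nowait(sb, block_group);
 *	if (bh && ext4_wait_block_bitmap(sb, block_group, bh)) {
 *		put_bh(bh);
 *		bh = NULL;
 *	}
 */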
struct buffer_head *
ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
{
	struct ext4_group_desc *desc;
	struct buffer_head *bh;
	ext4_fsblk_t bitmap_blk;

	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return NULL;
	bitmap_blk = ext4_block_bitmap(sb, desc);
	bh = sb_getblk(sb, bitmap_blk);
	if (unlikely(!bh)) {
		ext4_error(sb, "Cannot get buffer for block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, bitmap_blk);
		return NULL;
	}

	if (bitmap_uptodate(bh))
		goto verify;

	lock_buffer(bh);
	if (bitmap_uptodate(bh)) {
		unlock_buffer(bh);
		goto verify;
	}
	ext4_lock_group(sb, block_group);
	if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
		int err;

		err = ext4_init_block_bitmap(sb, bh, block_group, desc);
		set_bitmap_uptodate(bh);
		set_buffer_uptodate(bh);
		ext4_unlock_group(sb, block_group);
		unlock_buffer(bh);
		if (err)
			ext4_error(sb, "Checksum bad for grp %u", block_group);
		goto verify;
	}
	ext4_unlock_group(sb, block_group);
	if (buffer_uptodate(bh)) {
		/*
		 * If the group is not uninit and bh is uptodate,
		 * the bitmap is also uptodate
		 */
		set_bitmap_uptodate(bh);
		unlock_buffer(bh);
		goto verify;
	}
	/*
	 * submit the buffer_head for reading
	 */
	set_buffer_new(bh);
	trace_ext4_read_block_bitmap_load(sb, block_group);
	bh->b_end_io = ext4_end_bitmap_read;
	get_bh(bh);
	submit_bh(READ | REQ_META | REQ_PRIO, bh);
	return bh;
verify:
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	if (buffer_verified(bh))
		return bh;
	put_bh(bh);
	return NULL;
}

/* Returns 0 on success, 1 on error */
int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
			   struct buffer_head *bh)
{
	struct ext4_group_desc *desc;

	if (!buffer_new(bh))
		return 0;
	desc = ext4_get_group_desc(sb, block_group, NULL);
	if (!desc)
		return 1;
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		ext4_error(sb, "Cannot read block bitmap - "
			   "block_group = %u, block_bitmap = %llu",
			   block_group, (unsigned long long) bh->b_blocknr);
		return 1;
	}
	clear_buffer_new(bh);
	/* Panic or remount fs read-only if block bitmap is invalid */
	ext4_validate_block_bitmap(sb, desc, block_group, bh);
	/* ...but check for error just in case errors=continue. */
	return !buffer_verified(bh);
}

struct buffer_head *
ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group)
{
	struct buffer_head *bh;

	bh = ext4_read_block_bitmap_nowait(sb, block_group);
	if (!bh)
		return NULL;
	if (ext4_wait_block_bitmap(sb, block_group, bh)) {
		put_bh(bh);
		return NULL;
	}
	return bh;
}

/**
 * ext4_has_free_clusters()
 * @sbi: in-core super block structure.
 * @nclusters: number of needed clusters
 * @flags: flags from ext4_mb_new_blocks()
 *
 * Check whether the filesystem has nclusters free & available for
 * allocation.  Returns 1 on success, 0 on failure.
 */
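/*
 * Note on the counter reads below: the lockless per-cpu reads are cheap
 * but approximate, so when the result lands close to the free-clusters
 * watermark the function falls back to the exact (and more expensive)
 * percpu_counter_sum_*() before deciding.
 */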
static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
				  s64 nclusters, unsigned int flags)
{
	s64 free_clusters, dirty_clusters, rsv, resv_clusters;
	struct percpu_counter *fcc = &sbi->s_freeclusters_counter;
	struct percpu_counter *dcc = &sbi->s_dirtyclusters_counter;

	free_clusters = percpu_counter_read_positive(fcc);
	dirty_clusters = percpu_counter_read_positive(dcc);
	resv_clusters = atomic64_read(&sbi->s_resv_clusters);

	/*
	 * r_blocks_count should always be a multiple of the cluster
	 * ratio, so we are safe to do a plain bit shift only.
	 */
	rsv = (ext4_r_blocks_count(sbi->s_es) >> sbi->s_cluster_bits) +
	      resv_clusters;

	if (free_clusters - (nclusters + rsv + dirty_clusters) <
					EXT4_FREECLUSTERS_WATERMARK) {
		free_clusters = percpu_counter_sum_positive(fcc);
		dirty_clusters = percpu_counter_sum_positive(dcc);
	}
	/* Check whether we have space after accounting for current
	 * dirty clusters & root reserved clusters.
	 */
	if (free_clusters >= (rsv + nclusters + dirty_clusters))
		return 1;

	/* Hm, nope.  Are (enough) root reserved clusters available? */
	if (uid_eq(sbi->s_resuid, current_fsuid()) ||
	    (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
	    capable(CAP_SYS_RESOURCE) ||
	    (flags & EXT4_MB_USE_ROOT_BLOCKS)) {

		if (free_clusters >= (nclusters + dirty_clusters +
				      resv_clusters))
			return 1;
	}
	/* No free blocks.  Let's see if we can dip into the reserved pool */
	if (flags & EXT4_MB_USE_RESERVED) {
		if (free_clusters >= (nclusters + dirty_clusters))
			return 1;
	}

	return 0;
}

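/*
 * Reserve @nclusters against the free pool: if ext4_has_free_clusters()
 * says they are available, account them in the dirty-clusters counter
 * and return 0; otherwise return -ENOSPC.
 */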
int ext4_claim_free_clusters(struct ext4_sb_info *sbi,
			     s64 nclusters, unsigned int flags)
{
	if (ext4_has_free_clusters(sbi, nclusters, flags)) {
		percpu_counter_add(&sbi->s_dirtyclusters_counter, nclusters);
		return 0;
	} else
		return -ENOSPC;
}

/**
 * ext4_should_retry_alloc()
 * @sb: super block
 * @retries: number of attempts that have been made
 *
 * ext4_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 *
 * If the total number of retries exceeds three, return FALSE.
 */
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
	if (!ext4_has_free_clusters(EXT4_SB(sb), 1, 0) ||
	    (*retries)++ > 3 ||
	    !EXT4_SB(sb)->s_journal)
		return 0;

	jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

	return jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal);
}

/*
 * ext4_new_meta_blocks() -- allocate blocks for metadata (indexing) blocks
 *
 * @handle: handle to this transaction
 * @inode: file inode
 * @goal: given target block (filesystem wide)
 * @flags: allocation flags
 * @count: pointer to total number of clusters needed
 * @errp: error code
 *
 * Return the first allocated block number on success; *count stores the
 * total number of blocks allocated, and the error code is stored in *errp.
 */
ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
				  ext4_fsblk_t goal, unsigned int flags,
				  unsigned long *count, int *errp)
{
	struct ext4_allocation_request ar;
	ext4_fsblk_t ret;

	memset(&ar, 0, sizeof(ar));
	/* Fill with neighbour allocated blocks */
	ar.inode = inode;
	ar.goal = goal;
	ar.len = count ? *count : 1;
	ar.flags = flags;

	ret = ext4_mb_new_blocks(handle, &ar, errp);
	if (count)
		*count = ar.len;
	/*
	 * Account for the allocated meta blocks.  We will never
	 * fail EDQUOT for metadata, but we do account for it.
	 */
	if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
		dquot_alloc_block_nofail(inode,
				EXT4_C2B(EXT4_SB(inode->i_sb), ar.len));
	}
	return ret;
}

/**
 * ext4_count_free_clusters() -- count filesystem free clusters
 * @sb: superblock
 *
 * Adds up the number of free clusters from each block group.
 */
ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
{
	ext4_fsblk_t desc_count;
	struct ext4_group_desc *gdp;
	ext4_group_t i;
	ext4_group_t ngroups = ext4_get_groups_count(sb);
	struct ext4_group_info *grp;
#ifdef EXT4FS_DEBUG
	struct ext4_super_block *es;
	ext4_fsblk_t bitmap_count;
	unsigned int x;
	struct buffer_head *bitmap_bh = NULL;

	es = EXT4_SB(sb)->s_es;
	desc_count = 0;
	bitmap_count = 0;
	gdp = NULL;

	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
		brelse(bitmap_bh);
		bitmap_bh = ext4_read_block_bitmap(sb, i);
		if (bitmap_bh == NULL)
			continue;

		x = ext4_count_free(bitmap_bh->b_data,
				    EXT4_CLUSTERS_PER_GROUP(sb) / 8);
		printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
			i, ext4_free_group_clusters(sb, gdp), x);
		bitmap_count += x;
	}
	brelse(bitmap_bh);
	printk(KERN_DEBUG "ext4_count_free_clusters: stored = %llu"
	       ", computed = %llu, %llu\n",
	       EXT4_NUM_B2C(EXT4_SB(sb), ext4_free_blocks_count(es)),
	       desc_count, bitmap_count);
	return bitmap_count;
#else
	desc_count = 0;
	for (i = 0; i < ngroups; i++) {
		gdp = ext4_get_group_desc(sb, i, NULL);
		if (!gdp)
			continue;
		grp = NULL;
		if (EXT4_SB(sb)->s_group_info)
			grp = ext4_get_group_info(sb, i);
		if (!grp || !EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
			desc_count += ext4_free_group_clusters(sb, gdp);
	}

	return desc_count;
#endif
}

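/*
 * Returns 1 if @a is a positive integer power of @b (b, b*b, b*b*b, ...),
 * 0 otherwise.  Used by ext4_bg_has_super() below: with sparse_super,
 * superblock backups live only in groups that are powers of 3, 5 or 7.
 */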
static inline int test_root(ext4_group_t a, int b)
{
	while (1) {
		if (a < b)
			return 0;
		if (a == b)
			return 1;
		if ((a % b) != 0)
			return 0;
		a = a / b;
	}
}

/**
 * ext4_bg_has_super - number of blocks used by the superblock in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the superblock (primary or backup)
 * in this group.  Currently this will be only 0 or 1.
 */
int ext4_bg_has_super(struct super_block *sb, ext4_group_t group)
{
	struct ext4_super_block *es = EXT4_SB(sb)->s_es;

	if (group == 0)
		return 1;
	if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_SPARSE_SUPER2)) {
		if (group == le32_to_cpu(es->s_backup_bgs[0]) ||
		    group == le32_to_cpu(es->s_backup_bgs[1]))
			return 1;
		return 0;
	}
	if ((group <= 1) || !EXT4_HAS_RO_COMPAT_FEATURE(sb,
					EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER))
		return 1;
	if (!(group & 1))
		return 0;
	if (test_root(group, 3) || (test_root(group, 5)) ||
	    test_root(group, 7))
		return 1;

	return 0;
}

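/*
 * With META_BG, groups are clustered into "metagroups" of
 * EXT4_DESC_PER_BLOCK(sb) groups; each metagroup keeps its single
 * descriptor block in its first group, with backup copies in the
 * second and last groups of the metagroup.
 */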
static unsigned long ext4_bg_num_gdb_meta(struct super_block *sb,
					  ext4_group_t group)
{
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t first = metagroup * EXT4_DESC_PER_BLOCK(sb);
	ext4_group_t last = first + EXT4_DESC_PER_BLOCK(sb) - 1;

	if (group == first || group == first + 1 || group == last)
		return 1;
	return 0;
}

static unsigned long ext4_bg_num_gdb_nometa(struct super_block *sb,
					    ext4_group_t group)
{
	if (!ext4_bg_has_super(sb, group))
		return 0;

	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG))
		return le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	else
		return EXT4_SB(sb)->s_gdb_count;
}

/**
 * ext4_bg_num_gdb - number of blocks used by the group table in group
 * @sb: superblock for filesystem
 * @group: group number to check
 *
 * Return the number of blocks used by the group descriptor table
 * (primary or backup) in this group.  In the future there may be a
 * different number of descriptor blocks in each group.
 */
unsigned long ext4_bg_num_gdb(struct super_block *sb, ext4_group_t group)
{
	unsigned long first_meta_bg =
			le32_to_cpu(EXT4_SB(sb)->s_es->s_first_meta_bg);
	unsigned long metagroup = group / EXT4_DESC_PER_BLOCK(sb);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    metagroup < first_meta_bg)
		return ext4_bg_num_gdb_nometa(sb, group);

	return ext4_bg_num_gdb_meta(sb, group);
}

/*
 * This function returns the number of file system metadata clusters at
 * the beginning of a block group, including the reserved gdt blocks.
 */
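/*
 * For instance, in a non-META_BG group that carries a superblock backup,
 * the base metadata counted below is: 1 block for the superblock copy,
 * plus the group descriptor blocks, plus s_reserved_gdt_blocks reserved
 * for online resize; groups without a backup contribute 0.
 */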
static unsigned ext4_num_base_meta_clusters(struct super_block *sb,
					    ext4_group_t block_group)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	unsigned num;

	/* Check for superblock and gdt backups in this group */
	num = ext4_bg_has_super(sb, block_group);

	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
	    block_group < le32_to_cpu(sbi->s_es->s_first_meta_bg) *
			  sbi->s_desc_per_block) {
		if (num) {
			num += ext4_bg_num_gdb(sb, block_group);
			num += le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks);
		}
	} else { /* For META_BG_BLOCK_GROUPS */
		num += ext4_bg_num_gdb(sb, block_group);
	}
	return EXT4_NUM_B2C(sbi, num);
}

/**
 * ext4_inode_to_goal_block - return a hint for block allocation
 * @inode: inode for block allocation
 *
 * Return the ideal location to start allocating blocks for a
 * newly created inode.
 */
ext4_fsblk_t ext4_inode_to_goal_block(struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	ext4_group_t block_group;
	ext4_grpblk_t colour;
	int flex_size = ext4_flex_bg_size(EXT4_SB(inode->i_sb));
	ext4_fsblk_t bg_start;
	ext4_fsblk_t last_block;

	block_group = ei->i_block_group;
	if (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) {
		/*
		 * If there are at least EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME
		 * block groups per flexgroup, reserve the first block
		 * group for directories and special files.  Regular
		 * files will start at the second block group.  This
		 * tends to speed up directory access and improves
		 * fsck times.
		 */
		block_group &= ~(flex_size-1);
		if (S_ISREG(inode->i_mode))
			block_group++;
	}
	bg_start = ext4_group_first_block_no(inode->i_sb, block_group);
	last_block = ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es) - 1;

	/*
	 * If we are doing delayed allocation, we don't need to take
	 * colour into account.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		return bg_start;

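	/*
	 * Otherwise spread concurrent allocators out: the PID selects one
	 * of 16 "colour" slices, each starting a sixteenth of the way
	 * further into the group (or into the remaining blocks, if the
	 * last group is short).
	 */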
	if (bg_start + EXT4_BLOCKS_PER_GROUP(inode->i_sb) <= last_block)
		colour = (current->pid % 16) *
			(EXT4_BLOCKS_PER_GROUP(inode->i_sb) / 16);
	else
		colour = (current->pid % 16) * ((last_block - bg_start) / 16);
	return bg_start + colour;
}