powerpc: Fix bugs introduced by sysfs changes
[deliverable/linux.git] / fs / gfs2 / rgrp.c
1 /*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
7 * of the GNU General Public License version 2.
8 */
9
10 #include <linux/slab.h>
11 #include <linux/spinlock.h>
12 #include <linux/completion.h>
13 #include <linux/buffer_head.h>
14 #include <linux/fs.h>
15 #include <linux/gfs2_ondisk.h>
16 #include <linux/lm_interface.h>
17 #include <linux/prefetch.h>
18
19 #include "gfs2.h"
20 #include "incore.h"
21 #include "glock.h"
22 #include "glops.h"
23 #include "lops.h"
24 #include "meta_io.h"
25 #include "quota.h"
26 #include "rgrp.h"
27 #include "super.h"
28 #include "trans.h"
29 #include "util.h"
30 #include "log.h"
31 #include "inode.h"
32 #include "ops_address.h"
33
34 #define BFITNOENT ((u32)~0)
35 #define NO_BLOCK ((u64)~0)
36
37 #if BITS_PER_LONG == 32
38 #define LBITMASK (0x55555555UL)
39 #define LBITSKIP55 (0x55555555UL)
40 #define LBITSKIP00 (0x00000000UL)
41 #else
42 #define LBITMASK (0x5555555555555555UL)
43 #define LBITSKIP55 (0x5555555555555555UL)
44 #define LBITSKIP00 (0x0000000000000000UL)
45 #endif
46
47 /*
48 * These routines are used by the resource group routines (rgrp.c)
49 * to keep track of block allocation. Each block is represented by two
50 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
51 *
52 * 0 = Free
53 * 1 = Used (not metadata)
54 * 2 = Unlinked (still in use) inode
55 * 3 = Used (metadata)
56 */
57
/*
 * valid_change[new_state * 4 + cur_state] is nonzero iff a block may
 * legally move from cur_state to new_state (the GFS2_BLKST_* values
 * described above).  Rows are indexed by the requested new state,
 * columns by the current state; e.g. freeing an already-free block is
 * an illegal transition and indicates on-disk inconsistency.
 */
static const char valid_change[16] = {
	        /* current */
	/* n */ 0, 1, 1, 1,
	/* e */ 1, 0, 0, 0,
	/* w */ 0, 0, 0, 1,
	        1, 0, 0, 0
};

/* Forward declaration: bitmap search helper used by try_rgrp_unlink(). */
static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state,
			unsigned int *n);
69
/**
 * gfs2_setbit - Set the allocation state of a block in the bitmaps
 * @rgd: the resource group the block belongs to (for consistency checks)
 * @buf1: the primary bitmap buffer (on-disk buffer contents)
 * @buf2: optional second buffer (the "clone" bitmap), may be NULL
 * @offset: byte offset of the bitmap data within the buffers
 * @buflen: the length (in bytes) of the bitmap data
 * @block: the block (relative to this bitmap) whose state to set
 * @new_state: the new GFS2_BLKST_* state of the block
 *
 * Each block is represented by a 2-bit pair, GFS2_NBBY (4) blocks per
 * byte.  If the requested transition is invalid according to
 * valid_change[], the rgrp is marked inconsistent and nothing changes.
 */

static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf1,
			       unsigned char *buf2, unsigned int offset,
			       unsigned int buflen, u32 block,
			       unsigned char new_state)
{
	unsigned char *byte1, *byte2, *end, cur_state;
	/* bit position of this block's 2-bit pair within its byte */
	const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;

	byte1 = buf1 + offset + (block / GFS2_NBBY);
	end = buf1 + offset + buflen;

	BUG_ON(byte1 >= end);

	cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;

	if (unlikely(!valid_change[new_state * 4 + cur_state])) {
		gfs2_consist_rgrpd(rgd);
		return;
	}
	/* XOR with (cur ^ new) flips exactly the bits that differ */
	*byte1 ^= (cur_state ^ new_state) << bit;

	if (buf2) {
		/* mirror the change into the clone bitmap */
		byte2 = buf2 + offset + (block / GFS2_NBBY);
		cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
		*byte2 ^= (cur_state ^ new_state) << bit;
	}
}
106
107 /**
108 * gfs2_testbit - test a bit in the bitmaps
109 * @buffer: the buffer that holds the bitmaps
110 * @buflen: the length (in bytes) of the buffer
111 * @block: the block to read
112 *
113 */
114
115 static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
116 const unsigned char *buffer,
117 unsigned int buflen, u32 block)
118 {
119 const unsigned char *byte, *end;
120 unsigned char cur_state;
121 unsigned int bit;
122
123 byte = buffer + (block / GFS2_NBBY);
124 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
125 end = buffer + buflen;
126
127 gfs2_assert(rgd->rd_sbd, byte < end);
128
129 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
130
131 return cur_state;
132 }
133
/**
 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
 *               a block in a given allocation state.
 * @buffer: the buffer that holds the bitmaps
 * @buflen: the length (in bytes) of the buffer
 * @goal: start search at this block's bit-pair (within @buffer)
 * @old_state: GFS2_BLKST_XXX the state of the block we're looking for.
 *
 * Scope of @goal and returned block number is only within this bitmap buffer,
 * not entire rgrp or filesystem.  @buffer will be offset from the actual
 * beginning of a bitmap block buffer, skipping any header structures.
 *
 * The scan has two modes: a byte-at-a-time search until the cursor reaches
 * an unsigned-long boundary, then a word-at-a-time skip over words that
 * cannot contain a matching bit-pair, dropping back to the byte scan once
 * a candidate word is found.
 *
 * Return: the block number (bitmap buffer scope) that was found, or
 * BFITNOENT if no block in @old_state exists at or after @goal.
 */

static u32 gfs2_bitfit(const u8 *buffer, unsigned int buflen, u32 goal,
		       u8 old_state)
{
	const u8 *byte, *start, *end;
	int bit, startbit;
	u32 g1, g2, misaligned;
	unsigned long *plong;
	unsigned long lskipval;

	/*
	 * A whole word can be skipped when (word & LBITMASK) == lskipval:
	 * the mask keeps the low bit of every 2-bit pair, so all-00 pairs
	 * are skipped when hunting for a used block, all-01 pairs when
	 * hunting for a free one.
	 */
	lskipval = (old_state & GFS2_BLKST_USED) ? LBITSKIP00 : LBITSKIP55;
	g1 = (goal / GFS2_NBBY);		/* byte index of the goal */
	start = buffer + g1;
	byte = start;
	end = buffer + buflen;
	g2 = ALIGN(g1, sizeof(unsigned long));	/* next word boundary */
	plong = (unsigned long *)(buffer + g2);
	startbit = bit = (goal % GFS2_NBBY) * GFS2_BIT_SIZE;
	misaligned = g2 - g1;	/* bytes left until word alignment */
	if (!misaligned)
		goto ulong_aligned;
/* parse the bitmap a byte at a time */
misaligned:
	while (byte < end) {
		if (((*byte >> bit) & GFS2_BIT_MASK) == old_state) {
			/* translate back to a goal-relative block number */
			return goal +
				(((byte - start) * GFS2_NBBY) +
				 ((bit - startbit) >> 1));
		}
		bit += GFS2_BIT_SIZE;
		if (bit >= GFS2_NBBY * GFS2_BIT_SIZE) {
			bit = 0;
			byte++;
			misaligned--;
			if (!misaligned) {
				/* reached word alignment; switch modes */
				plong = (unsigned long *)byte;
				goto ulong_aligned;
			}
		}
	}
	return BFITNOENT;

/* parse the bitmap a unsigned long at a time */
ulong_aligned:
	/* Stop at "end - 1" or else prefetch can go past the end and segfault.
	   We could "if" it but we'd lose some of the performance gained.
	   This way will only slow down searching the very last 4/8 bytes
	   depending on architecture.  I've experimented with several ways
	   of writing this section such as using an else before the goto
	   but this one seems to be the fastest. */
	while ((unsigned char *)plong < end - sizeof(unsigned long)) {
		prefetch(plong + 1);
		if (((*plong) & LBITMASK) != lskipval)
			break;
		plong++;
	}
	if ((unsigned char *)plong < end) {
		/*
		 * Candidate word (or the unprefetchable tail): re-enter the
		 * byte scan for up to one word's worth of bytes.
		 * NOTE(review): 'bit' keeps its pre-jump value here and the
		 * goal-relative return arithmetic appears to rely on that —
		 * confirm before restructuring this function.
		 */
		byte = (const u8 *)plong;
		misaligned += sizeof(unsigned long) - 1;
		goto misaligned;
	}
	return BFITNOENT;
}
211
212 /**
213 * gfs2_bitcount - count the number of bits in a certain state
214 * @buffer: the buffer that holds the bitmaps
215 * @buflen: the length (in bytes) of the buffer
216 * @state: the state of the block we're looking for
217 *
218 * Returns: The number of bits
219 */
220
221 static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
222 unsigned int buflen, u8 state)
223 {
224 const u8 *byte = buffer;
225 const u8 *end = buffer + buflen;
226 const u8 state1 = state << 2;
227 const u8 state2 = state << 4;
228 const u8 state3 = state << 6;
229 u32 count = 0;
230
231 for (; byte < end; byte++) {
232 if (((*byte) & 0x03) == state)
233 count++;
234 if (((*byte) & 0x0C) == state1)
235 count++;
236 if (((*byte) & 0x30) == state2)
237 count++;
238 if (((*byte) & 0xC0) == state3)
239 count++;
240 }
241
242 return count;
243 }
244
/**
 * gfs2_rgrp_verify - Verify that a resource group is consistent
 * @rgd: the rgrp
 *
 * Counts the blocks in each of the four allocation states directly from
 * the bitmaps and cross-checks the totals against the counters kept in
 * the rgrp.  The first discrepancy found marks the rgrp inconsistent
 * and is logged.
 */

void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi = NULL;
	u32 length = rgd->rd_length;
	u32 count[4], tmp;
	int buf, x;

	memset(count, 0, 4 * sizeof(u32));

	/* Count # blocks in each of 4 possible allocation states */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		for (x = 0; x < 4; x++)
			count[x] += gfs2_bitcount(rgd,
						  bi->bi_bh->b_data +
						  bi->bi_offset,
						  bi->bi_len, x);
	}

	if (count[0] != rgd->rd_free) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "free data mismatch: %u != %u\n",
			       count[0], rgd->rd_free);
		return;
	}

	/* expected count of used-data plus unlinked-inode blocks */
	tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
	if (count[1] + count[2] != tmp) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used data mismatch: %u != %u\n",
			       count[1], tmp);
		return;
	}

	if (count[3] != rgd->rd_dinodes) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "used metadata mismatch: %u != %u\n",
			       count[3], rgd->rd_dinodes);
		return;
	}

	/* there cannot be more unlinked inodes than dinodes in total */
	if (count[2] > count[3]) {
		if (gfs2_consist_rgrpd(rgd))
			fs_err(sdp, "unlinked inodes > inodes: %u\n",
			       count[2]);
		return;
	}

}
302
303 static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
304 {
305 u64 first = rgd->rd_data0;
306 u64 last = first + rgd->rd_data;
307 return first <= block && block < last;
308 }
309
/**
 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
 * @sdp: The GFS2 superblock
 * @blk: The data block number
 *
 * Searches the most-recently-used list of resource groups under the
 * rindex spinlock.  A hit is promoted to the front of the MRU list so
 * that repeated lookups in the same area stay cheap.
 *
 * Returns: The resource group, or NULL if not found
 */

struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);

	list_for_each_entry(rgd, &sdp->sd_rindex_mru_list, rd_list_mru) {
		if (rgrp_contains_block(rgd, blk)) {
			/* move the hit to the head of the MRU list */
			list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
			spin_unlock(&sdp->sd_rindex_spin);
			return rgd;
		}
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return NULL;
}
336
/**
 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
 * @sdp: The GFS2 superblock
 *
 * Asserts that the rindex list is non-empty; callers are expected to
 * have the rindex read in before iterating.
 *
 * Returns: The first rgrp in the filesystem
 */

struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
{
	gfs2_assert(sdp, !list_empty(&sdp->sd_rindex_list));
	return list_entry(sdp->sd_rindex_list.next, struct gfs2_rgrpd, rd_list);
}
349
350 /**
351 * gfs2_rgrpd_get_next - get the next RG
352 * @rgd: A RG
353 *
354 * Returns: The next rgrp
355 */
356
357 struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
358 {
359 if (rgd->rd_list.next == &rgd->rd_sbd->sd_rindex_list)
360 return NULL;
361 return list_entry(rgd->rd_list.next, struct gfs2_rgrpd, rd_list);
362 }
363
/*
 * clear_rgrpdi - tear down every in-core resource group descriptor
 * @sdp: the filesystem
 *
 * Detaches each rgrp from both index lists, drops its glock reference
 * and frees it.  The forward pointer is cleared first, under the rindex
 * spinlock, so no other CPU hands out a soon-to-be-freed rgrp.
 * Caller must hold sd_rindex_mutex (see gfs2_clear_rgrpd()).
 */
static void clear_rgrpdi(struct gfs2_sbd *sdp)
{
	struct list_head *head;
	struct gfs2_rgrpd *rgd;
	struct gfs2_glock *gl;

	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = NULL;
	spin_unlock(&sdp->sd_rindex_spin);

	head = &sdp->sd_rindex_list;
	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_list);
		gl = rgd->rd_gl;

		list_del(&rgd->rd_list);
		list_del(&rgd->rd_list_mru);

		if (gl) {
			/* break the back-pointer before dropping our ref */
			gl->gl_object = NULL;
			gfs2_glock_put(gl);
		}

		kfree(rgd->rd_bits);
		kmem_cache_free(gfs2_rgrpd_cachep, rgd);
	}
}
391
/* Public wrapper: tear down all rgrps while holding the rindex mutex. */
void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
{
	mutex_lock(&sdp->sd_rindex_mutex);
	clear_rgrpdi(sdp);
	mutex_unlock(&sdp->sd_rindex_mutex);
}
398
/* Dump an rgrp's rindex fields to the kernel log (debug aid for
 * consistency errors; field names match struct gfs2_rindex on disk). */
static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
{
	printk(KERN_INFO "  ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
	printk(KERN_INFO "  ri_length = %u\n", rgd->rd_length);
	printk(KERN_INFO "  ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
	printk(KERN_INFO "  ri_data = %u\n", rgd->rd_data);
	printk(KERN_INFO "  ri_bitbytes = %u\n", rgd->rd_bitbytes);
}
407
/**
 * compute_bitstructs - Compute the bitmap sizes
 * @rgd: The resource group descriptor
 *
 * Calculates bitmap descriptors, one for each block that contains bitmap data
 *
 * Four layouts are possible: the whole bitmap in the rgrp header block,
 * the header block of a multi-block bitmap, the last block, and the
 * full middle blocks.  Afterwards the descriptors are sanity-checked
 * against rd_bitbytes and rd_data.
 *
 * Returns: errno
 */

static int compute_bitstructs(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_bitmap *bi;
	u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
	u32 bytes_left, bytes;
	int x;

	if (!length)
		return -EINVAL;

	rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
	if (!rgd->rd_bits)
		return -ENOMEM;

	bytes_left = rgd->rd_bitbytes;

	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;

		/* small rgrp; bitmap stored completely in header block */
		if (length == 1) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* header block */
		} else if (x == 0) {
			bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
			bi->bi_offset = sizeof(struct gfs2_rgrp);
			bi->bi_start = 0;
			bi->bi_len = bytes;
		/* last block */
		} else if (x + 1 == length) {
			bytes = bytes_left;
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		/* other blocks */
		} else {
			bytes = sdp->sd_sb.sb_bsize -
				sizeof(struct gfs2_meta_header);
			bi->bi_offset = sizeof(struct gfs2_meta_header);
			bi->bi_start = rgd->rd_bitbytes - bytes_left;
			bi->bi_len = bytes;
		}

		bytes_left -= bytes;
	}

	/* every bitmap byte must have been assigned to some block */
	if (bytes_left) {
		gfs2_consist_rgrpd(rgd);
		return -EIO;
	}
	/* the last descriptor must end exactly at rd_data blocks */
	bi = rgd->rd_bits + (length - 1);
	if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
		if (gfs2_consist_rgrpd(rgd)) {
			gfs2_rindex_print(rgd);
			fs_err(sdp, "start=%u len=%u offset=%u\n",
			       bi->bi_start, bi->bi_len, bi->bi_offset);
		}
		return -EIO;
	}

	return 0;
}
483
/**
 * gfs2_ri_total - Total up the file system space, according to the rindex.
 * @sdp: the filesystem
 *
 * Reads every complete gfs2_rindex entry from the rindex inode and sums
 * the ri_data (data-block count) fields.  Partial trailing entries are
 * ignored.  Holds sd_rindex_mutex for the duration of the scan.
 *
 * Returns: the total number of data blocks described by the rindex
 */
u64 gfs2_ri_total(struct gfs2_sbd *sdp)
{
	u64 total_data = 0;
	struct inode *inode = sdp->sd_rindex;
	struct gfs2_inode *ip = GFS2_I(inode);
	char buf[sizeof(struct gfs2_rindex)];
	struct file_ra_state ra_state;
	int error, rgrps;

	mutex_lock(&sdp->sd_rindex_mutex);
	file_ra_state_init(&ra_state, inode->i_mapping);
	for (rgrps = 0;; rgrps++) {
		loff_t pos = rgrps * sizeof(struct gfs2_rindex);

		/* stop before reading past the end of the rindex file */
		if (pos + sizeof(struct gfs2_rindex) >= ip->i_disksize)
			break;
		error = gfs2_internal_read(ip, &ra_state, buf, &pos,
					   sizeof(struct gfs2_rindex));
		if (error != sizeof(struct gfs2_rindex))
			break;
		total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
	}
	mutex_unlock(&sdp->sd_rindex_mutex);
	return total_data;
}
513
/* Decode an on-disk (big-endian) gfs2_rindex entry into the in-core
 * rgrp descriptor. */
static void gfs2_rindex_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rindex *str = buf;

	rgd->rd_addr = be64_to_cpu(str->ri_addr);
	rgd->rd_length = be32_to_cpu(str->ri_length);
	rgd->rd_data0 = be64_to_cpu(str->ri_data0);
	rgd->rd_data = be32_to_cpu(str->ri_data);
	rgd->rd_bitbytes = be32_to_cpu(str->ri_bitbytes);
}
524
/**
 * read_rindex_entry - Pull in a new resource index entry from the disk
 * @ip: the rindex inode
 * @ra_state: readahead state for the sequential scan of the rindex file
 *
 * Reads the next rindex entry (position derived from sd_rgrps),
 * allocates and initializes an in-core rgrp for it, links it onto both
 * rindex lists and attaches a glock.
 *
 * Returns: 0 on success (including clean EOF, i.e. a zero-length read),
 * error code otherwise
 */

static int read_rindex_entry(struct gfs2_inode *ip,
			     struct file_ra_state *ra_state)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
	char buf[sizeof(struct gfs2_rindex)];
	int error;
	struct gfs2_rgrpd *rgd;

	error = gfs2_internal_read(ip, ra_state, buf, &pos,
				   sizeof(struct gfs2_rindex));
	if (!error)
		return 0;	/* end of the rindex file */
	if (error != sizeof(struct gfs2_rindex)) {
		/* a short read is as fatal as an error return */
		if (error > 0)
			error = -EIO;
		return error;
	}

	rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
	error = -ENOMEM;
	if (!rgd)
		return error;

	mutex_init(&rgd->rd_mutex);
	lops_init_le(&rgd->rd_le, &gfs2_rg_lops);
	rgd->rd_sbd = sdp;

	/*
	 * Linked before the fallible steps below; on failure the rgd is
	 * reclaimed later via clear_rgrpdi() — NOTE(review): verify all
	 * error paths of the callers do invoke that cleanup.
	 */
	list_add_tail(&rgd->rd_list, &sdp->sd_rindex_list);
	list_add_tail(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);

	gfs2_rindex_in(rgd, buf);
	error = compute_bitstructs(rgd);
	if (error)
		return error;

	error = gfs2_glock_get(sdp, rgd->rd_addr,
			       &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
	if (error)
		return error;

	rgd->rd_gl->gl_object = rgd;
	rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
	rgd->rd_flags |= GFS2_RDF_CHECK;
	return error;
}
578
/**
 * gfs2_ri_update - Pull in a new resource index from the disk
 * @ip: pointer to the rindex inode
 *
 * Discards the current in-core rgrps and re-reads every entry from the
 * rindex file.  The file size must be an exact multiple of the entry
 * size.  Caller holds sd_rindex_mutex (see gfs2_rindex_hold()).
 *
 * Returns: 0 on successful update, error code otherwise
 */

static int gfs2_ri_update(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	u64 rgrp_count = ip->i_disksize;
	int error;

	/* rindex size must divide evenly into whole entries */
	if (do_div(rgrp_count, sizeof(struct gfs2_rindex))) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	clear_rgrpdi(sdp);

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0; sdp->sd_rgrps < rgrp_count; sdp->sd_rgrps++) {
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			/* don't leave a half-built rgrp list behind */
			clear_rgrpdi(sdp);
			return error;
		}
	}

	sdp->sd_rindex_uptodate = 1;
	return 0;
}
613
/**
 * gfs2_ri_update_special - Pull in a new resource index from the disk
 *
 * This is a special version that's safe to call from gfs2_inplace_reserve_i.
 * In this case we know that we don't have any resource groups in memory yet.
 *
 * Unlike gfs2_ri_update() it does not validate or clear the existing
 * list first, and it simply ignores a partial trailing entry.
 *
 * @ip: pointer to the rindex inode
 *
 * Returns: 0 on successful update, error code otherwise
 */
static int gfs2_ri_update_special(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct inode *inode = &ip->i_inode;
	struct file_ra_state ra_state;
	int error;

	file_ra_state_init(&ra_state, inode->i_mapping);
	for (sdp->sd_rgrps = 0;; sdp->sd_rgrps++) {
		/* Ignore partials */
		if ((sdp->sd_rgrps + 1) * sizeof(struct gfs2_rindex) >
		    ip->i_disksize)
			break;
		error = read_rindex_entry(ip, &ra_state);
		if (error) {
			clear_rgrpdi(sdp);
			return error;
		}
	}

	sdp->sd_rindex_uptodate = 1;
	return 0;
}
647
/**
 * gfs2_rindex_hold - Grab a lock on the rindex
 * @sdp: The GFS2 superblock
 * @ri_gh: the glock holder
 *
 * We grab a lock on the rindex inode to make sure that it doesn't
 * change whilst we are performing an operation. We keep this lock
 * for quite long periods of time compared to other locks. This
 * doesn't matter, since it is shared and it is very, very rarely
 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
 *
 * This makes sure that we're using the latest copy of the resource index
 * special file, which might have been updated if someone expanded the
 * filesystem (via gfs2_grow utility), which adds new resource groups.
 *
 * Returns: 0 on success, error code otherwise
 */

int gfs2_rindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ri_gh)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
	struct gfs2_glock *gl = ip->i_gl;
	int error;

	error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, ri_gh);
	if (error)
		return error;

	/* Read new copy from disk if we don't have the latest */
	if (!sdp->sd_rindex_uptodate) {
		mutex_lock(&sdp->sd_rindex_mutex);
		/* re-check under the mutex: another task may have updated */
		if (!sdp->sd_rindex_uptodate) {
			error = gfs2_ri_update(ip);
			if (error)
				gfs2_glock_dq_uninit(ri_gh);
		}
		mutex_unlock(&sdp->sd_rindex_mutex);
	}

	return error;
}
689
/* Decode an on-disk (big-endian) rgrp header into the in-core rgrp,
 * translating the GFS2_RGF_NOALLOC flag into its in-core counterpart. */
static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
{
	const struct gfs2_rgrp *str = buf;
	u32 rg_flags;

	rg_flags = be32_to_cpu(str->rg_flags);
	if (rg_flags & GFS2_RGF_NOALLOC)
		rgd->rd_flags |= GFS2_RDF_NOALLOC;
	else
		rgd->rd_flags &= ~GFS2_RDF_NOALLOC;
	rgd->rd_free = be32_to_cpu(str->rg_free);
	rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
	rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
}
704
/* Encode the in-core rgrp back into its on-disk (big-endian) form,
 * zeroing padding and reserved space. */
static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
{
	struct gfs2_rgrp *str = buf;
	u32 rg_flags = 0;

	if (rgd->rd_flags & GFS2_RDF_NOALLOC)
		rg_flags |= GFS2_RGF_NOALLOC;
	str->rg_flags = cpu_to_be32(rg_flags);
	str->rg_free = cpu_to_be32(rgd->rd_free);
	str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
	str->__pad = cpu_to_be32(0);
	str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
	memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
}
719
/**
 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Read in all of a Resource Group's header and bitmap blocks.
 * Caller must eventually call gfs2_rgrp_relse() to free the bitmaps.
 *
 * The buffers are reference-counted via rd_bh_count (protected by the
 * rindex spinlock): if they are already in memory only the count is
 * bumped.  Reads are issued for all blocks first, then waited on in
 * reverse order, overlapping the I/O.
 *
 * Returns: errno
 */

int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_glock *gl = rgd->rd_gl;
	unsigned int length = rgd->rd_length;
	struct gfs2_bitmap *bi;
	unsigned int x, y;
	int error;

	mutex_lock(&rgd->rd_mutex);

	/* fast path: buffers already held, just take another reference */
	spin_lock(&sdp->sd_rindex_spin);
	if (rgd->rd_bh_count) {
		rgd->rd_bh_count++;
		spin_unlock(&sdp->sd_rindex_spin);
		mutex_unlock(&rgd->rd_mutex);
		return 0;
	}
	spin_unlock(&sdp->sd_rindex_spin);

	/* issue reads for every bitmap block */
	for (x = 0; x < length; x++) {
		bi = rgd->rd_bits + x;
		error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
		if (error)
			goto fail;
	}

	/* wait for completion and verify metadata types (block 0 is the
	   rgrp header, the rest are bitmap continuation blocks) */
	for (y = length; y--;) {
		bi = rgd->rd_bits + y;
		error = gfs2_meta_wait(sdp, bi->bi_bh);
		if (error)
			goto fail;
		if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
					      GFS2_METATYPE_RG)) {
			error = -EIO;
			goto fail;
		}
	}

	if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
		gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
		rgd->rd_flags |= GFS2_RDF_UPTODATE;
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);

	mutex_unlock(&rgd->rd_mutex);

	return 0;

fail:
	/* release every buffer successfully read before the failure */
	while (x--) {
		bi = rgd->rd_bits + x;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
		gfs2_assert_warn(sdp, !bi->bi_clone);
	}
	mutex_unlock(&rgd->rd_mutex);

	return error;
}
794
/* Take an additional reference on an rgrp's bitmap buffers; the caller
 * must already hold one (asserted via rd_bh_count being nonzero). */
void gfs2_rgrp_bh_hold(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	rgd->rd_bh_count++;
	spin_unlock(&sdp->sd_rindex_spin);
}
804
/**
 * gfs2_rgrp_bh_put - Release RG bitmaps read in with gfs2_rgrp_bh_get()
 * @rgd: the struct gfs2_rgrpd describing the RG to read in
 *
 * Drops one reference on the bitmap buffers; when the last reference
 * goes away the clone bitmaps are freed and the buffer heads released.
 */

void gfs2_rgrp_bh_put(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	int x, length = rgd->rd_length;

	spin_lock(&sdp->sd_rindex_spin);
	gfs2_assert_warn(rgd->rd_sbd, rgd->rd_bh_count);
	if (--rgd->rd_bh_count) {
		/* other holders remain; nothing to free yet */
		spin_unlock(&sdp->sd_rindex_spin);
		return;
	}

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		kfree(bi->bi_clone);
		bi->bi_clone = NULL;
		brelse(bi->bi_bh);
		bi->bi_bh = NULL;
	}

	spin_unlock(&sdp->sd_rindex_spin);
}
833
/* Re-sync every clone bitmap with the real (buffer head) bitmap and
 * reset the cloned free-block count to match the on-disk value. */
void gfs2_rgrp_repolish_clones(struct gfs2_rgrpd *rgd)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	unsigned int length = rgd->rd_length;
	unsigned int x;

	for (x = 0; x < length; x++) {
		struct gfs2_bitmap *bi = rgd->rd_bits + x;
		if (!bi->bi_clone)
			continue;
		memcpy(bi->bi_clone + bi->bi_offset,
		       bi->bi_bh->b_data + bi->bi_offset, bi->bi_len);
	}

	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone = rgd->rd_free;
	spin_unlock(&sdp->sd_rindex_spin);
}
852
/**
 * gfs2_alloc_get - get the struct gfs2_alloc structure for an inode
 * @ip: the incore GFS2 inode structure
 *
 * Allocates a fresh, zeroed gfs2_alloc and attaches it to the inode.
 * The inode must not already have one (BUG otherwise).
 *
 * Returns: the struct gfs2_alloc, or NULL on allocation failure
 */

struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip)
{
	BUG_ON(ip->i_alloc != NULL);
	/* NOTE(review): GFP_KERNEL in a filesystem allocation path —
	   confirm this cannot recurse into GFS2 under memory pressure
	   (elsewhere in this file GFP_NOFS is used). */
	ip->i_alloc = kzalloc(sizeof(struct gfs2_alloc), GFP_KERNEL);
	return ip->i_alloc;
}
866
867 /**
868 * try_rgrp_fit - See if a given reservation will fit in a given RG
869 * @rgd: the RG data
870 * @al: the struct gfs2_alloc structure describing the reservation
871 *
872 * If there's room for the requested blocks to be allocated from the RG:
873 * Sets the $al_rgd field in @al.
874 *
875 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
876 */
877
878 static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_alloc *al)
879 {
880 struct gfs2_sbd *sdp = rgd->rd_sbd;
881 int ret = 0;
882
883 if (rgd->rd_flags & GFS2_RDF_NOALLOC)
884 return 0;
885
886 spin_lock(&sdp->sd_rindex_spin);
887 if (rgd->rd_free_clone >= al->al_requested) {
888 al->al_rgd = rgd;
889 ret = 1;
890 }
891 spin_unlock(&sdp->sd_rindex_spin);
892
893 return ret;
894 }
895
/**
 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
 * @rgd: The rgrp
 * @last_unlinked: highest unlinked block already processed; updated here
 *
 * Scans the rgrp's bitmaps for blocks in the GFS2_BLKST_UNLINKED state
 * and tries to look up the corresponding inode (so it can be
 * deallocated).  Once the whole rgrp has been scanned without finding
 * anything, the GFS2_RDF_CHECK flag is cleared so the scan is not
 * repeated.
 *
 * Returns: The inode, if one has been found, otherwise NULL
 */

static struct inode *try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked)
{
	struct inode *inode;
	u32 goal = 0, block;
	u64 no_addr;
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	unsigned int n;

	for(;;) {
		if (goal >= rgd->rd_data)
			break;
		/* searching UNLINKED->UNLINKED doesn't modify the bitmap,
		   but must still exclude a concurrent log flush */
		down_write(&sdp->sd_log_flush_lock);
		n = 1;
		block = rgblk_search(rgd, goal, GFS2_BLKST_UNLINKED,
				     GFS2_BLKST_UNLINKED, &n);
		up_write(&sdp->sd_log_flush_lock);
		if (block == BFITNOENT)
			break;
		/* rgblk_search can return a block < goal, so we need to
		   keep it marching forward. */
		no_addr = block + rgd->rd_data0;
		goal++;
		/* skip blocks another pass has already handled */
		if (*last_unlinked != NO_BLOCK && no_addr <= *last_unlinked)
			continue;
		*last_unlinked = no_addr;
		inode = gfs2_inode_lookup(rgd->rd_sbd->sd_vfs, DT_UNKNOWN,
					  no_addr, -1, 1);
		if (!IS_ERR(inode))
			return inode;
	}

	/* nothing (more) to find here; don't scan this rgrp again */
	rgd->rd_flags &= ~GFS2_RDF_CHECK;
	return NULL;
}
937
/**
 * recent_rgrp_next - get next RG from "recent" list
 * @cur_rgd: current rgrp
 *
 * Walks one step along the MRU list under the rindex spinlock.
 *
 * Returns: The next rgrp in the recent list, or NULL at the end
 */

static struct gfs2_rgrpd *recent_rgrp_next(struct gfs2_rgrpd *cur_rgd)
{
	struct gfs2_sbd *sdp = cur_rgd->rd_sbd;
	struct list_head *head;
	struct gfs2_rgrpd *rgd;

	spin_lock(&sdp->sd_rindex_spin);
	head = &sdp->sd_rindex_mru_list;
	if (unlikely(cur_rgd->rd_list_mru.next == head)) {
		/* cur_rgd was the last entry on the MRU list */
		spin_unlock(&sdp->sd_rindex_spin);
		return NULL;
	}
	rgd = list_entry(cur_rgd->rd_list_mru.next, struct gfs2_rgrpd, rd_list_mru);
	spin_unlock(&sdp->sd_rindex_spin);
	return rgd;
}
961
/**
 * forward_rgrp_get - get an rgrp to try next from full list
 * @sdp: The GFS2 superblock
 *
 * If no forward rgrp has been chosen yet, one is picked by spreading
 * journals evenly across the rgrp list (each journal starts its full
 * scan at a different offset to reduce inter-node contention).
 *
 * Returns: The rgrp to try next
 */

static struct gfs2_rgrpd *forward_rgrp_get(struct gfs2_sbd *sdp)
{
	struct gfs2_rgrpd *rgd;
	unsigned int journals = gfs2_jindex_size(sdp);
	unsigned int rg = 0, x;

	spin_lock(&sdp->sd_rindex_spin);

	rgd = sdp->sd_rindex_forward;
	if (!rgd) {
		/* offset this journal's starting rgrp by its journal id */
		if (sdp->sd_rgrps >= journals)
			rg = sdp->sd_rgrps * sdp->sd_jdesc->jd_jid / journals;

		for (x = 0, rgd = gfs2_rgrpd_get_first(sdp); x < rg;
		     x++, rgd = gfs2_rgrpd_get_next(rgd))
			/* Do Nothing */;

		sdp->sd_rindex_forward = rgd;
	}

	spin_unlock(&sdp->sd_rindex_spin);

	return rgd;
}
993
/**
 * forward_rgrp_set - set the forward rgrp pointer
 * @sdp: the filesystem
 * @rgd: The new forward rgrp
 *
 * Records where the next full-list allocation scan should start.
 */

static void forward_rgrp_set(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd)
{
	spin_lock(&sdp->sd_rindex_spin);
	sdp->sd_rindex_forward = rgd;
	spin_unlock(&sdp->sd_rindex_spin);
}
1007
/**
 * get_local_rgrp - Choose and lock a rgrp for allocation
 * @ip: the inode to reserve space for
 * @last_unlinked: progress marker for the unlinked-inode scan
 *
 * Try to acquire rgrp in way which avoids contending with others.
 *
 * Two phases: first walk the MRU list from the inode's goal rgrp with
 * try-locks only, then walk the full rgrp list (starting at the
 * per-journal forward pointer), dropping the LM_FLAG_TRY after the
 * first complete loop.  A found unlinked inode is returned to the
 * caller for disposal; a successful reservation returns NULL with
 * al->al_rgd set and the rgrp glock held.
 *
 * Returns: NULL on success, an unlinked inode to delete, or ERR_PTR
 */

static struct inode *get_local_rgrp(struct gfs2_inode *ip, u64 *last_unlinked)
{
	struct inode *inode = NULL;
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd, *begin = NULL;
	struct gfs2_alloc *al = ip->i_alloc;
	int flags = LM_FLAG_TRY;
	int skipped = 0;
	int loops = 0;
	int error, rg_locked;

	/* start at the rgrp containing the inode's last allocation */
	rgd = gfs2_blk2rgrpd(sdp, ip->i_goal);

	while (rgd) {
		rg_locked = 0;

		/* don't try to re-acquire a glock we already hold */
		if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else {
			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
						   LM_FLAG_TRY, &al->al_rgd_gh);
		}
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			if (rgd->rd_flags & GFS2_RDF_CHECK)
				inode = try_rgrp_unlink(rgd, last_unlinked);
			if (!rg_locked)
				gfs2_glock_dq_uninit(&al->al_rgd_gh);
			if (inode)
				return inode;
			/* fall through */
		case GLR_TRYFAILED:
			rgd = recent_rgrp_next(rgd);
			break;

		default:
			return ERR_PTR(error);
		}
	}

	/* Go through full list of rgrps */

	begin = rgd = forward_rgrp_get(sdp);

	for (;;) {
		rg_locked = 0;

		if (gfs2_glock_is_locked_by_me(rgd->rd_gl)) {
			rg_locked = 1;
			error = 0;
		} else {
			error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, flags,
						   &al->al_rgd_gh);
		}
		switch (error) {
		case 0:
			if (try_rgrp_fit(rgd, al))
				goto out;
			if (rgd->rd_flags & GFS2_RDF_CHECK)
				inode = try_rgrp_unlink(rgd, last_unlinked);
			if (!rg_locked)
				gfs2_glock_dq_uninit(&al->al_rgd_gh);
			if (inode)
				return inode;
			break;

		case GLR_TRYFAILED:
			skipped++;
			break;

		default:
			return ERR_PTR(error);
		}

		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);

		if (rgd == begin) {
			/* completed one full loop over all rgrps */
			if (++loops >= 3)
				return ERR_PTR(-ENOSPC);
			/* nothing was skipped, so retrying without TRY
			   won't see anything new: burn an extra loop */
			if (!skipped)
				loops++;
			flags = 0;	/* wait for glocks from now on */
			if (loops == 2)
				gfs2_log_flush(sdp, NULL);
		}
	}

out:
	if (begin) {
		/* promote the winner on the MRU list and advance the
		   forward pointer so the next scan starts elsewhere */
		spin_lock(&sdp->sd_rindex_spin);
		list_move(&rgd->rd_list_mru, &sdp->sd_rindex_mru_list);
		spin_unlock(&sdp->sd_rindex_spin);
		rgd = gfs2_rgrpd_get_next(rgd);
		if (!rgd)
			rgd = gfs2_rgrpd_get_first(sdp);
		forward_rgrp_set(sdp, rgd);
	}

	return NULL;
}
1123
/**
 * gfs2_inplace_reserve_i - Reserve space in the filesystem
 * @ip: the inode to reserve space for
 * @file: source file of the caller (for diagnostics)
 * @line: source line of the caller (for diagnostics)
 *
 * Picks and locks a resource group with enough free blocks for the
 * reservation in ip->i_alloc.  If the search turns up an unlinked
 * inode instead, it is disposed of and the whole search is retried.
 *
 * Returns: errno
 */

int gfs2_inplace_reserve_i(struct gfs2_inode *ip, char *file, unsigned int line)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct inode *inode;
	int error = 0;
	u64 last_unlinked = NO_BLOCK;

	if (gfs2_assert_warn(sdp, al->al_requested))
		return -EINVAL;

try_again:
	/* We need to hold the rindex unless the inode we're using is
	   the rindex itself, in which case it's already held. */
	if (ip != GFS2_I(sdp->sd_rindex))
		error = gfs2_rindex_hold(sdp, &al->al_ri_gh);
	else if (!sdp->sd_rgrps) /* We may not have the rindex read in, so: */
		error = gfs2_ri_update_special(ip);

	if (error)
		return error;

	inode = get_local_rgrp(ip, &last_unlinked);
	if (inode) {
		/* either an error, or an unlinked inode to dispose of
		   before retrying the reservation */
		if (ip != GFS2_I(sdp->sd_rindex))
			gfs2_glock_dq_uninit(&al->al_ri_gh);
		if (IS_ERR(inode))
			return PTR_ERR(inode);
		iput(inode);
		gfs2_log_flush(sdp, NULL);
		goto try_again;
	}

	/* record the caller for leak diagnostics in gfs2_inplace_release */
	al->al_file = file;
	al->al_line = line;

	return 0;
}
1169
/**
 * gfs2_inplace_release - release an inplace reservation
 * @ip: the inode the reservation was taken out on
 *
 * Release a reservation made by gfs2_inplace_reserve().  Warns (with
 * the reserving caller's file/line) if more blocks were allocated than
 * were reserved.
 */

void gfs2_inplace_release(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;

	if (gfs2_assert_warn(sdp, al->al_alloced <= al->al_requested) == -1)
		fs_warn(sdp, "al_alloced = %u, al_requested = %u "
			     "al_file = %s, al_line = %u\n",
			     al->al_alloced, al->al_requested, al->al_file,
			     al->al_line);

	al->al_rgd = NULL;
	if (al->al_rgd_gh.gh_gl)
		gfs2_glock_dq_uninit(&al->al_rgd_gh);
	/* the rindex glock is only held when ip isn't the rindex itself */
	if (ip != GFS2_I(sdp->sd_rindex))
		gfs2_glock_dq_uninit(&al->al_ri_gh);
}
1194
1195 /**
1196 * gfs2_get_block_type - Check a block in a RG is of given type
1197 * @rgd: the resource group holding the block
1198 * @block: the block number
1199 *
1200 * Returns: The block type (GFS2_BLKST_*)
1201 */
1202
1203 unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
1204 {
1205 struct gfs2_bitmap *bi = NULL;
1206 u32 length, rgrp_block, buf_block;
1207 unsigned int buf;
1208 unsigned char type;
1209
1210 length = rgd->rd_length;
1211 rgrp_block = block - rgd->rd_data0;
1212
1213 for (buf = 0; buf < length; buf++) {
1214 bi = rgd->rd_bits + buf;
1215 if (rgrp_block < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1216 break;
1217 }
1218
1219 gfs2_assert(rgd->rd_sbd, buf < length);
1220 buf_block = rgrp_block - bi->bi_start * GFS2_NBBY;
1221
1222 type = gfs2_testbit(rgd, bi->bi_bh->b_data + bi->bi_offset,
1223 bi->bi_len, buf_block);
1224
1225 return type;
1226 }
1227
/**
 * rgblk_search - find a block in @old_state, change allocation
 *           state to @new_state
 * @rgd: the resource group descriptor
 * @goal: the goal block within the RG (start here to search for avail block)
 * @old_state: GFS2_BLKST_XXX the before-allocation state to find
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 * @n: The extent length; in: requested, out: number of blocks obtained
 *
 * Walk rgrp's bitmap to find bits that represent a block in @old_state.
 * Add the found bitmap buffer to the transaction.
 * Set the found bits to @new_state to change block's allocation state.
 *
 * This function never fails, because we wouldn't call it unless we
 * know (from reservation results, etc.) that a block is available.
 *
 * Scope of @goal and returned block is just within rgrp, not the whole
 * filesystem.
 *
 * Returns: the block number allocated (or BFITNOENT if nothing was found)
 */

static u32 rgblk_search(struct gfs2_rgrpd *rgd, u32 goal,
			unsigned char old_state, unsigned char new_state,
			unsigned int *n)
{
	struct gfs2_bitmap *bi = NULL;
	const u32 length = rgd->rd_length;
	u32 blk = 0;
	unsigned int buf, x;
	const unsigned int elen = *n;	/* requested extent length */
	const u8 *buffer;

	*n = 0;
	/* Find bitmap block that contains bits for goal block */
	for (buf = 0; buf < length; buf++) {
		bi = rgd->rd_bits + buf;
		if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
			break;
	}

	gfs2_assert(rgd->rd_sbd, buf < length);

	/* Convert scope of "goal" from rgrp-wide to within found bit block */
	goal -= bi->bi_start * GFS2_NBBY;

	/* Search (up to entire) bitmap in this rgrp for allocatable block.
	   "x <= length", instead of "x < length", because we typically start
	   the search in the middle of a bit block, but if we can't find an
	   allocatable block anywhere else, we want to be able wrap around and
	   search in the first part of our first-searched bit block. */
	for (x = 0; x <= length; x++) {
		/* The GFS2_BLKST_UNLINKED state doesn't apply to the clone
		   bitmaps, so we must search the originals for that. */
		buffer = bi->bi_bh->b_data + bi->bi_offset;
		if (old_state != GFS2_BLKST_UNLINKED && bi->bi_clone)
			buffer = bi->bi_clone + bi->bi_offset;

		blk = gfs2_bitfit(buffer, bi->bi_len, goal, old_state);
		if (blk != BFITNOENT)
			break;

		/* Try next bitmap block (wrap back to rgrp header if at end) */
		buf = (buf + 1) % length;
		bi = rgd->rd_bits + buf;
		goal = 0;
	}

	/* When a state change was requested, flip the found bit and then try
	   to extend the run (up to elen blocks) while the following bits in
	   the same bitmap block are still free */
	if (blk != BFITNOENT && old_state != new_state) {
		*n = 1;
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone, bi->bi_offset,
			    bi->bi_len, blk, new_state);
		goal = blk;
		while (*n < elen) {
			goal++;
			/* Don't run off the end of this bitmap block */
			if (goal >= (bi->bi_len * GFS2_NBBY))
				break;
			if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
			    GFS2_BLKST_FREE)
				break;
			gfs2_setbit(rgd, bi->bi_bh->b_data, bi->bi_clone,
				    bi->bi_offset, bi->bi_len, goal,
				    new_state);
			(*n)++;
		}
	}

	/* Convert result back from bitmap-block scope to rgrp scope */
	return (blk == BFITNOENT) ? blk : (bi->bi_start * GFS2_NBBY) + blk;
}
1318
/**
 * rgblk_free - Change alloc state of given block(s)
 * @sdp: the filesystem
 * @bstart: the start of a run of blocks to free
 * @blen: the length of the block run (all must lie within ONE RG!)
 * @new_state: GFS2_BLKST_XXX the after-allocation block state
 *
 * Returns: Resource group containing the block(s), or NULL on consistency
 *          error (no resource group contains @bstart)
 */

static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
				     u32 blen, unsigned char new_state)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_bitmap *bi = NULL;
	u32 length, rgrp_blk, buf_blk;
	unsigned int buf;

	rgd = gfs2_blk2rgrpd(sdp, bstart);
	if (!rgd) {
		if (gfs2_consist(sdp))
			fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
		return NULL;
	}

	length = rgd->rd_length;

	/* Convert the start block into an offset within the rgrp */
	rgrp_blk = bstart - rgd->rd_data0;

	while (blen--) {
		/* Find the bitmap buffer whose range covers this block */
		for (buf = 0; buf < length; buf++) {
			bi = rgd->rd_bits + buf;
			if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
				break;
		}

		gfs2_assert(rgd->rd_sbd, buf < length);

		buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
		rgrp_blk++;

		/* Lazily create a clone of this bitmap before modifying it.
		   NOTE(review): presumably the clone preserves the pre-free
		   state until the journal is flushed so the blocks aren't
		   reused too early -- confirm against the log flush code. */
		if (!bi->bi_clone) {
			bi->bi_clone = kmalloc(bi->bi_bh->b_size,
					       GFP_NOFS | __GFP_NOFAIL);
			memcpy(bi->bi_clone + bi->bi_offset,
			       bi->bi_bh->b_data + bi->bi_offset,
			       bi->bi_len);
		}
		gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
		/* Update only the real bitmap (clone argument is NULL), so
		   the clone keeps showing the old state */
		gfs2_setbit(rgd, bi->bi_bh->b_data, NULL, bi->bi_offset,
			    bi->bi_len, buf_blk, new_state);
	}

	return rgd;
}
1374
/**
 * gfs2_alloc_block - Allocate a block
 * @ip: the inode to allocate the block for
 * @n: in: the requested extent length, out: the number of blocks allocated
 *
 * Returns: the (first) allocated block number
 */

u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 goal, blk;
	u64 block;

	/* Start searching at the inode's goal block if it falls inside this
	   rgrp, otherwise at the rgrp's last allocation point */
	if (rgrp_contains_block(rgd, ip->i_goal))
		goal = ip->i_goal - rgd->rd_data0;
	else
		goal = rgd->rd_last_alloc;

	blk = rgblk_search(rgd, goal, GFS2_BLKST_FREE, GFS2_BLKST_USED, n);
	/* The rgrp was reserved with enough free space, so the search must
	   not fail */
	BUG_ON(blk == BFITNOENT);

	rgd->rd_last_alloc = blk;
	block = rgd->rd_data0 + blk;	/* rgrp offset -> fs block number */
	ip->i_goal = block;

	gfs2_assert_withdraw(sdp, rgd->rd_free >= *n);
	rgd->rd_free -= *n;

	/* Write the updated rgrp header back through the journal */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced += *n;

	gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
	gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone -= *n;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1419
/**
 * gfs2_alloc_di - Allocate a dinode
 * @dip: the directory that the inode is going in
 * @generation: out: the new dinode's generation number
 *
 * Returns: the block allocated
 */

u64 gfs2_alloc_di(struct gfs2_inode *dip, u64 *generation)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_alloc *al = dip->i_alloc;
	struct gfs2_rgrpd *rgd = al->al_rgd;
	u32 blk;
	u64 block;
	unsigned int n = 1;	/* a dinode is always a single block */

	blk = rgblk_search(rgd, rgd->rd_last_alloc,
			   GFS2_BLKST_FREE, GFS2_BLKST_DINODE, &n);
	/* The rgrp was reserved with enough free space, so the search must
	   not fail */
	BUG_ON(blk == BFITNOENT);

	rgd->rd_last_alloc = blk;

	block = rgd->rd_data0 + blk;	/* rgrp offset -> fs block number */

	gfs2_assert_withdraw(sdp, rgd->rd_free);
	rgd->rd_free--;
	rgd->rd_dinodes++;
	*generation = rgd->rd_igeneration++;
	/* Write the updated rgrp header back through the journal */
	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	al->al_alloced++;

	/* One less free block, one more dinode */
	gfs2_statfs_change(sdp, 0, -1, +1);
	gfs2_trans_add_unrevoke(sdp, block, 1);

	/* rd_free_clone is protected by sd_rindex_spin */
	spin_lock(&sdp->sd_rindex_spin);
	rgd->rd_free_clone--;
	spin_unlock(&sdp->sd_rindex_spin);

	return block;
}
1462
1463 /**
1464 * gfs2_free_data - free a contiguous run of data block(s)
1465 * @ip: the inode these blocks are being freed from
1466 * @bstart: first block of a run of contiguous blocks
1467 * @blen: the length of the block run
1468 *
1469 */
1470
1471 void gfs2_free_data(struct gfs2_inode *ip, u64 bstart, u32 blen)
1472 {
1473 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1474 struct gfs2_rgrpd *rgd;
1475
1476 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1477 if (!rgd)
1478 return;
1479
1480 rgd->rd_free += blen;
1481
1482 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1483 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1484
1485 gfs2_trans_add_rg(rgd);
1486
1487 gfs2_statfs_change(sdp, 0, +blen, 0);
1488 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1489 }
1490
1491 /**
1492 * gfs2_free_meta - free a contiguous run of data block(s)
1493 * @ip: the inode these blocks are being freed from
1494 * @bstart: first block of a run of contiguous blocks
1495 * @blen: the length of the block run
1496 *
1497 */
1498
1499 void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
1500 {
1501 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1502 struct gfs2_rgrpd *rgd;
1503
1504 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
1505 if (!rgd)
1506 return;
1507
1508 rgd->rd_free += blen;
1509
1510 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1511 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1512
1513 gfs2_trans_add_rg(rgd);
1514
1515 gfs2_statfs_change(sdp, 0, +blen, 0);
1516 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
1517 gfs2_meta_wipe(ip, bstart, blen);
1518 }
1519
1520 void gfs2_unlink_di(struct inode *inode)
1521 {
1522 struct gfs2_inode *ip = GFS2_I(inode);
1523 struct gfs2_sbd *sdp = GFS2_SB(inode);
1524 struct gfs2_rgrpd *rgd;
1525 u64 blkno = ip->i_no_addr;
1526
1527 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
1528 if (!rgd)
1529 return;
1530 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
1531 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
1532 gfs2_trans_add_rg(rgd);
1533 }
1534
/**
 * gfs2_free_uninit_di - free a dinode block without quota/metadata cleanup
 * @rgd: the resource group expected to contain the dinode
 * @blkno: the block number of the dinode
 *
 * Marks the dinode block free in the bitmap, updates the rgrp's free and
 * dinode counts, and writes the rgrp header back through the journal.
 */

static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
{
	struct gfs2_sbd *sdp = rgd->rd_sbd;
	struct gfs2_rgrpd *tmp_rgd;

	tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
	if (!tmp_rgd)
		return;
	/* The block must belong to the rgrp the caller passed in */
	gfs2_assert_withdraw(sdp, rgd == tmp_rgd);

	/* Freeing a dinode from an rgrp that claims to have none indicates
	   on-disk corruption */
	if (!rgd->rd_dinodes)
		gfs2_consist_rgrpd(rgd);
	rgd->rd_dinodes--;
	rgd->rd_free++;

	gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
	gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);

	/* One more free block, one less used dinode */
	gfs2_statfs_change(sdp, 0, +1, -1);
	gfs2_trans_add_rg(rgd);
}
1556
1557
1558 void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
1559 {
1560 gfs2_free_uninit_di(rgd, ip->i_no_addr);
1561 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
1562 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
1563 }
1564
1565 /**
1566 * gfs2_rlist_add - add a RG to a list of RGs
1567 * @sdp: the filesystem
1568 * @rlist: the list of resource groups
1569 * @block: the block
1570 *
1571 * Figure out what RG a block belongs to and add that RG to the list
1572 *
1573 * FIXME: Don't use NOFAIL
1574 *
1575 */
1576
1577 void gfs2_rlist_add(struct gfs2_sbd *sdp, struct gfs2_rgrp_list *rlist,
1578 u64 block)
1579 {
1580 struct gfs2_rgrpd *rgd;
1581 struct gfs2_rgrpd **tmp;
1582 unsigned int new_space;
1583 unsigned int x;
1584
1585 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
1586 return;
1587
1588 rgd = gfs2_blk2rgrpd(sdp, block);
1589 if (!rgd) {
1590 if (gfs2_consist(sdp))
1591 fs_err(sdp, "block = %llu\n", (unsigned long long)block);
1592 return;
1593 }
1594
1595 for (x = 0; x < rlist->rl_rgrps; x++)
1596 if (rlist->rl_rgd[x] == rgd)
1597 return;
1598
1599 if (rlist->rl_rgrps == rlist->rl_space) {
1600 new_space = rlist->rl_space + 10;
1601
1602 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
1603 GFP_NOFS | __GFP_NOFAIL);
1604
1605 if (rlist->rl_rgd) {
1606 memcpy(tmp, rlist->rl_rgd,
1607 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
1608 kfree(rlist->rl_rgd);
1609 }
1610
1611 rlist->rl_space = new_space;
1612 rlist->rl_rgd = tmp;
1613 }
1614
1615 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
1616 }
1617
1618 /**
1619 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
1620 * and initialize an array of glock holders for them
1621 * @rlist: the list of resource groups
1622 * @state: the lock state to acquire the RG lock in
1623 * @flags: the modifier flags for the holder structures
1624 *
1625 * FIXME: Don't use NOFAIL
1626 *
1627 */
1628
1629 void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
1630 {
1631 unsigned int x;
1632
1633 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
1634 GFP_NOFS | __GFP_NOFAIL);
1635 for (x = 0; x < rlist->rl_rgrps; x++)
1636 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
1637 state, 0,
1638 &rlist->rl_ghs[x]);
1639 }
1640
1641 /**
1642 * gfs2_rlist_free - free a resource group list
1643 * @list: the list of resource groups
1644 *
1645 */
1646
1647 void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
1648 {
1649 unsigned int x;
1650
1651 kfree(rlist->rl_rgd);
1652
1653 if (rlist->rl_ghs) {
1654 for (x = 0; x < rlist->rl_rgrps; x++)
1655 gfs2_holder_uninit(&rlist->rl_ghs[x]);
1656 kfree(rlist->rl_ghs);
1657 }
1658 }
1659
This page took 0.066834 seconds and 5 git commands to generate.