GFS2: Update gfs2_get_block_type() to use rbm
[deliverable/linux.git] / fs / gfs2 / rgrp.c
b3b94faa
DT
1/*
2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
fe6c991c 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
b3b94faa
DT
4 *
5 * This copyrighted material is made available to anyone wishing to use,
6 * modify, copy, or redistribute it subject to the terms and conditions
e9fc2aa0 7 * of the GNU General Public License version 2.
b3b94faa
DT
8 */
9
b3b94faa
DT
10#include <linux/slab.h>
11#include <linux/spinlock.h>
12#include <linux/completion.h>
13#include <linux/buffer_head.h>
f42faf4f 14#include <linux/fs.h>
5c676f6d 15#include <linux/gfs2_ondisk.h>
1f466a47 16#include <linux/prefetch.h>
f15ab561 17#include <linux/blkdev.h>
7c9ca621 18#include <linux/rbtree.h>
b3b94faa
DT
19
20#include "gfs2.h"
5c676f6d 21#include "incore.h"
b3b94faa
DT
22#include "glock.h"
23#include "glops.h"
b3b94faa
DT
24#include "lops.h"
25#include "meta_io.h"
26#include "quota.h"
27#include "rgrp.h"
28#include "super.h"
29#include "trans.h"
5c676f6d 30#include "util.h"
172e045a 31#include "log.h"
c8cdf479 32#include "inode.h"
63997775 33#include "trace_gfs2.h"
b3b94faa 34
2c1e52aa 35#define BFITNOENT ((u32)~0)
6760bdcd 36#define NO_BLOCK ((u64)~0)
88c8ab1f 37
8e2e0047
BP
38#define RSRV_CONTENTION_FACTOR 4
39#define RGRP_RSRV_MAX_CONTENDERS 2
40
1f466a47
BP
41#if BITS_PER_LONG == 32
42#define LBITMASK (0x55555555UL)
43#define LBITSKIP55 (0x55555555UL)
44#define LBITSKIP00 (0x00000000UL)
45#else
46#define LBITMASK (0x5555555555555555UL)
47#define LBITSKIP55 (0x5555555555555555UL)
48#define LBITSKIP00 (0x0000000000000000UL)
49#endif
50
88c8ab1f
SW
51/*
52 * These routines are used by the resource group routines (rgrp.c)
53 * to keep track of block allocation. Each block is represented by two
feaa7bba
SW
54 * bits. So, each byte represents GFS2_NBBY (i.e. 4) blocks.
55 *
56 * 0 = Free
57 * 1 = Used (not metadata)
58 * 2 = Unlinked (still in use) inode
59 * 3 = Used (metadata)
88c8ab1f
SW
60 */
61
62static const char valid_change[16] = {
63 /* current */
feaa7bba 64 /* n */ 0, 1, 1, 1,
88c8ab1f 65 /* e */ 1, 0, 0, 0,
feaa7bba 66 /* w */ 0, 0, 0, 1,
88c8ab1f
SW
67 1, 0, 0, 0
68};
69
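/*
 * Illustrative sketch (not part of the original file): how one bitmap byte
 * decodes into four block states. The byte 0xE4 (binary 11 10 01 00) reads,
 * from the least significant bit-pair upwards: block 0 = 00 (free),
 * block 1 = 01 (used data), block 2 = 10 (unlinked inode) and
 * block 3 = 11 (used metadata). The helper below mirrors the extraction
 * done by gfs2_testbit()/gfs2_setbit() and relies only on the GFS2_NBBY,
 * GFS2_BIT_SIZE and GFS2_BIT_MASK constants from gfs2_ondisk.h.
 */
static inline unsigned char example_state_in_byte(unsigned char byte,
						  unsigned int blk)
{
	unsigned int bit = (blk % GFS2_NBBY) * GFS2_BIT_SIZE;
	return (byte >> bit) & GFS2_BIT_MASK;
}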
70/**
71 * gfs2_setbit - Set a bit in the bitmaps
29c578f5 72 * @rgd: the resource group descriptor
29c578f5
BP
73 * @buf2: the clone buffer that holds the bitmaps
74 * @bi: the bitmap structure
88c8ab1f
SW
75 * @block: the block to set
76 * @new_state: the new state of the block
77 *
78 */
79
06344b91
BP
80static inline void gfs2_setbit(struct gfs2_rgrpd *rgd, unsigned char *buf2,
81 struct gfs2_bitmap *bi, u32 block,
82 unsigned char new_state)
88c8ab1f 83{
b45e41d7 84 unsigned char *byte1, *byte2, *end, cur_state;
95c8e17f 85 unsigned int buflen = bi->bi_len;
b45e41d7 86 const unsigned int bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
88c8ab1f 87
06344b91
BP
88 byte1 = bi->bi_bh->b_data + bi->bi_offset + (block / GFS2_NBBY);
89 end = bi->bi_bh->b_data + bi->bi_offset + buflen;
88c8ab1f 90
b45e41d7 91 BUG_ON(byte1 >= end);
88c8ab1f 92
b45e41d7 93 cur_state = (*byte1 >> bit) & GFS2_BIT_MASK;
88c8ab1f 94
b45e41d7 95 if (unlikely(!valid_change[new_state * 4 + cur_state])) {
95c8e17f
BP
96 printk(KERN_WARNING "GFS2: buf_blk = 0x%llx old_state=%d, "
97 "new_state=%d\n",
98 (unsigned long long)block, cur_state, new_state);
99 printk(KERN_WARNING "GFS2: rgrp=0x%llx bi_start=0x%lx\n",
100 (unsigned long long)rgd->rd_addr,
101 (unsigned long)bi->bi_start);
102 printk(KERN_WARNING "GFS2: bi_offset=0x%lx bi_len=0x%lx\n",
103 (unsigned long)bi->bi_offset,
104 (unsigned long)bi->bi_len);
105 dump_stack();
88c8ab1f 106 gfs2_consist_rgrpd(rgd);
b45e41d7
SW
107 return;
108 }
109 *byte1 ^= (cur_state ^ new_state) << bit;
110
111 if (buf2) {
29c578f5 112 byte2 = buf2 + bi->bi_offset + (block / GFS2_NBBY);
b45e41d7
SW
113 cur_state = (*byte2 >> bit) & GFS2_BIT_MASK;
114 *byte2 ^= (cur_state ^ new_state) << bit;
115 }
88c8ab1f
SW
116}
117
118/**
119 * gfs2_testbit - test a bit in the bitmaps
886b1416 120 * @rgd: the resource group descriptor
88c8ab1f
SW
121 * @buffer: the buffer that holds the bitmaps
122 * @buflen: the length (in bytes) of the buffer
123 * @block: the block to read
124 *
125 */
126
b45e41d7
SW
127static inline unsigned char gfs2_testbit(struct gfs2_rgrpd *rgd,
128 const unsigned char *buffer,
129 unsigned int buflen, u32 block)
88c8ab1f 130{
b45e41d7
SW
131 const unsigned char *byte, *end;
132 unsigned char cur_state;
88c8ab1f
SW
133 unsigned int bit;
134
135 byte = buffer + (block / GFS2_NBBY);
136 bit = (block % GFS2_NBBY) * GFS2_BIT_SIZE;
137 end = buffer + buflen;
138
139 gfs2_assert(rgd->rd_sbd, byte < end);
140
141 cur_state = (*byte >> bit) & GFS2_BIT_MASK;
142
143 return cur_state;
144}
145
223b2b88
SW
146/**
147 * gfs2_bit_search
148 * @ptr: Pointer to bitmap data
149 * @mask: Mask to use (normally 0x55555.... but adjusted for search start)
150 * @state: The state we are searching for
151 *
152 * We xor the bitmap data with a pattern which is the bitwise opposite
153 * of what we are looking for. This gives rise to a pattern of ones
154 * wherever there is a match. Since we have two bits per entry, we
155 * take this pattern, shift it down by one place and then AND it with
156 * the original. All the even bit positions (0,2,4, etc) then represent
157 * successful matches, so we mask with 0x55555..... to remove the unwanted
158 * odd bit positions.
159 *
160 * This allows searching of a whole u64 at once (32 blocks) with a
161 * single test (on 64 bit arches).
162 */
163
164static inline u64 gfs2_bit_search(const __le64 *ptr, u64 mask, u8 state)
165{
166 u64 tmp;
167 static const u64 search[] = {
075ac448
HE
168 [0] = 0xffffffffffffffffULL,
169 [1] = 0xaaaaaaaaaaaaaaaaULL,
170 [2] = 0x5555555555555555ULL,
171 [3] = 0x0000000000000000ULL,
223b2b88
SW
172 };
173 tmp = le64_to_cpu(*ptr) ^ search[state];
174 tmp &= (tmp >> 1);
175 tmp &= mask;
176 return tmp;
177}
178
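/*
 * Illustrative sketch (not part of the original file): gfs2_bit_search()
 * specialised for GFS2_BLKST_USED (binary 01) with the steps spelled out.
 * The xor with 0xaaaa... turns every "01" entry into "11"; tmp & (tmp >> 1)
 * then leaves a 1 only in the even bit of each matching entry; masking with
 * 0x5555... clears the odd bit positions, exactly as described above.
 */
static inline u64 example_search_used(__le64 word)
{
	u64 tmp = le64_to_cpu(word) ^ 0xaaaaaaaaaaaaaaaaULL;
	tmp &= (tmp >> 1);
	return tmp & 0x5555555555555555ULL;
}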
8e2e0047
BP
179/**
180 * rs_cmp - multi-block reservation range compare
181 * @blk: absolute file system block number of the new reservation
182 * @len: number of blocks in the new reservation
183 * @rs: existing reservation to compare against
184 *
185 * returns: 1 if the block range is beyond the reach of the reservation
186 * -1 if the block range is before the start of the reservation
187 * 0 if the block range overlaps with the reservation
188 */
189static inline int rs_cmp(u64 blk, u32 len, struct gfs2_blkreserv *rs)
190{
4a993fb1 191 u64 startblk = gfs2_rbm_to_block(&rs->rs_rbm);
8e2e0047
BP
192
193 if (blk >= startblk + rs->rs_free)
194 return 1;
195 if (blk + len - 1 < startblk)
196 return -1;
197 return 0;
198}
199
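/*
 * Illustrative sketch (not part of the original file): how rs_cmp() results
 * are read. For a reservation starting at block 100 with rs_free == 10
 * (blocks 100..109), rs_cmp(95, 5, rs) == -1, rs_cmp(110, 4, rs) == 1 and
 * rs_cmp(105, 20, rs) == 0, which is what lets the rb-tree walks below
 * steer left, steer right or stop on an overlap.
 */
static inline int example_range_overlaps_rs(u64 blk, u32 len,
					    struct gfs2_blkreserv *rs)
{
	return rs_cmp(blk, len, rs) == 0;
}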
88c8ab1f
SW
200/**
201 * gfs2_bitfit - Search an rgrp's bitmap buffer to find a bit-pair representing
202 * a block in a given allocation state.
886b1416 203 * @buf: the buffer that holds the bitmaps
223b2b88 204 * @len: the length (in bytes) of the buffer
88c8ab1f 205 * @goal: start search at this block's bit-pair (within @buf)
223b2b88 206 * @state: GFS2_BLKST_XXX the state of the block we're looking for.
88c8ab1f
SW
207 *
208 * Scope of @goal and returned block number is only within this bitmap buffer,
209 * not the entire rgrp or filesystem. @buf will be offset from the actual
223b2b88
SW
210 * beginning of a bitmap block buffer, skipping any header structures, but
211 * headers are always a multiple of 64 bits long so that the buffer is
212 * always aligned to a 64 bit boundary.
213 *
214 * The size of the buffer is in bytes, but it is assumed that it is
fd589a8f 215 * always ok to read a complete multiple of 64 bits at the end
223b2b88 216 * of the block in case the end is not aligned to a natural boundary.
88c8ab1f
SW
217 *
218 * Return: the block number (bitmap buffer scope) that was found
219 */
220
02ab1721
HE
221static u32 gfs2_bitfit(const u8 *buf, const unsigned int len,
222 u32 goal, u8 state)
88c8ab1f 223{
223b2b88
SW
224 u32 spoint = (goal << 1) & ((8*sizeof(u64)) - 1);
225 const __le64 *ptr = ((__le64 *)buf) + (goal >> 5);
226 const __le64 *end = (__le64 *)(buf + ALIGN(len, sizeof(u64)));
227 u64 tmp;
075ac448 228 u64 mask = 0x5555555555555555ULL;
223b2b88
SW
229 u32 bit;
230
231 BUG_ON(state > 3);
232
233 /* Mask off bits we don't care about at the start of the search */
234 mask <<= spoint;
235 tmp = gfs2_bit_search(ptr, mask, state);
236 ptr++;
237 while(tmp == 0 && ptr < end) {
075ac448 238 tmp = gfs2_bit_search(ptr, 0x5555555555555555ULL, state);
223b2b88 239 ptr++;
1f466a47 240 }
223b2b88
SW
241 /* Mask off any bits which are more than len bytes from the start */
242 if (ptr == end && (len & (sizeof(u64) - 1)))
243 tmp &= (((u64)~0) >> (64 - 8*(len & (sizeof(u64) - 1))));
244 /* Didn't find anything, so return */
245 if (tmp == 0)
246 return BFITNOENT;
247 ptr--;
d8bd504a 248 bit = __ffs64(tmp);
223b2b88
SW
249 bit /= 2; /* two bits per entry in the bitmap */
250 return (((const unsigned char *)ptr - buf) * GFS2_NBBY) + bit;
88c8ab1f
SW
251}
252
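/*
 * Illustrative sketch (not part of the original file): finding the first
 * free block in a single struct gfs2_bitmap and converting the result from
 * bitmap scope to rgrp scope (the same conversion gfs2_bi2rgd_blk() is used
 * for elsewhere in this file). Assumes the bitmap buffer has already been
 * read in, i.e. bi->bi_bh is valid.
 */
static inline u32 example_first_free_in_bitmap(const struct gfs2_bitmap *bi)
{
	const u8 *buffer = bi->bi_bh->b_data + bi->bi_offset;
	u32 blk = gfs2_bitfit(buffer, bi->bi_len, 0, GFS2_BLKST_FREE);

	if (blk == BFITNOENT)
		return BFITNOENT;
	return blk + bi->bi_start * GFS2_NBBY;	/* bitmap -> rgrp scope */
}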
253/**
254 * gfs2_bitcount - count the number of bits in a certain state
886b1416 255 * @rgd: the resource group descriptor
88c8ab1f
SW
256 * @buffer: the buffer that holds the bitmaps
257 * @buflen: the length (in bytes) of the buffer
258 * @state: the state of the block we're looking for
259 *
260 * Returns: The number of bits
261 */
262
110acf38
SW
263static u32 gfs2_bitcount(struct gfs2_rgrpd *rgd, const u8 *buffer,
264 unsigned int buflen, u8 state)
88c8ab1f 265{
110acf38
SW
266 const u8 *byte = buffer;
267 const u8 *end = buffer + buflen;
268 const u8 state1 = state << 2;
269 const u8 state2 = state << 4;
270 const u8 state3 = state << 6;
cd915493 271 u32 count = 0;
88c8ab1f
SW
272
273 for (; byte < end; byte++) {
274 if (((*byte) & 0x03) == state)
275 count++;
276 if (((*byte) & 0x0C) == state1)
277 count++;
278 if (((*byte) & 0x30) == state2)
279 count++;
280 if (((*byte) & 0xC0) == state3)
281 count++;
282 }
283
284 return count;
285}
286
b3b94faa
DT
287/**
288 * gfs2_rgrp_verify - Verify that a resource group is consistent
b3b94faa
DT
289 * @rgd: the rgrp
290 *
291 */
292
293void gfs2_rgrp_verify(struct gfs2_rgrpd *rgd)
294{
295 struct gfs2_sbd *sdp = rgd->rd_sbd;
296 struct gfs2_bitmap *bi = NULL;
bb8d8a6f 297 u32 length = rgd->rd_length;
cd915493 298 u32 count[4], tmp;
b3b94faa
DT
299 int buf, x;
300
cd915493 301 memset(count, 0, 4 * sizeof(u32));
b3b94faa
DT
302
303 /* Count # blocks in each of 4 possible allocation states */
304 for (buf = 0; buf < length; buf++) {
305 bi = rgd->rd_bits + buf;
306 for (x = 0; x < 4; x++)
307 count[x] += gfs2_bitcount(rgd,
308 bi->bi_bh->b_data +
309 bi->bi_offset,
310 bi->bi_len, x);
311 }
312
cfc8b549 313 if (count[0] != rgd->rd_free) {
b3b94faa
DT
314 if (gfs2_consist_rgrpd(rgd))
315 fs_err(sdp, "free data mismatch: %u != %u\n",
cfc8b549 316 count[0], rgd->rd_free);
b3b94faa
DT
317 return;
318 }
319
73f74948 320 tmp = rgd->rd_data - rgd->rd_free - rgd->rd_dinodes;
6b946170 321 if (count[1] != tmp) {
b3b94faa
DT
322 if (gfs2_consist_rgrpd(rgd))
323 fs_err(sdp, "used data mismatch: %u != %u\n",
324 count[1], tmp);
325 return;
326 }
327
6b946170 328 if (count[2] + count[3] != rgd->rd_dinodes) {
b3b94faa 329 if (gfs2_consist_rgrpd(rgd))
feaa7bba 330 fs_err(sdp, "used metadata mismatch: %u != %u\n",
6b946170 331 count[2] + count[3], rgd->rd_dinodes);
b3b94faa
DT
332 return;
333 }
b3b94faa
DT
334}
335
bb8d8a6f 336static inline int rgrp_contains_block(struct gfs2_rgrpd *rgd, u64 block)
b3b94faa 337{
bb8d8a6f
SW
338 u64 first = rgd->rd_data0;
339 u64 last = first + rgd->rd_data;
16910427 340 return first <= block && block < last;
b3b94faa
DT
341}
342
343/**
344 * gfs2_blk2rgrpd - Find resource group for a given data/meta block number
345 * @sdp: The GFS2 superblock
886b1416
BP
346 * @blk: The data block number
347 * @exact: True if this needs to be an exact match
b3b94faa
DT
348 *
349 * Returns: The resource group, or NULL if not found
350 */
351
66fc061b 352struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact)
b3b94faa 353{
66fc061b 354 struct rb_node *n, *next;
f75bbfb4 355 struct gfs2_rgrpd *cur;
b3b94faa
DT
356
357 spin_lock(&sdp->sd_rindex_spin);
66fc061b
SW
358 n = sdp->sd_rindex_tree.rb_node;
359 while (n) {
360 cur = rb_entry(n, struct gfs2_rgrpd, rd_node);
361 next = NULL;
7c9ca621 362 if (blk < cur->rd_addr)
66fc061b 363 next = n->rb_left;
f75bbfb4 364 else if (blk >= cur->rd_data0 + cur->rd_data)
66fc061b
SW
365 next = n->rb_right;
366 if (next == NULL) {
b3b94faa 367 spin_unlock(&sdp->sd_rindex_spin);
66fc061b
SW
368 if (exact) {
369 if (blk < cur->rd_addr)
370 return NULL;
371 if (blk >= cur->rd_data0 + cur->rd_data)
372 return NULL;
373 }
7c9ca621 374 return cur;
b3b94faa 375 }
66fc061b 376 n = next;
b3b94faa 377 }
b3b94faa
DT
378 spin_unlock(&sdp->sd_rindex_spin);
379
380 return NULL;
381}
382
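/*
 * Illustrative sketch (not part of the original file): a typical lookup
 * through gfs2_blk2rgrpd(), mapping a filesystem block to its resource
 * group and then to an offset within that rgrp's data area. With
 * exact == true, a block that no rgrp covers yields NULL rather than the
 * nearest rgrp found during the tree descent.
 */
static inline u64 example_rgrp_offset(struct gfs2_sbd *sdp, u64 block)
{
	struct gfs2_rgrpd *rgd = gfs2_blk2rgrpd(sdp, block, true);

	if (rgd == NULL)
		return 0;			/* not covered by any rgrp */
	return block - rgd->rd_data0;		/* offset within the rgrp */
}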
383/**
384 * gfs2_rgrpd_get_first - get the first Resource Group in the filesystem
385 * @sdp: The GFS2 superblock
386 *
387 * Returns: The first rgrp in the filesystem
388 */
389
390struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp)
391{
7c9ca621
BP
392 const struct rb_node *n;
393 struct gfs2_rgrpd *rgd;
394
8339ee54 395 spin_lock(&sdp->sd_rindex_spin);
7c9ca621
BP
396 n = rb_first(&sdp->sd_rindex_tree);
397 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
8339ee54 398 spin_unlock(&sdp->sd_rindex_spin);
7c9ca621
BP
399
400 return rgd;
b3b94faa
DT
401}
402
403/**
404 * gfs2_rgrpd_get_next - get the next RG
886b1416 405 * @rgd: the resource group descriptor
b3b94faa
DT
406 *
407 * Returns: The next rgrp
408 */
409
410struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd)
411{
7c9ca621
BP
412 struct gfs2_sbd *sdp = rgd->rd_sbd;
413 const struct rb_node *n;
414
415 spin_lock(&sdp->sd_rindex_spin);
416 n = rb_next(&rgd->rd_node);
417 if (n == NULL)
418 n = rb_first(&sdp->sd_rindex_tree);
419
420 if (unlikely(&rgd->rd_node == n)) {
421 spin_unlock(&sdp->sd_rindex_spin);
b3b94faa 422 return NULL;
7c9ca621
BP
423 }
424 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
425 spin_unlock(&sdp->sd_rindex_spin);
426 return rgd;
b3b94faa
DT
427}
428
8339ee54
SW
429void gfs2_free_clones(struct gfs2_rgrpd *rgd)
430{
431 int x;
432
433 for (x = 0; x < rgd->rd_length; x++) {
434 struct gfs2_bitmap *bi = rgd->rd_bits + x;
435 kfree(bi->bi_clone);
436 bi->bi_clone = NULL;
437 }
438}
439
0a305e49
BP
440/**
441 * gfs2_rs_alloc - make sure we have a reservation assigned to the inode
442 * @ip: the inode for this reservation
443 */
444int gfs2_rs_alloc(struct gfs2_inode *ip)
445{
446 int error = 0;
8e2e0047
BP
447 struct gfs2_blkreserv *res;
448
449 if (ip->i_res)
450 return 0;
451
452 res = kmem_cache_zalloc(gfs2_rsrv_cachep, GFP_NOFS);
453 if (!res)
454 error = -ENOMEM;
0a305e49 455
4a993fb1
SW
456 rb_init_node(&res->rs_node);
457
0a305e49 458 down_write(&ip->i_rw_mutex);
8e2e0047
BP
459 if (ip->i_res)
460 kmem_cache_free(gfs2_rsrv_cachep, res);
461 else
462 ip->i_res = res;
0a305e49
BP
463 up_write(&ip->i_rw_mutex);
464 return error;
465}
466
8e2e0047
BP
467static void dump_rs(struct seq_file *seq, struct gfs2_blkreserv *rs)
468{
469 gfs2_print_dbg(seq, " r: %llu s:%llu b:%u f:%u\n",
4a993fb1
SW
470 rs->rs_rbm.rgd->rd_addr, gfs2_rbm_to_block(&rs->rs_rbm),
471 rs->rs_rbm.offset, rs->rs_free);
8e2e0047
BP
472}
473
0a305e49 474/**
8e2e0047
BP
475 * __rs_deltree - remove a multi-block reservation from the rgd tree
476 * @rs: The reservation to remove
477 *
478 */
4a993fb1 479static void __rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
8e2e0047
BP
480{
481 struct gfs2_rgrpd *rgd;
482
483 if (!gfs2_rs_active(rs))
484 return;
485
4a993fb1
SW
486 rgd = rs->rs_rbm.rgd;
487 trace_gfs2_rs(ip, rs, TRACE_RS_TREEDEL);
488 rb_erase(&rs->rs_node, &rgd->rd_rstree);
489 rb_init_node(&rs->rs_node);
8e2e0047
BP
490 BUG_ON(!rgd->rd_rs_cnt);
491 rgd->rd_rs_cnt--;
492
493 if (rs->rs_free) {
494 /* return reserved blocks to the rgrp and the ip */
4a993fb1
SW
495 BUG_ON(rs->rs_rbm.rgd->rd_reserved < rs->rs_free);
496 rs->rs_rbm.rgd->rd_reserved -= rs->rs_free;
8e2e0047 497 rs->rs_free = 0;
4a993fb1 498 clear_bit(GBF_FULL, &rs->rs_rbm.bi->bi_flags);
8e2e0047
BP
499 smp_mb__after_clear_bit();
500 }
8e2e0047
BP
501}
502
503/**
504 * gfs2_rs_deltree - remove a multi-block reservation from the rgd tree
505 * @rs: The reservation to remove
506 *
507 */
4a993fb1 508void gfs2_rs_deltree(struct gfs2_inode *ip, struct gfs2_blkreserv *rs)
8e2e0047
BP
509{
510 struct gfs2_rgrpd *rgd;
511
4a993fb1
SW
512 rgd = rs->rs_rbm.rgd;
513 if (rgd) {
514 spin_lock(&rgd->rd_rsspin);
515 __rs_deltree(ip, rs);
516 spin_unlock(&rgd->rd_rsspin);
517 }
8e2e0047
BP
518}
519
520/**
521 * gfs2_rs_delete - delete a multi-block reservation
0a305e49
BP
522 * @ip: The inode for this reservation
523 *
524 */
525void gfs2_rs_delete(struct gfs2_inode *ip)
526{
527 down_write(&ip->i_rw_mutex);
528 if (ip->i_res) {
4a993fb1 529 gfs2_rs_deltree(ip, ip->i_res);
8e2e0047
BP
530 trace_gfs2_rs(ip, ip->i_res, TRACE_RS_DELETE);
531 BUG_ON(ip->i_res->rs_free);
0a305e49
BP
532 kmem_cache_free(gfs2_rsrv_cachep, ip->i_res);
533 ip->i_res = NULL;
534 }
535 up_write(&ip->i_rw_mutex);
536}
537
8e2e0047
BP
538/**
539 * return_all_reservations - return all reserved blocks back to the rgrp.
540 * @rgd: the rgrp that needs its space back
541 *
542 * We previously reserved a bunch of blocks for allocation. Now we need to
543 * give them back. This leaves the reservation structures intact, but removes
544 * all of their corresponding "no-fly zones".
545 */
546static void return_all_reservations(struct gfs2_rgrpd *rgd)
547{
548 struct rb_node *n;
549 struct gfs2_blkreserv *rs;
550
551 spin_lock(&rgd->rd_rsspin);
552 while ((n = rb_first(&rgd->rd_rstree))) {
553 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
4a993fb1 554 __rs_deltree(NULL, rs);
8e2e0047
BP
555 }
556 spin_unlock(&rgd->rd_rsspin);
557}
558
8339ee54 559void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
b3b94faa 560{
7c9ca621 561 struct rb_node *n;
b3b94faa
DT
562 struct gfs2_rgrpd *rgd;
563 struct gfs2_glock *gl;
564
7c9ca621
BP
565 while ((n = rb_first(&sdp->sd_rindex_tree))) {
566 rgd = rb_entry(n, struct gfs2_rgrpd, rd_node);
b3b94faa
DT
567 gl = rgd->rd_gl;
568
7c9ca621 569 rb_erase(n, &sdp->sd_rindex_tree);
b3b94faa
DT
570
571 if (gl) {
8339ee54 572 spin_lock(&gl->gl_spin);
5c676f6d 573 gl->gl_object = NULL;
8339ee54 574 spin_unlock(&gl->gl_spin);
29687a2a 575 gfs2_glock_add_to_lru(gl);
b3b94faa
DT
576 gfs2_glock_put(gl);
577 }
578
8339ee54 579 gfs2_free_clones(rgd);
b3b94faa 580 kfree(rgd->rd_bits);
8e2e0047 581 return_all_reservations(rgd);
6bdd9be6 582 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
b3b94faa
DT
583 }
584}
585
bb8d8a6f
SW
586static void gfs2_rindex_print(const struct gfs2_rgrpd *rgd)
587{
588 printk(KERN_INFO " ri_addr = %llu\n", (unsigned long long)rgd->rd_addr);
589 printk(KERN_INFO " ri_length = %u\n", rgd->rd_length);
590 printk(KERN_INFO " ri_data0 = %llu\n", (unsigned long long)rgd->rd_data0);
591 printk(KERN_INFO " ri_data = %u\n", rgd->rd_data);
592 printk(KERN_INFO " ri_bitbytes = %u\n", rgd->rd_bitbytes);
593}
594
b3b94faa
DT
595/**
596 * compute_bitstructs - Compute the bitmap sizes
597 * @rgd: The resource group descriptor
598 *
599 * Calculates bitmap descriptors, one for each block that contains bitmap data
600 *
601 * Returns: errno
602 */
603
604static int compute_bitstructs(struct gfs2_rgrpd *rgd)
605{
606 struct gfs2_sbd *sdp = rgd->rd_sbd;
607 struct gfs2_bitmap *bi;
bb8d8a6f 608 u32 length = rgd->rd_length; /* # blocks in hdr & bitmap */
cd915493 609 u32 bytes_left, bytes;
b3b94faa
DT
610 int x;
611
feaa7bba
SW
612 if (!length)
613 return -EINVAL;
614
dd894be8 615 rgd->rd_bits = kcalloc(length, sizeof(struct gfs2_bitmap), GFP_NOFS);
b3b94faa
DT
616 if (!rgd->rd_bits)
617 return -ENOMEM;
618
bb8d8a6f 619 bytes_left = rgd->rd_bitbytes;
b3b94faa
DT
620
621 for (x = 0; x < length; x++) {
622 bi = rgd->rd_bits + x;
623
60a0b8f9 624 bi->bi_flags = 0;
b3b94faa
DT
625 /* small rgrp; bitmap stored completely in header block */
626 if (length == 1) {
627 bytes = bytes_left;
628 bi->bi_offset = sizeof(struct gfs2_rgrp);
629 bi->bi_start = 0;
630 bi->bi_len = bytes;
631 /* header block */
632 } else if (x == 0) {
633 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp);
634 bi->bi_offset = sizeof(struct gfs2_rgrp);
635 bi->bi_start = 0;
636 bi->bi_len = bytes;
637 /* last block */
638 } else if (x + 1 == length) {
639 bytes = bytes_left;
640 bi->bi_offset = sizeof(struct gfs2_meta_header);
bb8d8a6f 641 bi->bi_start = rgd->rd_bitbytes - bytes_left;
b3b94faa
DT
642 bi->bi_len = bytes;
643 /* other blocks */
644 } else {
568f4c96
SW
645 bytes = sdp->sd_sb.sb_bsize -
646 sizeof(struct gfs2_meta_header);
b3b94faa 647 bi->bi_offset = sizeof(struct gfs2_meta_header);
bb8d8a6f 648 bi->bi_start = rgd->rd_bitbytes - bytes_left;
b3b94faa
DT
649 bi->bi_len = bytes;
650 }
651
652 bytes_left -= bytes;
653 }
654
655 if (bytes_left) {
656 gfs2_consist_rgrpd(rgd);
657 return -EIO;
658 }
659 bi = rgd->rd_bits + (length - 1);
bb8d8a6f 660 if ((bi->bi_start + bi->bi_len) * GFS2_NBBY != rgd->rd_data) {
b3b94faa 661 if (gfs2_consist_rgrpd(rgd)) {
bb8d8a6f 662 gfs2_rindex_print(rgd);
b3b94faa
DT
663 fs_err(sdp, "start=%u len=%u offset=%u\n",
664 bi->bi_start, bi->bi_len, bi->bi_offset);
665 }
666 return -EIO;
667 }
668
669 return 0;
670}
671
7ae8fa84
RP
672/**
673 * gfs2_ri_total - Total up the file system space, according to the rindex.
886b1416 674 * @sdp: the filesystem
7ae8fa84
RP
675 *
676 */
677u64 gfs2_ri_total(struct gfs2_sbd *sdp)
678{
679 u64 total_data = 0;
680 struct inode *inode = sdp->sd_rindex;
681 struct gfs2_inode *ip = GFS2_I(inode);
7ae8fa84 682 char buf[sizeof(struct gfs2_rindex)];
7ae8fa84
RP
683 int error, rgrps;
684
7ae8fa84
RP
685 for (rgrps = 0;; rgrps++) {
686 loff_t pos = rgrps * sizeof(struct gfs2_rindex);
687
bcd7278d 688 if (pos + sizeof(struct gfs2_rindex) > i_size_read(inode))
7ae8fa84 689 break;
4306629e 690 error = gfs2_internal_read(ip, buf, &pos,
7ae8fa84
RP
691 sizeof(struct gfs2_rindex));
692 if (error != sizeof(struct gfs2_rindex))
693 break;
bb8d8a6f 694 total_data += be32_to_cpu(((struct gfs2_rindex *)buf)->ri_data);
7ae8fa84 695 }
7ae8fa84
RP
696 return total_data;
697}
698
6aad1c3d 699static int rgd_insert(struct gfs2_rgrpd *rgd)
7c9ca621
BP
700{
701 struct gfs2_sbd *sdp = rgd->rd_sbd;
702 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL;
703
704 /* Figure out where to put new node */
705 while (*newn) {
706 struct gfs2_rgrpd *cur = rb_entry(*newn, struct gfs2_rgrpd,
707 rd_node);
708
709 parent = *newn;
710 if (rgd->rd_addr < cur->rd_addr)
711 newn = &((*newn)->rb_left);
712 else if (rgd->rd_addr > cur->rd_addr)
713 newn = &((*newn)->rb_right);
714 else
6aad1c3d 715 return -EEXIST;
7c9ca621
BP
716 }
717
718 rb_link_node(&rgd->rd_node, parent, newn);
719 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree);
6aad1c3d
BP
720 sdp->sd_rgrps++;
721 return 0;
7c9ca621
BP
722}
723
b3b94faa 724/**
6c53267f 725 * read_rindex_entry - Pull in a new resource index entry from the disk
4306629e 726 * @ip: Pointer to the rindex inode
b3b94faa 727 *
8339ee54 728 * Returns: 0 on success, > 0 on EOF, error code otherwise
6c53267f
RP
729 */
730
4306629e 731static int read_rindex_entry(struct gfs2_inode *ip)
6c53267f
RP
732{
733 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
734 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex);
8339ee54 735 struct gfs2_rindex buf;
6c53267f
RP
736 int error;
737 struct gfs2_rgrpd *rgd;
738
8339ee54
SW
739 if (pos >= i_size_read(&ip->i_inode))
740 return 1;
741
4306629e 742 error = gfs2_internal_read(ip, (char *)&buf, &pos,
6c53267f 743 sizeof(struct gfs2_rindex));
8339ee54
SW
744
745 if (error != sizeof(struct gfs2_rindex))
746 return (error == 0) ? 1 : error;
6c53267f 747
6bdd9be6 748 rgd = kmem_cache_zalloc(gfs2_rgrpd_cachep, GFP_NOFS);
6c53267f
RP
749 error = -ENOMEM;
750 if (!rgd)
751 return error;
752
6c53267f 753 rgd->rd_sbd = sdp;
8339ee54
SW
754 rgd->rd_addr = be64_to_cpu(buf.ri_addr);
755 rgd->rd_length = be32_to_cpu(buf.ri_length);
756 rgd->rd_data0 = be64_to_cpu(buf.ri_data0);
757 rgd->rd_data = be32_to_cpu(buf.ri_data);
758 rgd->rd_bitbytes = be32_to_cpu(buf.ri_bitbytes);
8e2e0047 759 spin_lock_init(&rgd->rd_rsspin);
7c9ca621 760
6c53267f
RP
761 error = compute_bitstructs(rgd);
762 if (error)
8339ee54 763 goto fail;
6c53267f 764
bb8d8a6f 765 error = gfs2_glock_get(sdp, rgd->rd_addr,
6c53267f
RP
766 &gfs2_rgrp_glops, CREATE, &rgd->rd_gl);
767 if (error)
8339ee54 768 goto fail;
6c53267f
RP
769
770 rgd->rd_gl->gl_object = rgd;
90306c41 771 rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lvb;
cf45b752 772 rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
7c9ca621
BP
773 if (rgd->rd_data > sdp->sd_max_rg_data)
774 sdp->sd_max_rg_data = rgd->rd_data;
8339ee54 775 spin_lock(&sdp->sd_rindex_spin);
6aad1c3d 776 error = rgd_insert(rgd);
8339ee54 777 spin_unlock(&sdp->sd_rindex_spin);
6aad1c3d
BP
778 if (!error)
779 return 0;
780
781 error = 0; /* someone else read in the rgrp; free it and ignore it */
c1ac539e 782 gfs2_glock_put(rgd->rd_gl);
8339ee54
SW
783
784fail:
785 kfree(rgd->rd_bits);
786 kmem_cache_free(gfs2_rgrpd_cachep, rgd);
6c53267f
RP
787 return error;
788}
789
790/**
791 * gfs2_ri_update - Pull in a new resource index from the disk
792 * @ip: pointer to the rindex inode
793 *
b3b94faa
DT
794 * Returns: 0 on successful update, error code otherwise
795 */
796
8339ee54 797static int gfs2_ri_update(struct gfs2_inode *ip)
b3b94faa 798{
feaa7bba 799 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
800 int error;
801
8339ee54 802 do {
4306629e 803 error = read_rindex_entry(ip);
8339ee54
SW
804 } while (error == 0);
805
806 if (error < 0)
807 return error;
b3b94faa 808
cf45b752 809 sdp->sd_rindex_uptodate = 1;
6c53267f
RP
810 return 0;
811}
b3b94faa 812
b3b94faa 813/**
8339ee54 814 * gfs2_rindex_update - Update the rindex if required
b3b94faa 815 * @sdp: The GFS2 superblock
b3b94faa
DT
816 *
817 * We grab a lock on the rindex inode to make sure that it doesn't
818 * change whilst we are performing an operation. We keep this lock
819 * for quite long periods of time compared to other locks. This
820 * doesn't matter, since it is shared and it is very, very rarely
821 * accessed in the exclusive mode (i.e. only when expanding the filesystem).
822 *
823 * This makes sure that we're using the latest copy of the resource index
824 * special file, which might have been updated if someone expanded the
825 * filesystem (via gfs2_grow utility), which adds new resource groups.
826 *
8339ee54 827 * Returns: 0 on success, error code otherwise
b3b94faa
DT
828 */
829
8339ee54 830int gfs2_rindex_update(struct gfs2_sbd *sdp)
b3b94faa 831{
feaa7bba 832 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex);
b3b94faa 833 struct gfs2_glock *gl = ip->i_gl;
8339ee54
SW
834 struct gfs2_holder ri_gh;
835 int error = 0;
a365fbf3 836 int unlock_required = 0;
b3b94faa
DT
837
838 /* Read new copy from disk if we don't have the latest */
cf45b752 839 if (!sdp->sd_rindex_uptodate) {
a365fbf3
SW
840 if (!gfs2_glock_is_locked_by_me(gl)) {
841 error = gfs2_glock_nq_init(gl, LM_ST_SHARED, 0, &ri_gh);
842 if (error)
6aad1c3d 843 return error;
a365fbf3
SW
844 unlock_required = 1;
845 }
8339ee54 846 if (!sdp->sd_rindex_uptodate)
b3b94faa 847 error = gfs2_ri_update(ip);
a365fbf3
SW
848 if (unlock_required)
849 gfs2_glock_dq_uninit(&ri_gh);
b3b94faa
DT
850 }
851
852 return error;
853}
854
42d52e38 855static void gfs2_rgrp_in(struct gfs2_rgrpd *rgd, const void *buf)
bb8d8a6f
SW
856{
857 const struct gfs2_rgrp *str = buf;
42d52e38 858 u32 rg_flags;
bb8d8a6f 859
42d52e38 860 rg_flags = be32_to_cpu(str->rg_flags);
09010978 861 rg_flags &= ~GFS2_RDF_MASK;
1ce97e56
SW
862 rgd->rd_flags &= GFS2_RDF_MASK;
863 rgd->rd_flags |= rg_flags;
cfc8b549 864 rgd->rd_free = be32_to_cpu(str->rg_free);
73f74948 865 rgd->rd_dinodes = be32_to_cpu(str->rg_dinodes);
d8b71f73 866 rgd->rd_igeneration = be64_to_cpu(str->rg_igeneration);
bb8d8a6f
SW
867}
868
42d52e38 869static void gfs2_rgrp_out(struct gfs2_rgrpd *rgd, void *buf)
bb8d8a6f
SW
870{
871 struct gfs2_rgrp *str = buf;
872
09010978 873 str->rg_flags = cpu_to_be32(rgd->rd_flags & ~GFS2_RDF_MASK);
cfc8b549 874 str->rg_free = cpu_to_be32(rgd->rd_free);
73f74948 875 str->rg_dinodes = cpu_to_be32(rgd->rd_dinodes);
bb8d8a6f 876 str->__pad = cpu_to_be32(0);
d8b71f73 877 str->rg_igeneration = cpu_to_be64(rgd->rd_igeneration);
bb8d8a6f
SW
878 memset(&str->rg_reserved, 0, sizeof(str->rg_reserved));
879}
880
90306c41
BM
881static int gfs2_rgrp_lvb_valid(struct gfs2_rgrpd *rgd)
882{
883 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
884 struct gfs2_rgrp *str = (struct gfs2_rgrp *)rgd->rd_bits[0].bi_bh->b_data;
885
886 if (rgl->rl_flags != str->rg_flags || rgl->rl_free != str->rg_free ||
887 rgl->rl_dinodes != str->rg_dinodes ||
888 rgl->rl_igeneration != str->rg_igeneration)
889 return 0;
890 return 1;
891}
892
893static void gfs2_rgrp_ondisk2lvb(struct gfs2_rgrp_lvb *rgl, const void *buf)
894{
895 const struct gfs2_rgrp *str = buf;
896
897 rgl->rl_magic = cpu_to_be32(GFS2_MAGIC);
898 rgl->rl_flags = str->rg_flags;
899 rgl->rl_free = str->rg_free;
900 rgl->rl_dinodes = str->rg_dinodes;
901 rgl->rl_igeneration = str->rg_igeneration;
902 rgl->__pad = 0UL;
903}
904
905static void update_rgrp_lvb_unlinked(struct gfs2_rgrpd *rgd, u32 change)
906{
907 struct gfs2_rgrp_lvb *rgl = rgd->rd_rgl;
908 u32 unlinked = be32_to_cpu(rgl->rl_unlinked) + change;
909 rgl->rl_unlinked = cpu_to_be32(unlinked);
910}
911
912static u32 count_unlinked(struct gfs2_rgrpd *rgd)
913{
914 struct gfs2_bitmap *bi;
915 const u32 length = rgd->rd_length;
916 const u8 *buffer = NULL;
917 u32 i, goal, count = 0;
918
919 for (i = 0, bi = rgd->rd_bits; i < length; i++, bi++) {
920 goal = 0;
921 buffer = bi->bi_bh->b_data + bi->bi_offset;
922 WARN_ON(!buffer_uptodate(bi->bi_bh));
923 while (goal < bi->bi_len * GFS2_NBBY) {
924 goal = gfs2_bitfit(buffer, bi->bi_len, goal,
925 GFS2_BLKST_UNLINKED);
926 if (goal == BFITNOENT)
927 break;
928 count++;
929 goal++;
930 }
931 }
932
933 return count;
934}
935
936
b3b94faa 937/**
90306c41
BM
938 * gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
939 * @rgd: the struct gfs2_rgrpd describing the RG to read in
b3b94faa
DT
940 *
941 * Read in all of a Resource Group's header and bitmap blocks.
942 * Caller must eventually call gfs2_rgrp_go_unlock() to release the bitmaps.
943 *
944 * Returns: errno
945 */
946
90306c41 947int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
b3b94faa
DT
948{
949 struct gfs2_sbd *sdp = rgd->rd_sbd;
950 struct gfs2_glock *gl = rgd->rd_gl;
bb8d8a6f 951 unsigned int length = rgd->rd_length;
b3b94faa
DT
952 struct gfs2_bitmap *bi;
953 unsigned int x, y;
954 int error;
955
90306c41
BM
956 if (rgd->rd_bits[0].bi_bh != NULL)
957 return 0;
958
b3b94faa
DT
959 for (x = 0; x < length; x++) {
960 bi = rgd->rd_bits + x;
bb8d8a6f 961 error = gfs2_meta_read(gl, rgd->rd_addr + x, 0, &bi->bi_bh);
b3b94faa
DT
962 if (error)
963 goto fail;
964 }
965
966 for (y = length; y--;) {
967 bi = rgd->rd_bits + y;
7276b3b0 968 error = gfs2_meta_wait(sdp, bi->bi_bh);
b3b94faa
DT
969 if (error)
970 goto fail;
feaa7bba 971 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB :
b3b94faa
DT
972 GFS2_METATYPE_RG)) {
973 error = -EIO;
974 goto fail;
975 }
976 }
977
cf45b752 978 if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
60a0b8f9
SW
979 for (x = 0; x < length; x++)
980 clear_bit(GBF_FULL, &rgd->rd_bits[x].bi_flags);
42d52e38 981 gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
1ce97e56 982 rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
7c9ca621 983 rgd->rd_free_clone = rgd->rd_free;
b3b94faa 984 }
90306c41
BM
985 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
986 rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
987 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
988 rgd->rd_bits[0].bi_bh->b_data);
989 }
990 else if (sdp->sd_args.ar_rgrplvb) {
991 if (!gfs2_rgrp_lvb_valid(rgd)){
992 gfs2_consist_rgrpd(rgd);
993 error = -EIO;
994 goto fail;
995 }
996 if (rgd->rd_rgl->rl_unlinked == 0)
997 rgd->rd_flags &= ~GFS2_RDF_CHECK;
998 }
b3b94faa
DT
999 return 0;
1000
feaa7bba 1001fail:
b3b94faa
DT
1002 while (x--) {
1003 bi = rgd->rd_bits + x;
1004 brelse(bi->bi_bh);
1005 bi->bi_bh = NULL;
1006 gfs2_assert_warn(sdp, !bi->bi_clone);
1007 }
b3b94faa
DT
1008
1009 return error;
1010}
1011
90306c41
BM
1012int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
1013{
1014 u32 rl_flags;
1015
1016 if (rgd->rd_flags & GFS2_RDF_UPTODATE)
1017 return 0;
1018
1019 if (be32_to_cpu(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
1020 return gfs2_rgrp_bh_get(rgd);
1021
1022 rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
1023 rl_flags &= ~GFS2_RDF_MASK;
1024 rgd->rd_flags &= GFS2_RDF_MASK;
1025 rgd->rd_flags |= (rl_flags | GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
1026 if (rgd->rd_rgl->rl_unlinked == 0)
1027 rgd->rd_flags &= ~GFS2_RDF_CHECK;
1028 rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
1029 rgd->rd_free_clone = rgd->rd_free;
1030 rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
1031 rgd->rd_igeneration = be64_to_cpu(rgd->rd_rgl->rl_igeneration);
1032 return 0;
1033}
1034
1035int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
1036{
1037 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
1038 struct gfs2_sbd *sdp = rgd->rd_sbd;
1039
1040 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
1041 return 0;
1042 return gfs2_rgrp_bh_get((struct gfs2_rgrpd *)gh->gh_gl->gl_object);
1043}
1044
b3b94faa 1045/**
7c9ca621 1046 * gfs2_rgrp_go_unlock - Release RG bitmaps read in with gfs2_rgrp_bh_get()
886b1416 1047 * @gh: The glock holder for the resource group
b3b94faa
DT
1048 *
1049 */
1050
7c9ca621 1051void gfs2_rgrp_go_unlock(struct gfs2_holder *gh)
b3b94faa 1052{
7c9ca621 1053 struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
bb8d8a6f 1054 int x, length = rgd->rd_length;
b3b94faa 1055
b3b94faa
DT
1056 for (x = 0; x < length; x++) {
1057 struct gfs2_bitmap *bi = rgd->rd_bits + x;
90306c41
BM
1058 if (bi->bi_bh) {
1059 brelse(bi->bi_bh);
1060 bi->bi_bh = NULL;
1061 }
b3b94faa
DT
1062 }
1063
b3b94faa
DT
1064}
1065
66fc061b 1066int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
7c9ca621 1067 struct buffer_head *bh,
66fc061b 1068 const struct gfs2_bitmap *bi, unsigned minlen, u64 *ptrimmed)
f15ab561
SW
1069{
1070 struct super_block *sb = sdp->sd_vfs;
1071 struct block_device *bdev = sb->s_bdev;
1072 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize /
e1defc4f 1073 bdev_logical_block_size(sb->s_bdev);
f15ab561 1074 u64 blk;
64d576ba 1075 sector_t start = 0;
f15ab561
SW
1076 sector_t nr_sects = 0;
1077 int rv;
1078 unsigned int x;
66fc061b
SW
1079 u32 trimmed = 0;
1080 u8 diff;
f15ab561
SW
1081
1082 for (x = 0; x < bi->bi_len; x++) {
66fc061b
SW
1083 const u8 *clone = bi->bi_clone ? bi->bi_clone : bi->bi_bh->b_data;
1084 clone += bi->bi_offset;
1085 clone += x;
1086 if (bh) {
1087 const u8 *orig = bh->b_data + bi->bi_offset + x;
1088 diff = ~(*orig | (*orig >> 1)) & (*clone | (*clone >> 1));
1089 } else {
1090 diff = ~(*clone | (*clone >> 1));
1091 }
f15ab561
SW
1092 diff &= 0x55;
1093 if (diff == 0)
1094 continue;
1095 blk = offset + ((bi->bi_start + x) * GFS2_NBBY);
1096 blk *= sects_per_blk; /* convert to sectors */
1097 while(diff) {
1098 if (diff & 1) {
1099 if (nr_sects == 0)
1100 goto start_new_extent;
1101 if ((start + nr_sects) != blk) {
66fc061b
SW
1102 if (nr_sects >= minlen) {
1103 rv = blkdev_issue_discard(bdev,
1104 start, nr_sects,
1105 GFP_NOFS, 0);
1106 if (rv)
1107 goto fail;
1108 trimmed += nr_sects;
1109 }
f15ab561
SW
1110 nr_sects = 0;
1111start_new_extent:
1112 start = blk;
1113 }
1114 nr_sects += sects_per_blk;
1115 }
1116 diff >>= 2;
1117 blk += sects_per_blk;
1118 }
1119 }
66fc061b 1120 if (nr_sects >= minlen) {
dd3932ed 1121 rv = blkdev_issue_discard(bdev, start, nr_sects, GFP_NOFS, 0);
f15ab561
SW
1122 if (rv)
1123 goto fail;
66fc061b 1124 trimmed += nr_sects;
f15ab561 1125 }
66fc061b
SW
1126 if (ptrimmed)
1127 *ptrimmed = trimmed;
1128 return 0;
1129
f15ab561 1130fail:
66fc061b
SW
1131 if (sdp->sd_args.ar_discard)
1132 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem", rv);
f15ab561 1133 sdp->sd_args.ar_discard = 0;
66fc061b
SW
1134 return -EIO;
1135}
1136
1137/**
1138 * gfs2_fitrim - Generate discard requests for unused bits of the filesystem
1139 * @filp: Any file on the filesystem
1140 * @argp: Pointer to the arguments (also used to pass result)
1141 *
1142 * Returns: 0 on success, otherwise error code
1143 */
1144
1145int gfs2_fitrim(struct file *filp, void __user *argp)
1146{
1147 struct inode *inode = filp->f_dentry->d_inode;
1148 struct gfs2_sbd *sdp = GFS2_SB(inode);
1149 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev);
1150 struct buffer_head *bh;
1151 struct gfs2_rgrpd *rgd;
1152 struct gfs2_rgrpd *rgd_end;
1153 struct gfs2_holder gh;
1154 struct fstrim_range r;
1155 int ret = 0;
1156 u64 amt;
1157 u64 trimmed = 0;
1158 unsigned int x;
1159
1160 if (!capable(CAP_SYS_ADMIN))
1161 return -EPERM;
1162
1163 if (!blk_queue_discard(q))
1164 return -EOPNOTSUPP;
1165
66fc061b
SW
1166 if (argp == NULL) {
1167 r.start = 0;
1168 r.len = ULLONG_MAX;
1169 r.minlen = 0;
1170 } else if (copy_from_user(&r, argp, sizeof(r)))
1171 return -EFAULT;
1172
5e2f7d61
BP
1173 ret = gfs2_rindex_update(sdp);
1174 if (ret)
1175 return ret;
1176
66fc061b
SW
1177 rgd = gfs2_blk2rgrpd(sdp, r.start, 0);
1178 rgd_end = gfs2_blk2rgrpd(sdp, r.start + r.len, 0);
1179
1180 while (1) {
1181
1182 ret = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
1183 if (ret)
1184 goto out;
1185
1186 if (!(rgd->rd_flags & GFS2_RGF_TRIMMED)) {
1187 /* Trim each bitmap in the rgrp */
1188 for (x = 0; x < rgd->rd_length; x++) {
1189 struct gfs2_bitmap *bi = rgd->rd_bits + x;
1190 ret = gfs2_rgrp_send_discards(sdp, rgd->rd_data0, NULL, bi, r.minlen, &amt);
1191 if (ret) {
1192 gfs2_glock_dq_uninit(&gh);
1193 goto out;
1194 }
1195 trimmed += amt;
1196 }
1197
1198 /* Mark rgrp as having been trimmed */
1199 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0);
1200 if (ret == 0) {
1201 bh = rgd->rd_bits[0].bi_bh;
1202 rgd->rd_flags |= GFS2_RGF_TRIMMED;
1203 gfs2_trans_add_bh(rgd->rd_gl, bh, 1);
1204 gfs2_rgrp_out(rgd, bh->b_data);
90306c41 1205 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, bh->b_data);
66fc061b
SW
1206 gfs2_trans_end(sdp);
1207 }
1208 }
1209 gfs2_glock_dq_uninit(&gh);
1210
1211 if (rgd == rgd_end)
1212 break;
1213
1214 rgd = gfs2_rgrpd_get_next(rgd);
1215 }
1216
1217out:
1218 r.len = trimmed << 9;
1219 if (argp && copy_to_user(argp, &r, sizeof(r)))
1220 return -EFAULT;
1221
1222 return ret;
f15ab561
SW
1223}
1224
8e2e0047
BP
1225/**
1226 * rs_insert - insert a new multi-block reservation into the rgrp's rb_tree
1227 * @bi: the bitmap with the blocks
1228 * @ip: the inode structure
1229 * @biblk: the 32-bit block number relative to the start of the bitmap
1230 * @amount: the number of blocks to reserve
1231 *
1232 * Returns: NULL - reservation was already taken, so not inserted
1233 * pointer to the inserted reservation
1234 */
1235static struct gfs2_blkreserv *rs_insert(struct gfs2_bitmap *bi,
1236 struct gfs2_inode *ip, u32 biblk,
1237 int amount)
1238{
1239 struct rb_node **newn, *parent = NULL;
1240 int rc;
1241 struct gfs2_blkreserv *rs = ip->i_res;
4a993fb1 1242 struct gfs2_rgrpd *rgd = rs->rs_rbm.rgd;
8e2e0047
BP
1243 u64 fsblock = gfs2_bi2rgd_blk(bi, biblk) + rgd->rd_data0;
1244
1245 spin_lock(&rgd->rd_rsspin);
1246 newn = &rgd->rd_rstree.rb_node;
1247 BUG_ON(!ip->i_res);
1248 BUG_ON(gfs2_rs_active(rs));
1249 /* Figure out where to put new node */
1250 /*BUG_ON(!gfs2_glock_is_locked_by_me(rgd->rd_gl));*/
1251 while (*newn) {
1252 struct gfs2_blkreserv *cur =
1253 rb_entry(*newn, struct gfs2_blkreserv, rs_node);
1254
1255 parent = *newn;
1256 rc = rs_cmp(fsblock, amount, cur);
1257 if (rc > 0)
1258 newn = &((*newn)->rb_right);
1259 else if (rc < 0)
1260 newn = &((*newn)->rb_left);
1261 else {
1262 spin_unlock(&rgd->rd_rsspin);
1263 return NULL; /* reservation already in use */
1264 }
1265 }
1266
1267 /* Do our reservation work */
1268 rs = ip->i_res;
1269 rs->rs_free = amount;
4a993fb1
SW
1270 rs->rs_rbm.offset = biblk;
1271 rs->rs_rbm.bi = bi;
8e2e0047
BP
1272 rb_link_node(&rs->rs_node, parent, newn);
1273 rb_insert_color(&rs->rs_node, &rgd->rd_rstree);
1274
8e2e0047
BP
1275 /* Do our rgrp accounting for the reservation */
1276 rgd->rd_reserved += amount; /* blocks reserved */
1277 rgd->rd_rs_cnt++; /* number of in-tree reservations */
1278 spin_unlock(&rgd->rd_rsspin);
1279 trace_gfs2_rs(ip, rs, TRACE_RS_INSERT);
1280 return rs;
1281}
1282
1283/**
1284 * unclaimed_blocks - return number of blocks that aren't spoken for
1285 */
1286static u32 unclaimed_blocks(struct gfs2_rgrpd *rgd)
1287{
1288 return rgd->rd_free_clone - rgd->rd_reserved;
1289}
1290
1291/**
1292 * rg_mblk_search - find a group of multiple free blocks
1293 * @rgd: the resource group descriptor
1294 * @ip: pointer to the inode for which we're reserving blocks
1295 * @requested: the number of blocks to be reserved
1296 *
1297 * This is very similar to rgblk_search, except we're looking for whole
1298 * 64-bit words that represent a chunk of 32 free blocks. I'm only focusing
1299 * on aligned dwords for speed's sake.
1300 *
1301 * Returns: 0 if successful or BFITNOENT if there isn't enough free space
1302 */
1303
71f890f7 1304static int rg_mblk_search(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip, unsigned requested)
8e2e0047
BP
1305{
1306 struct gfs2_bitmap *bi = rgd->rd_bits;
1307 const u32 length = rgd->rd_length;
1308 u32 blk;
1309 unsigned int buf, x, search_bytes;
1310 u8 *buffer = NULL;
1311 u8 *ptr, *end, *nonzero;
1312 u32 goal, rsv_bytes;
1313 struct gfs2_blkreserv *rs;
1314 u32 best_rs_bytes, unclaimed;
1315 int best_rs_blocks;
1316
1317 /* Find bitmap block that contains bits for goal block */
1318 if (rgrp_contains_block(rgd, ip->i_goal))
1319 goal = ip->i_goal - rgd->rd_data0;
1320 else
1321 goal = rgd->rd_last_alloc;
1322 for (buf = 0; buf < length; buf++) {
1323 bi = rgd->rd_bits + buf;
1324 /* Convert scope of "goal" from rgrp-wide to within
1325 found bit block */
1326 if (goal < (bi->bi_start + bi->bi_len) * GFS2_NBBY) {
1327 goal -= bi->bi_start * GFS2_NBBY;
1328 goto do_search;
1329 }
1330 }
1331 buf = 0;
1332 goal = 0;
1333
1334do_search:
1335 best_rs_blocks = max_t(int, atomic_read(&ip->i_res->rs_sizehint),
1336 (RGRP_RSRV_MINBLKS * rgd->rd_length));
1337 best_rs_bytes = (best_rs_blocks *
1338 (1 + (RSRV_CONTENTION_FACTOR * rgd->rd_rs_cnt))) /
1339 GFS2_NBBY; /* 1 + is for our not-yet-created reservation */
1340 best_rs_bytes = ALIGN(best_rs_bytes, sizeof(u64));
1341 unclaimed = unclaimed_blocks(rgd);
1342 if (best_rs_bytes * GFS2_NBBY > unclaimed)
1343 best_rs_bytes = unclaimed >> GFS2_BIT_SIZE;
1344
1345 for (x = 0; x <= length; x++) {
1346 bi = rgd->rd_bits + buf;
1347
1348 if (test_bit(GBF_FULL, &bi->bi_flags))
1349 goto skip;
1350
1351 WARN_ON(!buffer_uptodate(bi->bi_bh));
1352 if (bi->bi_clone)
1353 buffer = bi->bi_clone + bi->bi_offset;
1354 else
1355 buffer = bi->bi_bh->b_data + bi->bi_offset;
1356
1357 /* We have to keep the reservations aligned on u64 boundaries
1358 otherwise we could get situations where a byte can't be
1359 used because it's after a reservation, but a free bit still
1360 is within the reservation's area. */
1361 ptr = buffer + ALIGN(goal >> GFS2_BIT_SIZE, sizeof(u64));
1362 end = (buffer + bi->bi_len);
1363 while (ptr < end) {
1364 rsv_bytes = 0;
1365 if ((ptr + best_rs_bytes) <= end)
1366 search_bytes = best_rs_bytes;
1367 else
1368 search_bytes = end - ptr;
1369 BUG_ON(!search_bytes);
1370 nonzero = memchr_inv(ptr, 0, search_bytes);
1371 /* If the lot is all zeroes, reserve the whole size. If
1372 there's enough zeroes to satisfy the request, use
1373 what we can. If there's not enough, keep looking. */
1374 if (nonzero == NULL)
1375 rsv_bytes = search_bytes;
71f890f7 1376 else if ((nonzero - ptr) * GFS2_NBBY >= requested)
8e2e0047
BP
1377 rsv_bytes = (nonzero - ptr);
1378
1379 if (rsv_bytes) {
1380 blk = ((ptr - buffer) * GFS2_NBBY);
1381 BUG_ON(blk >= bi->bi_len * GFS2_NBBY);
1382 rs = rs_insert(bi, ip, blk,
1383 rsv_bytes * GFS2_NBBY);
1384 if (IS_ERR(rs))
1385 return PTR_ERR(rs);
1386 if (rs)
1387 return 0;
1388 }
1389 ptr += ALIGN(search_bytes, sizeof(u64));
1390 }
1391skip:
1392 /* Try next bitmap block (wrap back to rgrp header
1393 if at end) */
1394 buf++;
1395 buf %= length;
1396 goal = 0;
1397 }
1398
1399 return BFITNOENT;
1400}
1401
b3b94faa
DT
1402/**
1403 * try_rgrp_fit - See if a given reservation will fit in a given RG
1404 * @rgd: the RG data
54335b1f 1405 * @ip: the inode
b3b94faa
DT
1406 *
1407 * If there's room for the requested blocks to be allocated from the RG:
8e2e0047
BP
1408 * This will try to get a multi-block reservation first, and if that doesn't
1409 * fit, it will take what it can.
b3b94faa
DT
1410 *
1411 * Returns: 1 on success (it fits), 0 on failure (it doesn't fit)
1412 */
1413
71f890f7
SW
1414static int try_rgrp_fit(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip,
1415 unsigned requested)
b3b94faa 1416{
09010978 1417 if (rgd->rd_flags & (GFS2_RGF_NOALLOC | GFS2_RDF_ERROR))
a43a4906 1418 return 0;
8e2e0047
BP
1419 /* Look for a multi-block reservation. */
1420 if (unclaimed_blocks(rgd) >= RGRP_RSRV_MINBLKS &&
71f890f7 1421 rg_mblk_search(rgd, ip, requested) != BFITNOENT)
8e2e0047 1422 return 1;
71f890f7 1423 if (unclaimed_blocks(rgd) >= requested)
7c9ca621 1424 return 1;
b3b94faa 1425
8e2e0047 1426 return 0;
b3e47ca0
BP
1427}
1428
5b924ae2
SW
1429/**
1430 * gfs2_next_unreserved_block - Return next block that is not reserved
1431 * @rgd: The resource group
1432 * @block: The starting block
1433 * @ip: Ignore any reservations for this inode
1434 *
1435 * If the block does not appear in any reservation, then return the
1436 * block number unchanged. If it does appear in the reservation, then
1437 * keep looking through the tree of reservations in order to find the
1438 * first block number which is not reserved.
1439 */
1440
1441static u64 gfs2_next_unreserved_block(struct gfs2_rgrpd *rgd, u64 block,
1442 const struct gfs2_inode *ip)
1443{
1444 struct gfs2_blkreserv *rs;
1445 struct rb_node *n;
1446 int rc;
1447
1448 spin_lock(&rgd->rd_rsspin);
1449 n = rb_first(&rgd->rd_rstree);
1450 while (n) {
1451 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1452 rc = rs_cmp(block, 1, rs);
1453 if (rc < 0)
1454 n = n->rb_left;
1455 else if (rc > 0)
1456 n = n->rb_right;
1457 else
1458 break;
1459 }
1460
1461 if (n) {
1462 while ((rs_cmp(block, 1, rs) == 0) && (ip->i_res != rs)) {
1463 block = gfs2_rbm_to_block(&rs->rs_rbm) + rs->rs_free;
1464 n = rb_next(&rs->rs_node);
1465 if (n == NULL)
1466 break;
1467 rs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1468 }
1469 }
1470
1471 spin_unlock(&rgd->rd_rsspin);
1472 return block;
1473}
1474
1475/**
1476 * gfs2_rbm_from_block - Set the rbm based upon rgd and block number
1477 * @rbm: The rbm with rgd already set correctly
1478 * @block: The block number (filesystem relative)
1479 *
1480 * This sets the bi and offset members of an rbm based on a
1481 * resource group and a filesystem relative block number. The
1482 * resource group must be set in the rbm on entry, the bi and
1483 * offset members will be set by this function.
1484 *
1485 * Returns: 0 on success, or an error code
1486 */
1487
1488static int gfs2_rbm_from_block(struct gfs2_rbm *rbm, u64 block)
1489{
1490 u64 rblock = block - rbm->rgd->rd_data0;
1491 u32 goal = (u32)rblock;
1492 int x;
1493
1494 if (WARN_ON_ONCE(rblock > UINT_MAX))
1495 return -EINVAL;
1496
1497 for (x = 0; x < rbm->rgd->rd_length; x++) {
1498 rbm->bi = rbm->rgd->rd_bits + x;
1499 if (goal < (rbm->bi->bi_start + rbm->bi->bi_len) * GFS2_NBBY) {
1500 rbm->offset = goal - (rbm->bi->bi_start * GFS2_NBBY);
1501 return 0;
1502 }
1503 }
1504
1505 return -E2BIG;
1506}
1507
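/*
 * Illustrative sketch (not part of the original file): the rbm round trip
 * that the conversions above rely on. Starting from a filesystem block
 * inside @rgd, gfs2_rbm_from_block() fills in ->bi and ->offset, and
 * gfs2_rbm_to_block() (declared in the rgrp headers) maps the rbm straight
 * back to the same block number.
 */
static inline bool example_rbm_round_trip(struct gfs2_rgrpd *rgd, u64 block)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };

	if (gfs2_rbm_from_block(&rbm, block))
		return false;		/* block is outside this rgrp */
	return gfs2_rbm_to_block(&rbm) == block;
}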
1508/**
1509 * gfs2_reservation_check_and_update - Check for reservations during block alloc
1510 * @rbm: The current position in the resource group
1511 *
1512 * This checks the current position in the rgrp to see whether there is
1513 * a reservation covering this block. If not then this function is a
1514 * no-op. If there is, then the position is moved to the end of the
1515 * contiguous reservation(s) so that we are pointing at the first
1516 * non-reserved block.
1517 *
1518 * Returns: 0 if no reservation, 1 if @rbm has changed, otherwise an error
1519 */
1520
1521static int gfs2_reservation_check_and_update(struct gfs2_rbm *rbm,
1522 const struct gfs2_inode *ip)
1523{
1524 u64 block = gfs2_rbm_to_block(rbm);
1525 u64 nblock;
1526 int ret;
1527
1528 nblock = gfs2_next_unreserved_block(rbm->rgd, block, ip);
1529 if (nblock == block)
1530 return 0;
1531 ret = gfs2_rbm_from_block(rbm, nblock);
1532 if (ret < 0)
1533 return ret;
1534 return 1;
1535}
1536
1537/**
1538 * gfs2_rbm_find - Look for blocks of a particular state
1539 * @rbm: Value/result starting position and final position
1540 * @state: The state which we want to find
1541 * @ip: If set, check for reservations
1542 * @nowrap: Stop looking at the end of the rgrp, rather than wrapping
1543 * around until we've reached the starting point.
1544 *
1545 * Side effects:
1546 * - If looking for free blocks, we set GBF_FULL on each bitmap which
1547 * has no free blocks in it.
1548 *
1549 * Returns: 0 on success, -ENOSPC if there is no block of the requested state
1550 */
1551
1552static int gfs2_rbm_find(struct gfs2_rbm *rbm, u8 state,
1553 const struct gfs2_inode *ip, bool nowrap)
1554{
1555 struct buffer_head *bh;
1556 struct gfs2_bitmap *initial_bi;
1557 u32 initial_offset;
1558 u32 offset;
1559 u8 *buffer;
1560 int index;
1561 int n = 0;
1562 int iters = rbm->rgd->rd_length;
1563 int ret;
1564
1565 /* If we are not starting at the beginning of a bitmap, then we
1566 * need to add one to the bitmap count to ensure that we search
1567 * the starting bitmap twice.
1568 */
1569 if (rbm->offset != 0)
1570 iters++;
1571
1572 while(1) {
1573 if (test_bit(GBF_FULL, &rbm->bi->bi_flags) &&
1574 (state == GFS2_BLKST_FREE))
1575 goto next_bitmap;
1576
1577 bh = rbm->bi->bi_bh;
1578 buffer = bh->b_data + rbm->bi->bi_offset;
1579 WARN_ON(!buffer_uptodate(bh));
1580 if (state != GFS2_BLKST_UNLINKED && rbm->bi->bi_clone)
1581 buffer = rbm->bi->bi_clone + rbm->bi->bi_offset;
1582find_next:
1583 initial_offset = rbm->offset;
1584 offset = gfs2_bitfit(buffer, rbm->bi->bi_len, rbm->offset, state);
1585 if (offset == BFITNOENT)
1586 goto bitmap_full;
1587 rbm->offset = offset;
1588 if (ip == NULL)
1589 return 0;
1590
1591 initial_bi = rbm->bi;
1592 ret = gfs2_reservation_check_and_update(rbm, ip);
1593 if (ret == 0)
1594 return 0;
1595 if (ret > 0) {
1596 n += (rbm->bi - initial_bi);
1597 goto find_next;
1598 }
1599 return ret;
1600
1601bitmap_full: /* Mark bitmap as full and fall through */
1602 if ((state == GFS2_BLKST_FREE) && initial_offset == 0)
1603 set_bit(GBF_FULL, &rbm->bi->bi_flags);
1604
1605next_bitmap: /* Find next bitmap in the rgrp */
1606 rbm->offset = 0;
1607 index = rbm->bi - rbm->rgd->rd_bits;
1608 index++;
1609 if (index == rbm->rgd->rd_length)
1610 index = 0;
1611 rbm->bi = &rbm->rgd->rd_bits[index];
1612 if ((index == 0) && nowrap)
1613 break;
1614 n++;
1615 if (n >= iters)
1616 break;
1617 }
1618
1619 return -ENOSPC;
1620}
1621
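/*
 * Illustrative sketch (not part of the original file): the intended calling
 * pattern for gfs2_rbm_find(). Position the rbm at a goal block, then search
 * forward (honouring @ip's reservations) for the first free block, wrapping
 * around the rgrp since nowrap is false here.
 */
static inline int example_find_free(struct gfs2_rgrpd *rgd, u64 goal,
				    const struct gfs2_inode *ip, u64 *blkp)
{
	struct gfs2_rbm rbm = { .rgd = rgd, };
	int ret;

	ret = gfs2_rbm_from_block(&rbm, goal);
	if (ret)
		return ret;
	ret = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);
	if (ret)
		return ret;			/* -ENOSPC if nothing free */
	*blkp = gfs2_rbm_to_block(&rbm);
	return 0;
}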
c8cdf479
SW
1622/**
1623 * try_rgrp_unlink - Look for any unlinked, allocated, but unused inodes
1624 * @rgd: The rgrp
886b1416
BP
1625 * @last_unlinked: block address of the last dinode we unlinked
1626 * @skip: block address we should explicitly not unlink
c8cdf479 1627 *
1a0eae88
BP
1628 * Returns: nothing. Any unlinked, allocated but unused inodes found
1629 * are queued for deletion via the delete workqueue.
c8cdf479
SW
1630 */
1631
044b9414 1632static void try_rgrp_unlink(struct gfs2_rgrpd *rgd, u64 *last_unlinked, u64 skip)
c8cdf479 1633{
5b924ae2 1634 u64 block;
5f3eae75 1635 struct gfs2_sbd *sdp = rgd->rd_sbd;
044b9414
SW
1636 struct gfs2_glock *gl;
1637 struct gfs2_inode *ip;
1638 int error;
1639 int found = 0;
5b924ae2 1640 struct gfs2_rbm rbm = { .rgd = rgd, .bi = rgd->rd_bits, .offset = 0 };
c8cdf479 1641
5b924ae2 1642 while (1) {
5f3eae75 1643 down_write(&sdp->sd_log_flush_lock);
5b924ae2 1644 error = gfs2_rbm_find(&rbm, GFS2_BLKST_UNLINKED, NULL, true);
5f3eae75 1645 up_write(&sdp->sd_log_flush_lock);
5b924ae2
SW
1646 if (error == -ENOSPC)
1647 break;
1648 if (WARN_ON_ONCE(error))
24c73873 1649 break;
b3e47ca0 1650
5b924ae2
SW
1651 block = gfs2_rbm_to_block(&rbm);
1652 if (gfs2_rbm_from_block(&rbm, block + 1))
1653 break;
1654 if (*last_unlinked != NO_BLOCK && block <= *last_unlinked)
c8cdf479 1655 continue;
5b924ae2 1656 if (block == skip)
1e19a195 1657 continue;
5b924ae2 1658 *last_unlinked = block;
044b9414 1659
5b924ae2 1660 error = gfs2_glock_get(sdp, block, &gfs2_inode_glops, CREATE, &gl);
044b9414
SW
1661 if (error)
1662 continue;
1663
1664 /* If the inode is already in cache, we can ignore it here
1665 * because the existing inode disposal code will deal with
1666 * it when all refs have gone away. Accessing gl_object like
1667 * this is not safe in general. Here it is ok because we do
1668 * not dereference the pointer, and we only need an approx
1669 * answer to whether it is NULL or not.
1670 */
1671 ip = gl->gl_object;
1672
1673 if (ip || queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
1674 gfs2_glock_put(gl);
1675 else
1676 found++;
1677
1678 /* Limit reclaim to sensible number of tasks */
44ad37d6 1679 if (found > NR_CPUS)
044b9414 1680 return;
c8cdf479
SW
1681 }
1682
1683 rgd->rd_flags &= ~GFS2_RDF_CHECK;
044b9414 1684 return;
c8cdf479
SW
1685}
1686
b3b94faa 1687/**
666d1d8a 1688 * gfs2_inplace_reserve - Reserve space in the filesystem
b3b94faa 1689 * @ip: the inode to reserve space for
666d1d8a 1690 * @requested: the number of blocks to be reserved
b3b94faa
DT
1691 *
1692 * Returns: errno
1693 */
1694
666d1d8a 1695int gfs2_inplace_reserve(struct gfs2_inode *ip, u32 requested)
b3b94faa 1696{
feaa7bba 1697 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
8e2e0047 1698 struct gfs2_rgrpd *begin = NULL;
564e12b1 1699 struct gfs2_blkreserv *rs = ip->i_res;
666d1d8a
BP
1700 int error = 0, rg_locked, flags = LM_FLAG_TRY;
1701 u64 last_unlinked = NO_BLOCK;
7c9ca621 1702 int loops = 0;
b3b94faa 1703
90306c41
BM
1704 if (sdp->sd_args.ar_rgrplvb)
1705 flags |= GL_SKIP;
666d1d8a
BP
1706 if (gfs2_assert_warn(sdp, requested)) {
1707 error = -EINVAL;
1708 goto out;
1709 }
8e2e0047 1710 if (gfs2_rs_active(rs)) {
4a993fb1 1711 begin = rs->rs_rbm.rgd;
8e2e0047
BP
1712 flags = 0; /* Yoda: Do or do not. There is no try */
1713 } else if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, ip->i_goal)) {
4a993fb1 1714 rs->rs_rbm.rgd = begin = ip->i_rgd;
8e2e0047 1715 } else {
4a993fb1 1716 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1);
8e2e0047 1717 }
4a993fb1 1718 if (rs->rs_rbm.rgd == NULL)
7c9ca621
BP
1719 return -EBADSLT;
1720
1721 while (loops < 3) {
292c8c14
AD
1722 rg_locked = 0;
1723
4a993fb1 1724 if (gfs2_glock_is_locked_by_me(rs->rs_rbm.rgd->rd_gl)) {
292c8c14
AD
1725 rg_locked = 1;
1726 error = 0;
8e2e0047 1727 } else if (!loops && !gfs2_rs_active(rs) &&
4a993fb1 1728 rs->rs_rbm.rgd->rd_rs_cnt > RGRP_RSRV_MAX_CONTENDERS) {
8e2e0047
BP
1729 /* If the rgrp already is maxed out for contenders,
1730 we can eliminate it as a "first pass" without even
1731 requesting the rgrp glock. */
1732 error = GLR_TRYFAILED;
292c8c14 1733 } else {
4a993fb1 1734 error = gfs2_glock_nq_init(rs->rs_rbm.rgd->rd_gl,
8e2e0047
BP
1735 LM_ST_EXCLUSIVE, flags,
1736 &rs->rs_rgd_gh);
90306c41 1737 if (!error && sdp->sd_args.ar_rgrplvb) {
4a993fb1 1738 error = update_rgrp_lvb(rs->rs_rbm.rgd);
90306c41
BM
1739 if (error) {
1740 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
1741 return error;
1742 }
1743 }
292c8c14 1744 }
b3b94faa
DT
1745 switch (error) {
1746 case 0:
8e2e0047 1747 if (gfs2_rs_active(rs)) {
4a993fb1 1748 if (unclaimed_blocks(rs->rs_rbm.rgd) +
71f890f7 1749 rs->rs_free >= requested) {
4a993fb1 1750 ip->i_rgd = rs->rs_rbm.rgd;
8e2e0047
BP
1751 return 0;
1752 }
1753 /* We have a multi-block reservation, but the
1754 rgrp doesn't have enough free blocks to
1755 satisfy the request. Free the reservation
1756 and look for a suitable rgrp. */
4a993fb1 1757 gfs2_rs_deltree(ip, rs);
8e2e0047 1758 }
4a993fb1 1759 if (try_rgrp_fit(rs->rs_rbm.rgd, ip, requested)) {
90306c41 1760 if (sdp->sd_args.ar_rgrplvb)
4a993fb1
SW
1761 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
1762 ip->i_rgd = rs->rs_rbm.rgd;
7c9ca621 1763 return 0;
54335b1f 1764 }
4a993fb1 1765 if (rs->rs_rbm.rgd->rd_flags & GFS2_RDF_CHECK) {
90306c41 1766 if (sdp->sd_args.ar_rgrplvb)
4a993fb1
SW
1767 gfs2_rgrp_bh_get(rs->rs_rbm.rgd);
1768 try_rgrp_unlink(rs->rs_rbm.rgd, &last_unlinked,
666d1d8a 1769 ip->i_no_addr);
90306c41 1770 }
292c8c14 1771 if (!rg_locked)
564e12b1 1772 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
9cabcdbd 1773 /* fall through */
b3b94faa 1774 case GLR_TRYFAILED:
4a993fb1
SW
1775 rs->rs_rbm.rgd = gfs2_rgrpd_get_next(rs->rs_rbm.rgd);
1776 rs->rs_rbm.rgd = rs->rs_rbm.rgd ? : begin; /* if NULL, wrap */
1777 if (rs->rs_rbm.rgd != begin) /* If we didn't wrap */
666d1d8a
BP
1778 break;
1779
1780 flags &= ~LM_FLAG_TRY;
1781 loops++;
1782 /* Check that fs hasn't grown if writing to rindex */
1783 if (ip == GFS2_I(sdp->sd_rindex) &&
1784 !sdp->sd_rindex_uptodate) {
1785 error = gfs2_ri_update(ip);
1786 if (error)
1787 goto out;
1788 } else if (loops == 2)
1789 /* Flushing the log may release space */
1790 gfs2_log_flush(sdp, NULL);
b3b94faa 1791 break;
b3b94faa 1792 default:
666d1d8a 1793 goto out;
b3b94faa 1794 }
b3b94faa 1795 }
666d1d8a 1796 error = -ENOSPC;
b3b94faa 1797
564e12b1 1798out:
9ae32429 1799 return error;
b3b94faa
DT
1800}
1801
1802/**
1803 * gfs2_inplace_release - release an inplace reservation
1804 * @ip: the inode the reservation was taken out on
1805 *
1806 * Release a reservation made by gfs2_inplace_reserve().
1807 */
1808
1809void gfs2_inplace_release(struct gfs2_inode *ip)
1810{
564e12b1 1811 struct gfs2_blkreserv *rs = ip->i_res;
b3b94faa 1812
564e12b1
BP
1813 if (rs->rs_rgd_gh.gh_gl)
1814 gfs2_glock_dq_uninit(&rs->rs_rgd_gh);
b3b94faa
DT
1815}
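
/*
 * Illustrative sketch only, not part of this file's real call graph: a caller
 * is expected to bracket its allocation with gfs2_inplace_reserve() and
 * gfs2_inplace_release(). Quota locking and the surrounding transaction are
 * deliberately omitted here; the helper name is hypothetical.
 */
static int __maybe_unused example_with_rgrp_reserved(struct gfs2_inode *ip,
						     u32 requested)
{
	int error;

	error = gfs2_inplace_reserve(ip, requested);
	if (error)
		return error;	/* e.g. -ENOSPC or -EBADSLT */

	/* ... start a transaction and call gfs2_alloc_blocks() here ... */

	gfs2_inplace_release(ip);	/* drops rs->rs_rgd_gh if held */
	return 0;
}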
1816
1817/**
1818 * gfs2_get_block_type - Get the type of a block in a resource group
1819 * @rgd: the resource group holding the block
1820 * @block: the block number
1821 *
1822 * Returns: The block type (GFS2_BLKST_*)
1823 */
1824
acf7e244 1825static unsigned char gfs2_get_block_type(struct gfs2_rgrpd *rgd, u64 block)
b3b94faa 1826{
3983903a
SW
1827 struct gfs2_rbm rbm = { .rgd = rgd, };
1828 int ret;
b3b94faa 1829
3983903a
SW
1830 ret = gfs2_rbm_from_block(&rbm, block);
1831 WARN_ON_ONCE(ret != 0);
b3b94faa 1832
3983903a
SW
1833 return gfs2_testbit(rgd, rbm.bi->bi_bh->b_data + rbm.bi->bi_offset,
1834 rbm.bi->bi_len, rbm.offset);
b3b94faa
DT
1835}
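
/*
 * Illustrative sketch only: with the rbm-based lookup above, a file-local
 * helper can ask for the state of any block that falls inside @rgd while the
 * rgrp glock is held. The helper name is hypothetical.
 */
static bool __maybe_unused example_block_is_allocated(struct gfs2_rgrpd *rgd,
						      u64 block)
{
	unsigned char state = gfs2_get_block_type(rgd, block);

	return state != GFS2_BLKST_FREE;
}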
1836
60a0b8f9 1837
b3e47ca0
BP
1838/**
1839 * gfs2_alloc_extent - allocate an extent from a given bitmap
4a993fb1 1840 * @rbm: the resource group information
b3e47ca0
BP
1841 * @dinode: TRUE if the first block we allocate is for a dinode
1842 * @n: The requested extent length (value/result: set to the number of blocks allocated)
1843 *
1844 * Add the found bitmap buffer to the transaction.
1845 * Set the found bits to the new state (dinode or used) to change the blocks' allocation state.
1846 * Returns: starting block number of the extent (fs scope)
1847 */
4a993fb1
SW
1848static u64 gfs2_alloc_extent(const struct gfs2_rbm *rbm, bool dinode,
1849 unsigned int *n)
b3e47ca0 1850{
4a993fb1
SW
1851 struct gfs2_rgrpd *rgd = rbm->rgd;
1852 struct gfs2_bitmap *bi = rbm->bi;
1853 u32 blk = rbm->offset;
b3e47ca0 1854 const unsigned int elen = *n;
5b924ae2 1855 u32 goal;
b3e47ca0
BP
1856 const u8 *buffer = NULL;
1857
6a8099ed 1858 *n = 0;
b3e47ca0 1859 buffer = bi->bi_bh->b_data + bi->bi_offset;
60a0b8f9 1860 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
06344b91
BP
1861 gfs2_setbit(rgd, bi->bi_clone, bi, blk,
1862 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
6a8099ed 1863 (*n)++;
60a0b8f9
SW
1864 goal = blk;
1865 while (*n < elen) {
1866 goal++;
1867 if (goal >= (bi->bi_len * GFS2_NBBY))
1868 break;
1869 if (gfs2_testbit(rgd, buffer, bi->bi_len, goal) !=
1870 GFS2_BLKST_FREE)
1871 break;
06344b91 1872 gfs2_setbit(rgd, bi->bi_clone, bi, goal, GFS2_BLKST_USED);
60a0b8f9 1873 (*n)++;
c8cdf479 1874 }
b3e47ca0 1875 blk = gfs2_bi2rgd_blk(bi, blk);
6a8099ed 1876 rgd->rd_last_alloc = blk + *n - 1;
b3e47ca0 1877 return rgd->rd_data0 + blk;
b3b94faa
DT
1878}
1879
1880/**
1881 * rgblk_free - Change alloc state of given block(s)
1882 * @sdp: the filesystem
1883 * @bstart: the start of a run of blocks to free
1884 * @blen: the length of the block run (all must lie within ONE RG!)
1885 * @new_state: GFS2_BLKST_XXX the state the blocks should be set to
1886 *
1887 * Returns: Resource group containing the block(s)
1888 */
1889
cd915493
SW
1890static struct gfs2_rgrpd *rgblk_free(struct gfs2_sbd *sdp, u64 bstart,
1891 u32 blen, unsigned char new_state)
b3b94faa
DT
1892{
1893 struct gfs2_rgrpd *rgd;
1894 struct gfs2_bitmap *bi = NULL;
cd915493 1895 u32 length, rgrp_blk, buf_blk;
b3b94faa
DT
1896 unsigned int buf;
1897
66fc061b 1898 rgd = gfs2_blk2rgrpd(sdp, bstart, 1);
b3b94faa
DT
1899 if (!rgd) {
1900 if (gfs2_consist(sdp))
382066da 1901 fs_err(sdp, "block = %llu\n", (unsigned long long)bstart);
b3b94faa
DT
1902 return NULL;
1903 }
1904
bb8d8a6f 1905 length = rgd->rd_length;
b3b94faa 1906
bb8d8a6f 1907 rgrp_blk = bstart - rgd->rd_data0;
b3b94faa
DT
1908
1909 while (blen--) {
1910 for (buf = 0; buf < length; buf++) {
1911 bi = rgd->rd_bits + buf;
1912 if (rgrp_blk < (bi->bi_start + bi->bi_len) * GFS2_NBBY)
1913 break;
1914 }
1915
1916 gfs2_assert(rgd->rd_sbd, buf < length);
1917
1918 buf_blk = rgrp_blk - bi->bi_start * GFS2_NBBY;
1919 rgrp_blk++;
1920
1921 if (!bi->bi_clone) {
1922 bi->bi_clone = kmalloc(bi->bi_bh->b_size,
dd894be8 1923 GFP_NOFS | __GFP_NOFAIL);
b3b94faa
DT
1924 memcpy(bi->bi_clone + bi->bi_offset,
1925 bi->bi_bh->b_data + bi->bi_offset,
1926 bi->bi_len);
1927 }
d4e9c4c3 1928 gfs2_trans_add_bh(rgd->rd_gl, bi->bi_bh, 1);
06344b91 1929 gfs2_setbit(rgd, NULL, bi, buf_blk, new_state);
b3b94faa
DT
1930 }
1931
1932 return rgd;
1933}
1934
1935/**
09010978
SW
1936 * gfs2_rgrp_dump - print out an rgrp
1937 * @seq: The iterator
1938 * @gl: The glock in question
1939 *
1940 */
1941
1942int gfs2_rgrp_dump(struct seq_file *seq, const struct gfs2_glock *gl)
1943{
8e2e0047
BP
1944 struct gfs2_rgrpd *rgd = gl->gl_object;
1945 struct gfs2_blkreserv *trs;
1946 const struct rb_node *n;
1947
09010978
SW
1948 if (rgd == NULL)
1949 return 0;
8e2e0047 1950 gfs2_print_dbg(seq, " R: n:%llu f:%02x b:%u/%u i:%u r:%u\n",
09010978 1951 (unsigned long long)rgd->rd_addr, rgd->rd_flags,
8e2e0047
BP
1952 rgd->rd_free, rgd->rd_free_clone, rgd->rd_dinodes,
1953 rgd->rd_reserved);
1954 spin_lock(&rgd->rd_rsspin);
1955 for (n = rb_first(&rgd->rd_rstree); n; n = rb_next(&trs->rs_node)) {
1956 trs = rb_entry(n, struct gfs2_blkreserv, rs_node);
1957 dump_rs(seq, trs);
1958 }
1959 spin_unlock(&rgd->rd_rsspin);
09010978
SW
1960 return 0;
1961}
1962
6050b9c7
SW
1963static void gfs2_rgrp_error(struct gfs2_rgrpd *rgd)
1964{
1965 struct gfs2_sbd *sdp = rgd->rd_sbd;
1966 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n",
86d00636 1967 (unsigned long long)rgd->rd_addr);
6050b9c7
SW
1968 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n");
1969 gfs2_rgrp_dump(NULL, rgd->rd_gl);
1970 rgd->rd_flags |= GFS2_RDF_ERROR;
1971}
1972
8e2e0047 1973/**
5b924ae2
SW
1974 * gfs2_adjust_reservation - Adjust (or remove) a reservation after allocation
1975 * @ip: The inode we have just allocated blocks for
1976 * @rbm: The start of the allocated blocks
1977 * @len: The extent length
8e2e0047 1978 *
5b924ae2
SW
1979 * Adjusts a reservation after an allocation has taken place. If the
1980 * reservation does not match the allocation, or if it is now empty
1981 * then it is removed.
8e2e0047 1982 */
5b924ae2
SW
1983
1984static void gfs2_adjust_reservation(struct gfs2_inode *ip,
1985 const struct gfs2_rbm *rbm, unsigned len)
8e2e0047
BP
1986{
1987 struct gfs2_blkreserv *rs = ip->i_res;
5b924ae2
SW
1988 struct gfs2_rgrpd *rgd = rbm->rgd;
1989 unsigned rlen;
1990 u64 block;
1991 int ret;
8e2e0047 1992
5b924ae2
SW
1993 spin_lock(&rgd->rd_rsspin);
1994 if (gfs2_rs_active(rs)) {
1995 if (gfs2_rbm_eq(&rs->rs_rbm, rbm)) {
1996 block = gfs2_rbm_to_block(rbm);
1997 ret = gfs2_rbm_from_block(&rs->rs_rbm, block + len);
1998 rlen = min(rs->rs_free, len);
1999 rs->rs_free -= rlen;
2000 rgd->rd_reserved -= rlen;
2001 trace_gfs2_rs(ip, rs, TRACE_RS_CLAIM);
2002 if (rs->rs_free && !ret)
2003 goto out;
2004 }
2005 __rs_deltree(ip, rs);
8e2e0047 2006 }
5b924ae2
SW
2007out:
2008 spin_unlock(&rgd->rd_rsspin);
8e2e0047
BP
2009}
2010
09010978 2011/**
6e87ed0f 2012 * gfs2_alloc_blocks - Allocate one or more blocks of data and/or a dinode
1639431a 2013 * @ip: the inode to allocate the block for
09010978 2014 * @bn: Used to return the starting block number
8e2e0047 2015 * @nblocks: requested number of blocks/extent length (value/result)
6e87ed0f 2016 * @dinode: 1 if we're allocating a dinode block, else 0
3c5d785a 2017 * @generation: the generation number of the inode
b3b94faa 2018 *
09010978 2019 * Returns: 0 or error
b3b94faa
DT
2020 */
2021
6a8099ed 2022int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
6e87ed0f 2023 bool dinode, u64 *generation)
b3b94faa 2024{
feaa7bba 2025 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
d9ba7615 2026 struct buffer_head *dibh;
4a993fb1 2027 struct gfs2_rbm rbm = { .rgd = ip->i_rgd, };
6a8099ed 2028 unsigned int ndata;
5b924ae2 2029 u64 goal;
3c5d785a 2030 u64 block; /* block, within the file system scope */
d9ba7615 2031 int error;
b3b94faa 2032
5b924ae2
SW
2033 if (gfs2_rs_active(ip->i_res))
2034 goal = gfs2_rbm_to_block(&ip->i_res->rs_rbm);
2035 else if (!dinode && rgrp_contains_block(rbm.rgd, ip->i_goal))
2036 goal = ip->i_goal;
62e252ee 2037 else
5b924ae2 2038 goal = rbm.rgd->rd_last_alloc + rbm.rgd->rd_data0;
62e252ee 2039
5b924ae2
SW
2040 gfs2_rbm_from_block(&rbm, goal);
2041 error = gfs2_rbm_find(&rbm, GFS2_BLKST_FREE, ip, false);
62e252ee
SW
2042
2043 /* Since all blocks are reserved in advance, this shouldn't happen */
5b924ae2
SW
2044 if (error) {
2045 fs_warn(sdp, "error=%d, nblocks=%u, full=%d\n", error, *nblocks,
2046 test_bit(GBF_FULL, &rbm.rgd->rd_bits->bi_flags));
62e252ee 2047 goto rgrp_error;
8e2e0047 2048 }
62e252ee 2049
4a993fb1 2050 block = gfs2_alloc_extent(&rbm, dinode, nblocks);
5b924ae2
SW
2051 if (gfs2_rs_active(ip->i_res))
2052 gfs2_adjust_reservation(ip, &rbm, *nblocks);
6a8099ed
SW
2053 ndata = *nblocks;
2054 if (dinode)
2055 ndata--;
b3e47ca0 2056
3c5d785a 2057 if (!dinode) {
6a8099ed 2058 ip->i_goal = block + ndata - 1;
3c5d785a
BP
2059 error = gfs2_meta_inode_buffer(ip, &dibh);
2060 if (error == 0) {
2061 struct gfs2_dinode *di =
2062 (struct gfs2_dinode *)dibh->b_data;
2063 gfs2_trans_add_bh(ip->i_gl, dibh, 1);
2064 di->di_goal_meta = di->di_goal_data =
2065 cpu_to_be64(ip->i_goal);
2066 brelse(dibh);
2067 }
d9ba7615 2068 }
4a993fb1 2069 if (rbm.rgd->rd_free < *nblocks) {
8e2e0047 2070 printk(KERN_WARNING "nblocks=%u\n", *nblocks);
09010978 2071 goto rgrp_error;
8e2e0047 2072 }
09010978 2073
4a993fb1 2074 rbm.rgd->rd_free -= *nblocks;
3c5d785a 2075 if (dinode) {
4a993fb1
SW
2076 rbm.rgd->rd_dinodes++;
2077 *generation = rbm.rgd->rd_igeneration++;
3c5d785a 2078 if (*generation == 0)
4a993fb1 2079 *generation = rbm.rgd->rd_igeneration++;
3c5d785a 2080 }
b3b94faa 2081
4a993fb1
SW
2082 gfs2_trans_add_bh(rbm.rgd->rd_gl, rbm.rgd->rd_bits[0].bi_bh, 1);
2083 gfs2_rgrp_out(rbm.rgd, rbm.rgd->rd_bits[0].bi_bh->b_data);
2084 gfs2_rgrp_ondisk2lvb(rbm.rgd->rd_rgl, rbm.rgd->rd_bits[0].bi_bh->b_data);
b3b94faa 2085
6a8099ed 2086 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0);
3c5d785a
BP
2087 if (dinode)
2088 gfs2_trans_add_unrevoke(sdp, block, 1);
6a8099ed
SW
2089
2090 /*
2091 * This needs reviewing to see why we cannot do the quota change
2092 * at this point in the dinode case.
2093 */
2094 if (ndata)
2095 gfs2_quota_change(ip, ndata, ip->i_inode.i_uid,
3c5d785a 2096 ip->i_inode.i_gid);
b3b94faa 2097
4a993fb1
SW
2098 rbm.rgd->rd_free_clone -= *nblocks;
2099 trace_gfs2_block_alloc(ip, rbm.rgd, block, *nblocks,
6e87ed0f 2100 dinode ? GFS2_BLKST_DINODE : GFS2_BLKST_USED);
6050b9c7
SW
2101 *bn = block;
2102 return 0;
2103
2104rgrp_error:
4a993fb1 2105 gfs2_rgrp_error(rbm.rgd);
6050b9c7 2106 return -EIO;
b3b94faa
DT
2107}
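
/*
 * Illustrative sketch only: *nblocks is value/result. The caller passes in
 * the extent length it would like and gets back how many contiguous blocks
 * were actually allocated, starting at *bn. An active transaction and a
 * reservation from gfs2_inplace_reserve() are assumed; the helper name is
 * hypothetical.
 */
static int __maybe_unused example_alloc_data_extent(struct gfs2_inode *ip,
						    unsigned int wanted,
						    u64 *bn, unsigned int *got)
{
	unsigned int n = wanted;	/* in: wanted, out: allocated */
	int error;

	error = gfs2_alloc_blocks(ip, bn, &n, false, NULL);
	if (error)
		return error;

	*got = n;	/* may be less than "wanted" */
	return 0;
}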
2108
2109/**
46fcb2ed 2110 * __gfs2_free_blocks - free a contiguous run of block(s)
b3b94faa
DT
2111 * @ip: the inode these blocks are being freed from
2112 * @bstart: first block of a run of contiguous blocks
2113 * @blen: the length of the block run
46fcb2ed 2114 * @meta: 1 if the blocks represent metadata
b3b94faa
DT
2115 *
2116 */
2117
46fcb2ed 2118void __gfs2_free_blocks(struct gfs2_inode *ip, u64 bstart, u32 blen, int meta)
b3b94faa 2119{
feaa7bba 2120 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
2121 struct gfs2_rgrpd *rgd;
2122
2123 rgd = rgblk_free(sdp, bstart, blen, GFS2_BLKST_FREE);
2124 if (!rgd)
2125 return;
41db1ab9 2126 trace_gfs2_block_alloc(ip, rgd, bstart, blen, GFS2_BLKST_FREE);
cfc8b549 2127 rgd->rd_free += blen;
66fc061b 2128 rgd->rd_flags &= ~GFS2_RGF_TRIMMED;
d4e9c4c3 2129 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2130 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41 2131 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
b3b94faa 2132
6d3117b4 2133 /* Directories keep their data in the metadata address space */
46fcb2ed 2134 if (meta || ip->i_depth)
6d3117b4 2135 gfs2_meta_wipe(ip, bstart, blen);
4c16c36a 2136}
b3b94faa 2137
4c16c36a
BP
2138/**
2139 * gfs2_free_meta - free a contiguous run of metadata block(s)
2140 * @ip: the inode these blocks are being freed from
2141 * @bstart: first block of a run of contiguous blocks
2142 * @blen: the length of the block run
2143 *
2144 */
2145
2146void gfs2_free_meta(struct gfs2_inode *ip, u64 bstart, u32 blen)
2147{
2148 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
2149
46fcb2ed 2150 __gfs2_free_blocks(ip, bstart, blen, 1);
b3b94faa 2151 gfs2_statfs_change(sdp, 0, +blen, 0);
2933f925 2152 gfs2_quota_change(ip, -(s64)blen, ip->i_inode.i_uid, ip->i_inode.i_gid);
b3b94faa
DT
2153}
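
/*
 * Illustrative sketch only: gfs2_free_meta() above frees metadata runs and
 * adjusts statfs/quota itself, while plain data runs go through
 * __gfs2_free_blocks() with meta == 0 and leave that accounting to the
 * caller. The helper name is hypothetical.
 */
static void __maybe_unused example_free_run(struct gfs2_inode *ip, u64 bstart,
					    u32 blen, bool is_meta)
{
	if (is_meta)
		gfs2_free_meta(ip, bstart, blen);
	else
		__gfs2_free_blocks(ip, bstart, blen, 0);
}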
2154
feaa7bba
SW
2155void gfs2_unlink_di(struct inode *inode)
2156{
2157 struct gfs2_inode *ip = GFS2_I(inode);
2158 struct gfs2_sbd *sdp = GFS2_SB(inode);
2159 struct gfs2_rgrpd *rgd;
dbb7cae2 2160 u64 blkno = ip->i_no_addr;
feaa7bba
SW
2161
2162 rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_UNLINKED);
2163 if (!rgd)
2164 return;
41db1ab9 2165 trace_gfs2_block_alloc(ip, rgd, blkno, 1, GFS2_BLKST_UNLINKED);
feaa7bba 2166 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2167 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41
BM
2168 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2169 update_rgrp_lvb_unlinked(rgd, 1);
feaa7bba
SW
2170}
2171
cd915493 2172static void gfs2_free_uninit_di(struct gfs2_rgrpd *rgd, u64 blkno)
b3b94faa
DT
2173{
2174 struct gfs2_sbd *sdp = rgd->rd_sbd;
2175 struct gfs2_rgrpd *tmp_rgd;
2176
2177 tmp_rgd = rgblk_free(sdp, blkno, 1, GFS2_BLKST_FREE);
2178 if (!tmp_rgd)
2179 return;
2180 gfs2_assert_withdraw(sdp, rgd == tmp_rgd);
2181
73f74948 2182 if (!rgd->rd_dinodes)
b3b94faa 2183 gfs2_consist_rgrpd(rgd);
73f74948 2184 rgd->rd_dinodes--;
cfc8b549 2185 rgd->rd_free++;
b3b94faa 2186
d4e9c4c3 2187 gfs2_trans_add_bh(rgd->rd_gl, rgd->rd_bits[0].bi_bh, 1);
42d52e38 2188 gfs2_rgrp_out(rgd, rgd->rd_bits[0].bi_bh->b_data);
90306c41
BM
2189 gfs2_rgrp_ondisk2lvb(rgd->rd_rgl, rgd->rd_bits[0].bi_bh->b_data);
2190 update_rgrp_lvb_unlinked(rgd, -1);
b3b94faa
DT
2191
2192 gfs2_statfs_change(sdp, 0, +1, -1);
b3b94faa
DT
2193}
2194
b3b94faa
DT
2195
2196void gfs2_free_di(struct gfs2_rgrpd *rgd, struct gfs2_inode *ip)
2197{
dbb7cae2 2198 gfs2_free_uninit_di(rgd, ip->i_no_addr);
41db1ab9 2199 trace_gfs2_block_alloc(ip, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE);
2933f925 2200 gfs2_quota_change(ip, -1, ip->i_inode.i_uid, ip->i_inode.i_gid);
dbb7cae2 2201 gfs2_meta_wipe(ip, ip->i_no_addr, 1);
b3b94faa
DT
2202}
2203
acf7e244
SW
2204/**
2205 * gfs2_check_blk_type - Check the type of a block
2206 * @sdp: The superblock
2207 * @no_addr: The block number to check
2208 * @type: The block type we are looking for
2209 *
2210 * Returns: 0 if the block type matches the expected type
2211 * -ESTALE if it doesn't match
2212 * or -ve errno if something went wrong while checking
2213 */
2214
2215int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type)
2216{
2217 struct gfs2_rgrpd *rgd;
8339ee54 2218 struct gfs2_holder rgd_gh;
58884c4d 2219 int error = -EINVAL;
acf7e244 2220
66fc061b 2221 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1);
acf7e244 2222 if (!rgd)
8339ee54 2223 goto fail;
acf7e244
SW
2224
2225 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_SHARED, 0, &rgd_gh);
2226 if (error)
8339ee54 2227 goto fail;
acf7e244
SW
2228
2229 if (gfs2_get_block_type(rgd, no_addr) != type)
2230 error = -ESTALE;
2231
2232 gfs2_glock_dq_uninit(&rgd_gh);
acf7e244
SW
2233fail:
2234 return error;
2235}
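
/*
 * Illustrative sketch only: a caller that has been handed a bare block number
 * (rather than a directory entry) can use gfs2_check_blk_type() to confirm
 * the block really is a dinode before reading it in. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_verify_dinode(struct gfs2_sbd *sdp,
						u64 no_addr)
{
	return gfs2_check_blk_type(sdp, no_addr, GFS2_BLKST_DINODE);
}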
2236
b3b94faa
DT
2237/**
2238 * gfs2_rlist_add - add a RG to a list of RGs
70b0c365 2239 * @ip: the inode
b3b94faa
DT
2240 * @rlist: the list of resource groups
2241 * @block: the block
2242 *
2243 * Figure out what RG a block belongs to and add that RG to the list
2244 *
2245 * FIXME: Don't use NOFAIL
2246 *
2247 */
2248
70b0c365 2249void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
cd915493 2250 u64 block)
b3b94faa 2251{
70b0c365 2252 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
b3b94faa
DT
2253 struct gfs2_rgrpd *rgd;
2254 struct gfs2_rgrpd **tmp;
2255 unsigned int new_space;
2256 unsigned int x;
2257
2258 if (gfs2_assert_warn(sdp, !rlist->rl_ghs))
2259 return;
2260
70b0c365
SW
2261 if (ip->i_rgd && rgrp_contains_block(ip->i_rgd, block))
2262 rgd = ip->i_rgd;
2263 else
66fc061b 2264 rgd = gfs2_blk2rgrpd(sdp, block, 1);
b3b94faa 2265 if (!rgd) {
70b0c365 2266 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", (unsigned long long)block);
b3b94faa
DT
2267 return;
2268 }
70b0c365 2269 ip->i_rgd = rgd;
b3b94faa
DT
2270
2271 for (x = 0; x < rlist->rl_rgrps; x++)
2272 if (rlist->rl_rgd[x] == rgd)
2273 return;
2274
2275 if (rlist->rl_rgrps == rlist->rl_space) {
2276 new_space = rlist->rl_space + 10;
2277
2278 tmp = kcalloc(new_space, sizeof(struct gfs2_rgrpd *),
dd894be8 2279 GFP_NOFS | __GFP_NOFAIL);
b3b94faa
DT
2280
2281 if (rlist->rl_rgd) {
2282 memcpy(tmp, rlist->rl_rgd,
2283 rlist->rl_space * sizeof(struct gfs2_rgrpd *));
2284 kfree(rlist->rl_rgd);
2285 }
2286
2287 rlist->rl_space = new_space;
2288 rlist->rl_rgd = tmp;
2289 }
2290
2291 rlist->rl_rgd[rlist->rl_rgrps++] = rgd;
2292}
2293
2294/**
2295 * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
2296 * and initialize an array of glock holders for them
2297 * @rlist: the list of resource groups
2298 * @state: the lock state to acquire the RG lock in
b3b94faa
DT
2299 *
2300 * FIXME: Don't use NOFAIL
2301 *
2302 */
2303
fe6c991c 2304void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
b3b94faa
DT
2305{
2306 unsigned int x;
2307
2308 rlist->rl_ghs = kcalloc(rlist->rl_rgrps, sizeof(struct gfs2_holder),
dd894be8 2309 GFP_NOFS | __GFP_NOFAIL);
b3b94faa
DT
2310 for (x = 0; x < rlist->rl_rgrps; x++)
2311 gfs2_holder_init(rlist->rl_rgd[x]->rd_gl,
fe6c991c 2312 state, 0,
b3b94faa
DT
2313 &rlist->rl_ghs[x]);
2314}
2315
2316/**
2317 * gfs2_rlist_free - free a resource group list
2318 * @list: the list of resource groups
2319 *
2320 */
2321
2322void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
2323{
2324 unsigned int x;
2325
2326 kfree(rlist->rl_rgd);
2327
2328 if (rlist->rl_ghs) {
2329 for (x = 0; x < rlist->rl_rgrps; x++)
2330 gfs2_holder_uninit(&rlist->rl_ghs[x]);
2331 kfree(rlist->rl_ghs);
8e2e0047 2332 rlist->rl_ghs = NULL;
b3b94faa
DT
2333 }
2334}
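
/*
 * Illustrative sketch only: the usual rlist life cycle. Resource groups are
 * collected with gfs2_rlist_add(), holders are built with gfs2_rlist_alloc(),
 * and everything is torn down again with gfs2_rlist_free(). The
 * gfs2_glock_nq_m()/gfs2_glock_dq_m() calls are assumed to come from glock.h;
 * the helper name is hypothetical and error handling is omitted.
 */
static void __maybe_unused example_rlist_cycle(struct gfs2_inode *ip,
					       const u64 *blocks,
					       unsigned int nr)
{
	struct gfs2_rgrp_list rlist;
	unsigned int x;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	for (x = 0; x < nr; x++)
		gfs2_rlist_add(ip, &rlist, blocks[x]);

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
	gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);

	/* ... change block states under the rgrp glocks here ... */

	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
	gfs2_rlist_free(&rlist);
}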
2335