fs/gfs2/quota.c
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space. Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file. This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously. So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota
 * check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness. "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file. (The default is
 * 60 seconds.) Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit. The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one. This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit. (In practice, the maximum overrun you see should be much
 * less.) A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun. Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not constantly read.
 */
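/*
 * Illustrative example (not part of the original comment): with
 * quota_scale = 1 and a pending local change of N blocks on each of J
 * journals, need_sync() below projects the cluster-wide usage as
 * qb_value + N * J and forces a quota sync once that projection
 * reaches the hard limit.
 */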

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define QUOTA_USER 1
#define QUOTA_GROUP 0

struct gfs2_quota_host {
	u64 qu_limit;
	u64 qu_warn;
	s64 qu_value;
	u32 qu_ll_next;
};

struct gfs2_quota_change_host {
	u64 qc_change;
	u32 qc_flags; /* GFS2_QCF_... */
	u32 qc_id;
};

static LIST_HEAD(qd_lru_list);
static atomic_t qd_lru_count = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(qd_lru_lock);

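/*
 * gfs2_shrink_qd_memory - VM shrinker callback for the common qd LRU
 *
 * Frees up to @nr unreferenced gfs2_quota_data objects from the global
 * reclaim list (skipping the scan entirely for non-__GFP_FS contexts)
 * and reports the remaining count scaled by sysctl_vfs_cache_pressure.
 */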
int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	if (nr == 0)
		goto out;

	if (!(gfp_mask & __GFP_FS))
		return -1;

	spin_lock(&qd_lru_lock);
	while (nr && !list_empty(&qd_lru_list)) {
		qd = list_entry(qd_lru_list.next,
				struct gfs2_quota_data, qd_reclaim);
		sdp = qd->qd_gl->gl_sbd;

		/* Free from the filesystem-specific list */
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		list_del_init(&qd->qd_reclaim);
		atomic_dec(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
		kmem_cache_free(gfs2_quotad_cachep, qd);
		spin_lock(&qd_lru_lock);
		nr--;
	}
	spin_unlock(&qd_lru_lock);

out:
	return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
}

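/*
 * qd2offset - byte offset of an ID's entry in the quota file
 *
 * User and group quotas for the same numeric ID occupy adjacent
 * struct gfs2_quota entries, user first, so the entry index is
 * 2 * id + !user.
 */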
static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = 2 * (u64)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (!qd)
		return -ENOMEM;

	atomic_set(&qd->qd_count, 1);
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_reclaim);

	error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return error;
}

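/*
 * qd_get - look up or create the in-core gfs2_quota_data for an ID
 *
 * Searches sd_quota_list under qd_lru_lock, pulling a matching entry
 * back off the common reclaim list if necessary. When nothing is
 * found and @create is set, a new entry is allocated outside the lock
 * and the search is retried.
 */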
static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&qd_lru_lock);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				if (!atomic_read(&qd->qd_count) &&
				    !list_empty(&qd->qd_reclaim)) {
					/* Remove it from reclaim list */
					list_del_init(&qd->qd_reclaim);
					atomic_dec(&qd_lru_count);
				}
				atomic_inc(&qd->qd_count);
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&qd_lru_lock);

		if (qd || !create) {
			if (new_qd) {
				gfs2_glock_put(new_qd->qd_gl);
				kmem_cache_free(gfs2_quotad_cachep, new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
		/* Add to the reclaim list */
		list_add_tail(&qd->qd_reclaim, &qd_lru_list);
		atomic_inc(&qd_lru_count);
		spin_unlock(&qd_lru_lock);
	}
}

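/*
 * slot_get - claim a slot in this node's quota change file
 *
 * Scans sd_quota_bitmap for a clear bit and records the slot number in
 * qd->qd_slot; later callers simply bump qd_slot_count.
 */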
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&qd_lru_lock);

	if (qd->qd_slot_count++) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&qd_lru_lock);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&qd_lru_lock);
	return -ENOSPC;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&qd_lru_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&qd_lru_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&qd_lru_lock);
}

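/*
 * bh_get - read and pin the quota change block backing qd's slot
 *
 * Maps the slot to a block of the sd_qc_inode, reads it, verifies the
 * QC metatype and points qd->qd_bh_qc at this ID's gfs2_quota_change
 * entry within the buffer.
 */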
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

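/*
 * qd_fish - find one quota change that still needs syncing
 *
 * Picks the next qd on sd_quota_list that carries a pending change and
 * has not been synced in the current generation, marks it QDF_LOCKED
 * and takes the references that qd_unlock() later drops.
 */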
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
		atomic_inc(&qd->qd_count);
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;
		found = 1;

		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lru_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lru_lock);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&qd_lru_lock);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
	atomic_inc(&qd->qd_count);
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			return -1;
		else
			return 1;
	}
	if (qd_a->qd_id < qd_b->qd_id)
		return -1;
	if (qd_a->qd_id > qd_b->qd_id)
		return 1;

	return 0;
}

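/*
 * do_qc - apply a local quota change to this node's quota change file
 *
 * Adds @change to the on-disk gfs2_quota_change entry for this ID and
 * mirrors the new total in qd->qd_change. A qd with a non-zero
 * pending change keeps an extra reference and slot until the total
 * returns to zero.
 */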
static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lru_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lru_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

static void gfs2_quota_in(struct gfs2_quota_host *qu, const void *buf)
{
	const struct gfs2_quota *str = buf;

	qu->qu_limit = be64_to_cpu(str->qu_limit);
	qu->qu_warn = be64_to_cpu(str->qu_warn);
	qu->qu_value = be64_to_cpu(str->qu_value);
	qu->qu_ll_next = be32_to_cpu(str->qu_ll_next);
}

static void gfs2_quota_out(const struct gfs2_quota_host *qu, void *buf)
{
	struct gfs2_quota *str = buf;

	str->qu_limit = cpu_to_be64(qu->qu_limit);
	str->qu_warn = cpu_to_be64(qu->qu_warn);
	str->qu_value = cpu_to_be64(qu->qu_value);
	str->qu_ll_next = cpu_to_be32(qu->qu_ll_next);
	memset(&str->qu_reserved, 0, sizeof(str->qu_reserved));
}

/**
 * gfs2_adjust_quota
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	char *ptr;
	struct gfs2_quota_host qp;
	s64 value;
	int err = -EIO;

	if (gfs2_is_stuffed(ip))
		gfs2_unstuff_dinode(ip, NULL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = kaddr + offset;
	gfs2_quota_in(&qp, ptr);
	qp.qu_value += change;
	value = qp.qu_value;
	gfs2_quota_out(&qp, ptr);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;
	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_value = cpu_to_be64(value);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_magic = cpu_to_be32(GFS2_MAGIC);
	((struct gfs2_quota_lvb *)(qd->qd_gl->gl_lvb))->qb_value = cpu_to_be64(value);
unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

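/*
 * do_sync - flush a batch of pending changes into the quota file
 *
 * Takes exclusive glocks on every qd in the batch plus the quota
 * inode, reserves the needed blocks, applies each qd_change_sync with
 * gfs2_adjust_quota() and then backs the change out of the per-node
 * quota change file with do_qc().
 */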
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	al = gfs2_alloc_get(ip);
	if (!al) {
		error = -ENOMEM;
		goto out_gunlock;
	}
	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	al->al_requested = 1;
	/* +1 in the end for the block requested above for unstuffing */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 1;

	if (nalloc)
		al->al_requested += nalloc * (data_blocks + ind_blocks);
	error = gfs2_inplace_reserve(ip);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += al->al_rgd->rd_length + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	gfs2_inplace_release(ip);
out_alloc:
	gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

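/*
 * do_glock - acquire the quota glock and make sure its LVB is current
 *
 * If @force_refresh is set or the cached LVB carries no valid magic,
 * the glock is retaken exclusively, the gfs2_quota entry is read from
 * the quota file and the LVB is rebuilt from it.
 */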
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota_host q;
	char buf[sizeof(struct gfs2_quota)];
	int error;
	struct gfs2_quota_lvb *qlvb;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, NULL, buf, &pos,
					   sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);
		qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
		qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
		qlvb->__pad = 0;
		qlvb->qb_limit = cpu_to_be64(q.qu_limit);
		qlvb->qb_warn = cpu_to_be64(q.qu_warn);
		qlvb->qb_value = cpu_to_be64(q.qu_value);
		qd->qd_qb = *qlvb;

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}

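/*
 * need_sync - decide whether a pending change warrants an early sync
 *
 * Scales the local pending change by the number of journals and the
 * quota_scale tunable; a sync is requested once the projected usage
 * reaches the hard limit.
 */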
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lru_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lru_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\r\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}

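/*
 * gfs2_quota_check - test whether the held IDs are at or over quota
 *
 * Returns -EDQUOT when the cached value plus the local pending change
 * exceeds the hard limit, and prints a rate-limited warning once the
 * warn level is crossed.
 */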
int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	s64 value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
		spin_lock(&qd_lru_lock);
		value += qd->qd_change;
		spin_unlock(&qd_lru_lock);

		if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (be64_to_cpu(qd->qd_qb.qb_warn) &&
			   (s64)be64_to_cpu(qd->qd_qb.qb_warn) < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period) * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}

void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       u32 uid, u32 gid)
{
	struct gfs2_alloc *al = ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, u32 id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}

static void gfs2_quota_change_in(struct gfs2_quota_change_host *qc, const void *buf)
{
	const struct gfs2_quota_change *str = buf;

	qc->qc_change = be64_to_cpu(str->qc_change);
	qc->qc_flags = be32_to_cpu(str->qc_flags);
	qc->qc_id = be32_to_cpu(str->qc_id);
}

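/*
 * gfs2_quota_init - scan the quota change file at mount time
 *
 * Sizes the slot bitmap from the quota change inode and walks every
 * gfs2_quota_change entry, recreating an in-core qd (with QDF_CHANGE
 * set) for each slot that still carries an unsynced change.
 */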
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_disksize >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (!ip->i_disksize || ip->i_disksize > (64 << 20) ||
	    ip->i_disksize & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_NOFS);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_NOFS);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change_host qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lru_lock);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lru_lock);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&qd_lru_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (atomic_read(&qd->qd_count) > 1 ||
		    (atomic_read(&qd->qd_count) &&
		     !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&qd_lru_lock);
			schedule();
			spin_lock(&qd_lru_lock);
			continue;
		}

		list_del(&qd->qd_list);
		/* Also remove if this qd exists in the reclaim list */
		if (!list_empty(&qd->qd_reclaim)) {
			list_del_init(&qd->qd_reclaim);
			atomic_dec(&qd_lru_count);
		}
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lru_lock);

		if (!atomic_read(&qd->qd_count)) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, qd);

		spin_lock(&qd_lru_lock);
	}
	spin_unlock(&qd_lru_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct gfs2_sbd *sdp),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */

int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
				   &statfs_timeo, &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		if (freezing(current))
			refrigerator();
		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}