xfs: merge xfs_ag.h into xfs_format.h
fs/xfs/xfs_dquot.c
/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_shared.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_qm.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_bmap_btree.h"

/*
 * Lock order:
 *
 * ip->i_lock
 *   qi->qi_tree_lock
 *     dquot->q_qlock (xfs_dqlock() and friends)
 *       dquot->q_flush (xfs_dqflock() and friends)
 *         qi->qi_lru_lock
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
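
/*
 * A minimal sketch of that nesting (illustrative only, not a real caller;
 * it assumes an inode @ip, its mount's quotainfo @qi, and a dquot @dqp are
 * already in hand):
 *
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);		ip->i_lock first
 *	mutex_lock(&qi->qi_tree_lock);		then the radix tree lock
 *	xfs_dqlock(dqp);			then the dquot lock
 *	xfs_dqflock(dqp);			then the flush lock
 */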

#ifdef DEBUG
xfs_buftarg_t	*xfs_dqerror_target;
int		xfs_do_dqerror;
int		xfs_dqreq_num;
int		xfs_dqerror_mod = 33;
#endif

struct kmem_zone	*xfs_qm_dqtrxzone;
static struct kmem_zone	*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_group_class;
static struct lock_class_key xfs_dquot_project_class;

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}

/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated: when enforcement is off we simply
 * don't reject any quota reservations, and we also report the timers
 * as zero in Q_GETQUOTA calls.
 * Warnings behave differently in that they don't 'automatically' get
 * started when limits get exceeded. They do get reset to zero, however,
 * when we find the count to be under the soft limit (they are only
 * ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
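
/*
 * Worked example of the timer rule above (illustrative numbers, not from
 * the original source): with a soft limit of 100 blocks and d_bcount at
 * 110, a zero d_btimer is armed to get_seconds() + qi_btimelimit; once
 * d_bcount falls back to 100 or below, the next call clears the timer.
 */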

/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - ids are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	ASSERT(curid >= 0);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
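
/*
 * Worked example of the curid arithmetic above (illustrative): with 30
 * dquots per chunk, id 53 rounds down to curid = 53 - (53 % 30) = 30, so
 * the chunk is initialized with ids 30..59.
 */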

/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft
 * limit is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	__uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
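
/*
 * Worked example (illustrative): a hard limit of 1000 blocks with no soft
 * limit yields q_prealloc_hi_wmark = 1000, q_prealloc_lo_wmark = 950 (95%),
 * and q_low_space = { 10, 30, 50 } for the 1%, 3% and 5% thresholds.
 */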

/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	xfs_bmbt_irec_t	map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quota was turned off while we didn't
	 * hold the inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return -ESRCH;
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);
	if (!bp) {
		error = -ENOMEM;
		goto error1;
	}
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	error = xfs_bmap_finish(tpp, &flist, &committed);
	if (error)
		goto error1;

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return error;
}

STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);

	if (error) {
		ASSERT(*bpp == NULL);
		return error;
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_dqcheck(mp, ddq, firstid + i,
				    dqp->dq_flags & XFS_DQ_ALLTYPES,
				    XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return -EIO;
		}
	}

	return 0;
}

/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
	struct xfs_mount	*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
	uint			lock_mode;

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	lock_mode = xfs_ilock_data_map_shared(quotip);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quota was turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, lock_mode);
		return -ESRCH;
	}

	/*
	 * Find the block map; no allocations yet
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, lock_mode);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return -ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
				       dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == -EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			ASSERT(bp == NULL);
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return error;
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}
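
/*
 * Worked example of the mapping above (illustrative): with 30 dquots per
 * chunk, id 53 lives at q_fileoffset = 53 / 30 = 1 within the quota inode,
 * at q_bufoffset = (53 % 30) * sizeof(xfs_dqblk_t) into that chunk.
 */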

/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);

	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas.
	 */
	switch (type) {
	case XFS_DQ_USER:
		/* uses the default lock class */
		break;
	case XFS_DQ_GROUP:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_group_class);
		break;
	case XFS_DQ_PROJ:
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_project_class);
		break;
	default:
		ASSERT(0);
		break;
	}

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_dqalloc,
					  XFS_QM_DQALLOC_SPACE_RES(mp), 0);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);

	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}

/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return -ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return -EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return -ESRCH;
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
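
/*
 * A minimal usage sketch (illustrative only; the helper name and error
 * handling are assumptions, not part of the original file): look up the
 * user dquot for an id without an inode, then drop the reference again.
 */
#if 0	/* example, not compiled */
static int
xfs_qm_dqget_example(
	struct xfs_mount	*mp,
	xfs_dqid_t		id)
{
	struct xfs_dquot	*dqp;
	int			error;

	error = xfs_qm_dqget(mp, NULL, id, XFS_DQ_USER, 0, &dqp);
	if (error)
		return error;	/* e.g. -ESRCH if user quota is off */

	/* dqp comes back locked with one reference held */
	xfs_qm_dqput(dqp);	/* drops the reference and unlocks */
	return 0;
}
#endif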

/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs == 0) {
		struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
		trace_xfs_dqput_free(dqp);

		if (list_lru_add(&qi->qi_lru, &dqp->q_lru))
			XFS_STATS_INC(xs_qm_dquot_unused);
	}
	xfs_dqunlock(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}

/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}

/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock must be held by the caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = -EIO;
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			    XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return -EIO;
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
			       &dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
			      &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return -EIO;
}
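
/*
 * Sketch of how a caller might drive xfs_qm_dqflush() (illustrative; the
 * helper name and the delwri-list plumbing are assumptions modelled on the
 * quotacheck/reclaim callers, not part of this file):
 */
#if 0	/* example, not compiled */
static int
xfs_qm_dqflush_example(
	struct xfs_dquot	*dqp,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;

	xfs_dqlock(dqp);
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		return -EAGAIN;		/* flush lock is already held */
	}

	error = xfs_qm_dqflush(dqp, &bp);
	if (!error) {
		/* queue the buffer for delayed write and drop our hold */
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
	}
	xfs_dqunlock(dqp);
	return error;
}
#endif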

/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
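
/*
 * Illustrative sketch (the helper name is an assumption, not part of the
 * original file): callers need not order the arguments themselves, since
 * xfs_dqlock2() picks the id order; unlocking can happen in any order.
 */
#if 0	/* example, not compiled */
static void
xfs_dqlock2_example(
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	xfs_dqlock2(udqp, gdqp);	/* either argument order is safe */
	/* ... adjust both dquots ... */
	xfs_dqunlock(gdqp);
	xfs_dqunlock(udqp);
}
#endif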

int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}