xfs: create a shared header file for format-related information
fs/xfs/xfs_qm.c
1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_shared.h"
22 #include "xfs_bit.h"
23 #include "xfs_log.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_alloc.h"
28 #include "xfs_quota.h"
29 #include "xfs_mount.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_ialloc.h"
35 #include "xfs_itable.h"
36 #include "xfs_rtalloc.h"
37 #include "xfs_error.h"
38 #include "xfs_bmap.h"
39 #include "xfs_attr.h"
40 #include "xfs_buf_item.h"
41 #include "xfs_trans_space.h"
42 #include "xfs_qm.h"
43 #include "xfs_trace.h"
44 #include "xfs_icache.h"
45 #include "xfs_cksum.h"
46
47 /*
48 * The global quota manager. There is only one of these for the entire
49 * system, _not_ one per file system. XQM keeps track of the overall
50 * quota functionality, including maintaining the freelist and hash
51 * tables of dquots.
52 */
53 STATIC int xfs_qm_init_quotainos(xfs_mount_t *);
54 STATIC int xfs_qm_init_quotainfo(xfs_mount_t *);
55
56
57 STATIC void xfs_qm_dqfree_one(struct xfs_dquot *dqp);
58 /*
59 * We use the batch lookup interface to iterate over the dquots as it
60 * currently is the only interface into the radix tree code that allows
61 * fuzzy lookups instead of exact matches. Holding the lock over multiple
62	 * operations is fine as all callers run only during mount/umount
63 * or quotaoff.
64 */
65 #define XFS_DQ_LOOKUP_BATCH 32
66
67 STATIC int
68 xfs_qm_dquot_walk(
69 struct xfs_mount *mp,
70 int type,
71 int (*execute)(struct xfs_dquot *dqp, void *data),
72 void *data)
73 {
74 struct xfs_quotainfo *qi = mp->m_quotainfo;
75 struct radix_tree_root *tree = xfs_dquot_tree(qi, type);
76 uint32_t next_index;
77 int last_error = 0;
78 int skipped;
79 int nr_found;
80
81 restart:
82 skipped = 0;
83 next_index = 0;
84 nr_found = 0;
85
86 while (1) {
87 struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
88 int error = 0;
89 int i;
90
91 mutex_lock(&qi->qi_tree_lock);
92 nr_found = radix_tree_gang_lookup(tree, (void **)batch,
93 next_index, XFS_DQ_LOOKUP_BATCH);
94 if (!nr_found) {
95 mutex_unlock(&qi->qi_tree_lock);
96 break;
97 }
98
99 for (i = 0; i < nr_found; i++) {
100 struct xfs_dquot *dqp = batch[i];
101
102 next_index = be32_to_cpu(dqp->q_core.d_id) + 1;
103
104 error = execute(batch[i], data);
105 if (error == EAGAIN) {
106 skipped++;
107 continue;
108 }
109 if (error && last_error != EFSCORRUPTED)
110 last_error = error;
111 }
112
113 mutex_unlock(&qi->qi_tree_lock);
114
115 /* bail out if the filesystem is corrupted. */
116 if (last_error == EFSCORRUPTED) {
117 skipped = 0;
118 break;
119 }
120 }
121
122 if (skipped) {
123 delay(1);
124 goto restart;
125 }
126
127 return last_error;
128 }
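/*
 * A minimal sketch of an execute() callback for xfs_qm_dquot_walk()
 * (hypothetical, for illustration only; the real callbacks in this file
 * are xfs_qm_dqpurge and xfs_qm_flush_one). Returning EAGAIN asks the
 * walker to retry the dquot on a later restart pass; any other non-zero
 * value is recorded as last_error and the walk continues.
 *
 *	STATIC int
 *	xfs_qm_count_one(struct xfs_dquot *dqp, void *data)
 *	{
 *		int	*count = data;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 *	error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_count_one, &count);
 */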
129
130
131 /*
132 * Purge a dquot from all tracking data structures and free it.
133 */
134 STATIC int
135 xfs_qm_dqpurge(
136 struct xfs_dquot *dqp,
137 void *data)
138 {
139 struct xfs_mount *mp = dqp->q_mount;
140 struct xfs_quotainfo *qi = mp->m_quotainfo;
141 struct xfs_dquot *gdqp = NULL;
142 struct xfs_dquot *pdqp = NULL;
143
144 xfs_dqlock(dqp);
145 if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
146 xfs_dqunlock(dqp);
147 return EAGAIN;
148 }
149
150 /*
151	 * If this dquot has hints attached, prepare to release them now.
152 */
153 gdqp = dqp->q_gdquot;
154 if (gdqp) {
155 xfs_dqlock(gdqp);
156 dqp->q_gdquot = NULL;
157 }
158
159 pdqp = dqp->q_pdquot;
160 if (pdqp) {
161 xfs_dqlock(pdqp);
162 dqp->q_pdquot = NULL;
163 }
164
165 dqp->dq_flags |= XFS_DQ_FREEING;
166
167 xfs_dqflock(dqp);
168
169 /*
170	 * If we are turning this type of quota off, we don't care
171 * about the dirty metadata sitting in this dquot. OTOH, if
172 * we're unmounting, we do care, so we flush it and wait.
173 */
174 if (XFS_DQ_IS_DIRTY(dqp)) {
175 struct xfs_buf *bp = NULL;
176 int error;
177
178 /*
179 * We don't care about getting disk errors here. We need
180 * to purge this dquot anyway, so we go ahead regardless.
181 */
182 error = xfs_qm_dqflush(dqp, &bp);
183 if (error) {
184 xfs_warn(mp, "%s: dquot %p flush failed",
185 __func__, dqp);
186 } else {
187 error = xfs_bwrite(bp);
188 xfs_buf_relse(bp);
189 }
190 xfs_dqflock(dqp);
191 }
192
193 ASSERT(atomic_read(&dqp->q_pincount) == 0);
194 ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
195 !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
196
197 xfs_dqfunlock(dqp);
198 xfs_dqunlock(dqp);
199
200 radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
201 be32_to_cpu(dqp->q_core.d_id));
202 qi->qi_dquots--;
203
204 /*
205 * We move dquots to the freelist as soon as their reference count
206 * hits zero, so it really should be on the freelist here.
207 */
208 ASSERT(!list_empty(&dqp->q_lru));
209 list_lru_del(&qi->qi_lru, &dqp->q_lru);
210 XFS_STATS_DEC(xs_qm_dquot_unused);
211
212 xfs_qm_dqdestroy(dqp);
213
214 if (gdqp)
215 xfs_qm_dqput(gdqp);
216 if (pdqp)
217 xfs_qm_dqput(pdqp);
218 return 0;
219 }
220
221 /*
222 * Purge the dquot cache.
223 */
224 void
225 xfs_qm_dqpurge_all(
226 struct xfs_mount *mp,
227 uint flags)
228 {
229 if (flags & XFS_QMOPT_UQUOTA)
230 xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
231 if (flags & XFS_QMOPT_GQUOTA)
232 xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
233 if (flags & XFS_QMOPT_PQUOTA)
234 xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
235 }
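/*
 * Usage sketch: the XFS_QMOPT_* flags select which dquot types to purge,
 * so a caller turning off only user quotas would do (hypothetical call,
 * mirroring xfs_qm_unmount below):
 *
 *	xfs_qm_dqpurge_all(mp, XFS_QMOPT_UQUOTA);
 */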
236
237 /*
238 * Just destroy the quotainfo structure.
239 */
240 void
241 xfs_qm_unmount(
242 struct xfs_mount *mp)
243 {
244 if (mp->m_quotainfo) {
245 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
246 xfs_qm_destroy_quotainfo(mp);
247 }
248 }
249
250
251 /*
252 * This is called from xfs_mountfs to start quotas and initialize all
253 * necessary data structures like quotainfo. This is also responsible for
254 * running a quotacheck as necessary. We are guaranteed that the superblock
255 * is consistently read in at this point.
256 *
257 * If we fail here, the mount will continue with quota turned off. We don't
258	 * need to indicate success or failure at all.
259 */
260 void
261 xfs_qm_mount_quotas(
262 xfs_mount_t *mp)
263 {
264 int error = 0;
265 uint sbf;
266
267 /*
268	 * Quotas on realtime volumes are not supported, so we disable
269 * quotas immediately.
270 */
271 if (mp->m_sb.sb_rextents) {
272 xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
273 mp->m_qflags = 0;
274 goto write_changes;
275 }
276
277 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
278
279 /*
280 * Allocate the quotainfo structure inside the mount struct, and
281 * create quotainode(s), and change/rev superblock if necessary.
282 */
283 error = xfs_qm_init_quotainfo(mp);
284 if (error) {
285 /*
286 * We must turn off quotas.
287 */
288 ASSERT(mp->m_quotainfo == NULL);
289 mp->m_qflags = 0;
290 goto write_changes;
291 }
292 /*
293 * If any of the quotas are not consistent, do a quotacheck.
294 */
295 if (XFS_QM_NEED_QUOTACHECK(mp)) {
296 error = xfs_qm_quotacheck(mp);
297 if (error) {
298 /* Quotacheck failed and disabled quotas. */
299 return;
300 }
301 }
302 /*
303	 * If one type of quota is off, then it will lose its
304 * quotachecked status, since we won't be doing accounting for
305 * that type anymore.
306 */
307 if (!XFS_IS_UQUOTA_ON(mp))
308 mp->m_qflags &= ~XFS_UQUOTA_CHKD;
309 if (!XFS_IS_GQUOTA_ON(mp))
310 mp->m_qflags &= ~XFS_GQUOTA_CHKD;
311 if (!XFS_IS_PQUOTA_ON(mp))
312 mp->m_qflags &= ~XFS_PQUOTA_CHKD;
313
314 write_changes:
315 /*
316 * We actually don't have to acquire the m_sb_lock at all.
317 * This can only be called from mount, and that's single threaded. XXX
318 */
319 spin_lock(&mp->m_sb_lock);
320 sbf = mp->m_sb.sb_qflags;
321 mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
322 spin_unlock(&mp->m_sb_lock);
323
324 if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
325 if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS)) {
326 /*
327 * We could only have been turning quotas off.
328 * We aren't in very good shape actually because
329 * the incore structures are convinced that quotas are
330	 * off, but the on-disk superblock doesn't know that!
331 */
332 ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
333 xfs_alert(mp, "%s: Superblock update failed!",
334 __func__);
335 }
336 }
337
338 if (error) {
339 xfs_warn(mp, "Failed to initialize disk quotas.");
340 return;
341 }
342 }
343
344 /*
345 * Called from the vfsops layer.
346 */
347 void
348 xfs_qm_unmount_quotas(
349 xfs_mount_t *mp)
350 {
351 /*
352 * Release the dquots that root inode, et al might be holding,
353 * before we flush quotas and blow away the quotainfo structure.
354 */
355 ASSERT(mp->m_rootip);
356 xfs_qm_dqdetach(mp->m_rootip);
357 if (mp->m_rbmip)
358 xfs_qm_dqdetach(mp->m_rbmip);
359 if (mp->m_rsumip)
360 xfs_qm_dqdetach(mp->m_rsumip);
361
362 /*
363 * Release the quota inodes.
364 */
365 if (mp->m_quotainfo) {
366 if (mp->m_quotainfo->qi_uquotaip) {
367 IRELE(mp->m_quotainfo->qi_uquotaip);
368 mp->m_quotainfo->qi_uquotaip = NULL;
369 }
370 if (mp->m_quotainfo->qi_gquotaip) {
371 IRELE(mp->m_quotainfo->qi_gquotaip);
372 mp->m_quotainfo->qi_gquotaip = NULL;
373 }
374 if (mp->m_quotainfo->qi_pquotaip) {
375 IRELE(mp->m_quotainfo->qi_pquotaip);
376 mp->m_quotainfo->qi_pquotaip = NULL;
377 }
378 }
379 }
380
381 STATIC int
382 xfs_qm_dqattach_one(
383 xfs_inode_t *ip,
384 xfs_dqid_t id,
385 uint type,
386 uint doalloc,
387 xfs_dquot_t *udqhint, /* hint */
388 xfs_dquot_t **IO_idqpp)
389 {
390 xfs_dquot_t *dqp;
391 int error;
392
393 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
394 error = 0;
395
396 /*
397 * See if we already have it in the inode itself. IO_idqpp is
398 * &i_udquot or &i_gdquot. This made the code look weird, but
399 * made the logic a lot simpler.
400 */
401 dqp = *IO_idqpp;
402 if (dqp) {
403 trace_xfs_dqattach_found(dqp);
404 return 0;
405 }
406
407 /*
408 * udqhint is the i_udquot field in inode, and is non-NULL only
409 * when the type arg is group/project. Its purpose is to save a
410 * lookup by dqid (xfs_qm_dqget) by caching a group dquot inside
411 * the user dquot.
412 */
413 if (udqhint) {
414 ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
415 xfs_dqlock(udqhint);
416
417 /*
418 * No need to take dqlock to look at the id.
419 *
420 * The ID can't change until it gets reclaimed, and it won't
421 * be reclaimed as long as we have a ref from inode and we
422 * hold the ilock.
423 */
424 if (type == XFS_DQ_GROUP)
425 dqp = udqhint->q_gdquot;
426 else
427 dqp = udqhint->q_pdquot;
428 if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
429 ASSERT(*IO_idqpp == NULL);
430
431 *IO_idqpp = xfs_qm_dqhold(dqp);
432 xfs_dqunlock(udqhint);
433 return 0;
434 }
435
436 /*
437 * We can't hold a dquot lock when we call the dqget code.
438 * We'll deadlock in no time, because of (not conforming to)
439 * lock ordering - the inodelock comes before any dquot lock,
440 * and we may drop and reacquire the ilock in xfs_qm_dqget().
441 */
442 xfs_dqunlock(udqhint);
443 }
444
445 /*
446 * Find the dquot from somewhere. This bumps the
447	 * reference count of the dquot and returns it locked.
448	 * This can return ENOENT if the dquot didn't exist on
449 * disk and we didn't ask it to allocate;
450 * ESRCH if quotas got turned off suddenly.
451 */
452 error = xfs_qm_dqget(ip->i_mount, ip, id, type,
453 doalloc | XFS_QMOPT_DOWARN, &dqp);
454 if (error)
455 return error;
456
457 trace_xfs_dqattach_get(dqp);
458
459 /*
460 * dqget may have dropped and re-acquired the ilock, but it guarantees
461 * that the dquot returned is the one that should go in the inode.
462 */
463 *IO_idqpp = dqp;
464 xfs_dqunlock(dqp);
465 return 0;
466 }
467
468
469 /*
470 * Given a udquot and group/project type, attach the group/project
471 * dquot pointer to the udquot as a hint for future lookups.
472 */
473 STATIC void
474 xfs_qm_dqattach_hint(
475 struct xfs_inode *ip,
476 int type)
477 {
478 struct xfs_dquot **dqhintp;
479 struct xfs_dquot *dqp;
480 struct xfs_dquot *udq = ip->i_udquot;
481
482 ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
483
484 xfs_dqlock(udq);
485
486 if (type == XFS_DQ_GROUP) {
487 dqp = ip->i_gdquot;
488 dqhintp = &udq->q_gdquot;
489 } else {
490 dqp = ip->i_pdquot;
491 dqhintp = &udq->q_pdquot;
492 }
493
494 if (*dqhintp) {
495 struct xfs_dquot *tmp;
496
497 if (*dqhintp == dqp)
498 goto done;
499
500 tmp = *dqhintp;
501 *dqhintp = NULL;
502 xfs_qm_dqrele(tmp);
503 }
504
505 *dqhintp = xfs_qm_dqhold(dqp);
506 done:
507 xfs_dqunlock(udq);
508 }
509
510 static bool
511 xfs_qm_need_dqattach(
512 struct xfs_inode *ip)
513 {
514 struct xfs_mount *mp = ip->i_mount;
515
516 if (!XFS_IS_QUOTA_RUNNING(mp))
517 return false;
518 if (!XFS_IS_QUOTA_ON(mp))
519 return false;
520 if (!XFS_NOT_DQATTACHED(mp, ip))
521 return false;
522 if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
523 return false;
524 return true;
525 }
526
527 /*
528 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
529 * into account.
530 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
531	 * The inode may get unlocked and relocked in here, and the caller must deal with
532 * the consequences.
533 */
534 int
535 xfs_qm_dqattach_locked(
536 xfs_inode_t *ip,
537 uint flags)
538 {
539 xfs_mount_t *mp = ip->i_mount;
540 uint nquotas = 0;
541 int error = 0;
542
543 if (!xfs_qm_need_dqattach(ip))
544 return 0;
545
546 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
547
548 if (XFS_IS_UQUOTA_ON(mp)) {
549 error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
550 flags & XFS_QMOPT_DQALLOC,
551 NULL, &ip->i_udquot);
552 if (error)
553 goto done;
554 nquotas++;
555 }
556
557 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
558 if (XFS_IS_GQUOTA_ON(mp)) {
559 error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
560 flags & XFS_QMOPT_DQALLOC,
561 ip->i_udquot, &ip->i_gdquot);
562 /*
563 * Don't worry about the udquot that we may have
564 * attached above. It'll get detached, if not already.
565 */
566 if (error)
567 goto done;
568 nquotas++;
569 }
570
571 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
572 if (XFS_IS_PQUOTA_ON(mp)) {
573 error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
574 flags & XFS_QMOPT_DQALLOC,
575 ip->i_udquot, &ip->i_pdquot);
576 /*
577 * Don't worry about the udquot that we may have
578 * attached above. It'll get detached, if not already.
579 */
580 if (error)
581 goto done;
582 nquotas++;
583 }
584
585 /*
586 * Attach this group/project quota to the user quota as a hint.
587	 * This WON'T, in general, result in thrashing.
588 */
589 if (nquotas > 1 && ip->i_udquot) {
590 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
591 ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
592 ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
593
594 /*
595 * We do not have i_udquot locked at this point, but this check
596 * is OK since we don't depend on the i_gdquot to be accurate
597 * 100% all the time. It is just a hint, and this will
598 * succeed in general.
599 */
600 if (ip->i_udquot->q_gdquot != ip->i_gdquot)
601 xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
602
603 if (ip->i_udquot->q_pdquot != ip->i_pdquot)
604 xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
605 }
606
607 done:
608 #ifdef DEBUG
609 if (!error) {
610 if (XFS_IS_UQUOTA_ON(mp))
611 ASSERT(ip->i_udquot);
612 if (XFS_IS_GQUOTA_ON(mp))
613 ASSERT(ip->i_gdquot);
614 if (XFS_IS_PQUOTA_ON(mp))
615 ASSERT(ip->i_pdquot);
616 }
617 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
618 #endif
619 return error;
620 }
621
622 int
623 xfs_qm_dqattach(
624 struct xfs_inode *ip,
625 uint flags)
626 {
627 int error;
628
629 if (!xfs_qm_need_dqattach(ip))
630 return 0;
631
632 xfs_ilock(ip, XFS_ILOCK_EXCL);
633 error = xfs_qm_dqattach_locked(ip, flags);
634 xfs_iunlock(ip, XFS_ILOCK_EXCL);
635
636 return error;
637 }
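/*
 * Usage sketch (hypothetical caller): attach dquots before charging an
 * inode for new resource usage, allocating on-disk dquots if necessary:
 *
 *	error = xfs_qm_dqattach(ip, XFS_QMOPT_DQALLOC);
 *	if (error)
 *		return error;
 *	(i_udquot/i_gdquot/i_pdquot are now set for the enabled quota types)
 */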
638
639 /*
640 * Release dquots (and their references) if any.
641	 * The inode should be locked EXCL except when this is called by
642 * xfs_ireclaim.
643 */
644 void
645 xfs_qm_dqdetach(
646 xfs_inode_t *ip)
647 {
648 if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
649 return;
650
651 trace_xfs_dquot_dqdetach(ip);
652
653 ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
654 if (ip->i_udquot) {
655 xfs_qm_dqrele(ip->i_udquot);
656 ip->i_udquot = NULL;
657 }
658 if (ip->i_gdquot) {
659 xfs_qm_dqrele(ip->i_gdquot);
660 ip->i_gdquot = NULL;
661 }
662 if (ip->i_pdquot) {
663 xfs_qm_dqrele(ip->i_pdquot);
664 ip->i_pdquot = NULL;
665 }
666 }
667
668 int
669 xfs_qm_calc_dquots_per_chunk(
670 struct xfs_mount *mp,
671 unsigned int nbblks) /* basic block units */
672 {
673 unsigned int ndquots;
674
675 ASSERT(nbblks > 0);
676 ndquots = BBTOB(nbblks);
677 do_div(ndquots, sizeof(xfs_dqblk_t));
678
679 return ndquots;
680 }
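/*
 * Worked example (illustrative, assuming a dquot cluster of
 * XFS_DQUOT_CLUSTER_SIZE_FSB == 1 filesystem block): with 4 KB blocks
 * that cluster is 8 basic blocks, so the calculation above reduces to
 *
 *	ndquots = BBTOB(8) / sizeof(xfs_dqblk_t)
 *		= 4096 / sizeof(xfs_dqblk_t)
 *
 * i.e. the dquots-per-chunk count depends only on the chunk size and
 * the on-disk dquot block size.
 */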
681
682 struct xfs_qm_isolate {
683 struct list_head buffers;
684 struct list_head dispose;
685 };
686
687 static enum lru_status
688 xfs_qm_dquot_isolate(
689 struct list_head *item,
690 spinlock_t *lru_lock,
691 void *arg)
692 {
693 struct xfs_dquot *dqp = container_of(item,
694 struct xfs_dquot, q_lru);
695 struct xfs_qm_isolate *isol = arg;
696
697 if (!xfs_dqlock_nowait(dqp))
698 goto out_miss_busy;
699
700 /*
701	 * This dquot has acquired a reference in the meantime; remove it from
702 * the freelist and try again.
703 */
704 if (dqp->q_nrefs) {
705 xfs_dqunlock(dqp);
706 XFS_STATS_INC(xs_qm_dqwants);
707
708 trace_xfs_dqreclaim_want(dqp);
709 list_del_init(&dqp->q_lru);
710 XFS_STATS_DEC(xs_qm_dquot_unused);
711 return LRU_REMOVED;
712 }
713
714 /*
715 * If the dquot is dirty, flush it. If it's already being flushed, just
716 * skip it so there is time for the IO to complete before we try to
717 * reclaim it again on the next LRU pass.
718 */
719 if (!xfs_dqflock_nowait(dqp)) {
720 xfs_dqunlock(dqp);
721 goto out_miss_busy;
722 }
723
724 if (XFS_DQ_IS_DIRTY(dqp)) {
725 struct xfs_buf *bp = NULL;
726 int error;
727
728 trace_xfs_dqreclaim_dirty(dqp);
729
730 /* we have to drop the LRU lock to flush the dquot */
731 spin_unlock(lru_lock);
732
733 error = xfs_qm_dqflush(dqp, &bp);
734 if (error) {
735 xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
736 __func__, dqp);
737 goto out_unlock_dirty;
738 }
739
740 xfs_buf_delwri_queue(bp, &isol->buffers);
741 xfs_buf_relse(bp);
742 goto out_unlock_dirty;
743 }
744 xfs_dqfunlock(dqp);
745
746 /*
747 * Prevent lookups now that we are past the point of no return.
748 */
749 dqp->dq_flags |= XFS_DQ_FREEING;
750 xfs_dqunlock(dqp);
751
752 ASSERT(dqp->q_nrefs == 0);
753 list_move_tail(&dqp->q_lru, &isol->dispose);
754 XFS_STATS_DEC(xs_qm_dquot_unused);
755 trace_xfs_dqreclaim_done(dqp);
756 XFS_STATS_INC(xs_qm_dqreclaims);
757 return LRU_REMOVED;
758
759 out_miss_busy:
760 trace_xfs_dqreclaim_busy(dqp);
761 XFS_STATS_INC(xs_qm_dqreclaim_misses);
762 return LRU_SKIP;
763
764 out_unlock_dirty:
765 trace_xfs_dqreclaim_busy(dqp);
766 XFS_STATS_INC(xs_qm_dqreclaim_misses);
767 xfs_dqunlock(dqp);
768 spin_lock(lru_lock);
769 return LRU_RETRY;
770 }
771
772 static unsigned long
773 xfs_qm_shrink_scan(
774 struct shrinker *shrink,
775 struct shrink_control *sc)
776 {
777 struct xfs_quotainfo *qi = container_of(shrink,
778 struct xfs_quotainfo, qi_shrinker);
779 struct xfs_qm_isolate isol;
780 unsigned long freed;
781 int error;
782 unsigned long nr_to_scan = sc->nr_to_scan;
783
784 if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
785 return 0;
786
787 INIT_LIST_HEAD(&isol.buffers);
788 INIT_LIST_HEAD(&isol.dispose);
789
790 freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate, &isol,
791 &nr_to_scan);
792
793 error = xfs_buf_delwri_submit(&isol.buffers);
794 if (error)
795 xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
796
797 while (!list_empty(&isol.dispose)) {
798 struct xfs_dquot *dqp;
799
800 dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
801 list_del_init(&dqp->q_lru);
802 xfs_qm_dqfree_one(dqp);
803 }
804
805 return freed;
806 }
807
808 static unsigned long
809 xfs_qm_shrink_count(
810 struct shrinker *shrink,
811 struct shrink_control *sc)
812 {
813 struct xfs_quotainfo *qi = container_of(shrink,
814 struct xfs_quotainfo, qi_shrinker);
815
816 return list_lru_count_node(&qi->qi_lru, sc->nid);
817 }
818
819 /*
820 * This initializes all the quota information that's kept in the
821	 * mount structure.
822 */
823 STATIC int
824 xfs_qm_init_quotainfo(
825 xfs_mount_t *mp)
826 {
827 xfs_quotainfo_t *qinf;
828 int error;
829 xfs_dquot_t *dqp;
830
831 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
832
833 qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
834
835 if ((error = list_lru_init(&qinf->qi_lru))) {
836 kmem_free(qinf);
837 mp->m_quotainfo = NULL;
838 return error;
839 }
840
841 /*
842 * See if quotainodes are setup, and if not, allocate them,
843 * and change the superblock accordingly.
844 */
845 if ((error = xfs_qm_init_quotainos(mp))) {
846 list_lru_destroy(&qinf->qi_lru);
847 kmem_free(qinf);
848 mp->m_quotainfo = NULL;
849 return error;
850 }
851
852 INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
853 INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
854 INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
855 mutex_init(&qinf->qi_tree_lock);
856
857 /* mutex used to serialize quotaoffs */
858 mutex_init(&qinf->qi_quotaofflock);
859
860 /* Precalc some constants */
861 qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
862 qinf->qi_dqperchunk = xfs_qm_calc_dquots_per_chunk(mp,
863 qinf->qi_dqchunklen);
864
865 mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
866
867 /*
868 * We try to get the limits from the superuser's limits fields.
869 * This is quite hacky, but it is standard quota practice.
870 *
871 * We look at the USR dquot with id == 0 first, but if user quotas
872	 * are not enabled we go to the GRP dquot with id == 0.
873 * We don't really care to keep separate default limits for user
874 * and group quotas, at least not at this point.
875 *
876 * Since we may not have done a quotacheck by this point, just read
877 * the dquot without attaching it to any hashtables or lists.
878 */
879 error = xfs_qm_dqread(mp, 0,
880 XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
881 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
882 XFS_DQ_PROJ),
883 XFS_QMOPT_DOWARN, &dqp);
884 if (!error) {
885 xfs_disk_dquot_t *ddqp = &dqp->q_core;
886
887 /*
888 * The warnings and timers set the grace period given to
889	 * a user or group before writes are no longer allowed.
890	 * If the stored value is zero, a default is used.
891 */
892 qinf->qi_btimelimit = ddqp->d_btimer ?
893 be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
894 qinf->qi_itimelimit = ddqp->d_itimer ?
895 be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
896 qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
897 be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
898 qinf->qi_bwarnlimit = ddqp->d_bwarns ?
899 be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
900 qinf->qi_iwarnlimit = ddqp->d_iwarns ?
901 be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
902 qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
903 be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
904 qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
905 qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
906 qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
907 qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
908 qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
909 qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
910
911 xfs_qm_dqdestroy(dqp);
912 } else {
913 qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
914 qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
915 qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
916 qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
917 qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
918 qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
919 }
920
921 qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
922 qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
923 qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
924 qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
925 register_shrinker(&qinf->qi_shrinker);
926 return 0;
927 }
928
929
930 /*
931 * Gets called when unmounting a filesystem or when all quotas get
932 * turned off.
933	 * This releases the quota inodes, destroys locks and frees the quotainfo itself.
934 */
935 void
936 xfs_qm_destroy_quotainfo(
937 xfs_mount_t *mp)
938 {
939 xfs_quotainfo_t *qi;
940
941 qi = mp->m_quotainfo;
942 ASSERT(qi != NULL);
943
944 unregister_shrinker(&qi->qi_shrinker);
945 list_lru_destroy(&qi->qi_lru);
946
947 if (qi->qi_uquotaip) {
948 IRELE(qi->qi_uquotaip);
949 qi->qi_uquotaip = NULL; /* paranoia */
950 }
951 if (qi->qi_gquotaip) {
952 IRELE(qi->qi_gquotaip);
953 qi->qi_gquotaip = NULL;
954 }
955 if (qi->qi_pquotaip) {
956 IRELE(qi->qi_pquotaip);
957 qi->qi_pquotaip = NULL;
958 }
959 mutex_destroy(&qi->qi_quotaofflock);
960 kmem_free(qi);
961 mp->m_quotainfo = NULL;
962 }
963
964 /*
965	 * Create an inode and return with a reference already taken, but unlocked.
966	 * This is how we create quota inodes.
967 */
968 STATIC int
969 xfs_qm_qino_alloc(
970 xfs_mount_t *mp,
971 xfs_inode_t **ip,
972 __int64_t sbfields,
973 uint flags)
974 {
975 xfs_trans_t *tp;
976 int error;
977 int committed;
978
979 *ip = NULL;
980 /*
981	 * With a superblock that doesn't have a separate pquotino, we
982 * share an inode between gquota and pquota. If the on-disk
983 * superblock has GQUOTA and the filesystem is now mounted
984 * with PQUOTA, just use sb_gquotino for sb_pquotino and
985 * vice-versa.
986 */
987 if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
988 (flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
989 xfs_ino_t ino = NULLFSINO;
990
991 if ((flags & XFS_QMOPT_PQUOTA) &&
992 (mp->m_sb.sb_gquotino != NULLFSINO)) {
993 ino = mp->m_sb.sb_gquotino;
994 ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
995 } else if ((flags & XFS_QMOPT_GQUOTA) &&
996 (mp->m_sb.sb_pquotino != NULLFSINO)) {
997 ino = mp->m_sb.sb_pquotino;
998 ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
999 }
1000 if (ino != NULLFSINO) {
1001 error = xfs_iget(mp, NULL, ino, 0, 0, ip);
1002 if (error)
1003 return error;
1004 mp->m_sb.sb_gquotino = NULLFSINO;
1005 mp->m_sb.sb_pquotino = NULLFSINO;
1006 }
1007 }
1008
1009 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
1010 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
1011 XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
1012 if (error) {
1013 xfs_trans_cancel(tp, 0);
1014 return error;
1015 }
1016
1017 if (!*ip) {
1018 error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
1019 &committed);
1020 if (error) {
1021 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
1022 XFS_TRANS_ABORT);
1023 return error;
1024 }
1025 }
1026
1027 /*
1028 * Make the changes in the superblock, and log those too.
1029 * sbfields arg may contain fields other than *QUOTINO;
1030 * VERSIONNUM for example.
1031 */
1032 spin_lock(&mp->m_sb_lock);
1033 if (flags & XFS_QMOPT_SBVERSION) {
1034 ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));
1035 ASSERT((sbfields & (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1036 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO | XFS_SB_QFLAGS)) ==
1037 (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1038 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1039 XFS_SB_QFLAGS));
1040
1041 xfs_sb_version_addquota(&mp->m_sb);
1042 mp->m_sb.sb_uquotino = NULLFSINO;
1043 mp->m_sb.sb_gquotino = NULLFSINO;
1044 mp->m_sb.sb_pquotino = NULLFSINO;
1045
1046 /* qflags will get updated fully _after_ quotacheck */
1047 mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
1048 }
1049 if (flags & XFS_QMOPT_UQUOTA)
1050 mp->m_sb.sb_uquotino = (*ip)->i_ino;
1051 else if (flags & XFS_QMOPT_GQUOTA)
1052 mp->m_sb.sb_gquotino = (*ip)->i_ino;
1053 else
1054 mp->m_sb.sb_pquotino = (*ip)->i_ino;
1055 spin_unlock(&mp->m_sb_lock);
1056 xfs_mod_sb(tp, sbfields);
1057
1058 if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
1059 xfs_alert(mp, "%s failed (error %d)!", __func__, error);
1060 return error;
1061 }
1062 return 0;
1063 }
1064
1065
1066 STATIC void
1067 xfs_qm_reset_dqcounts(
1068 xfs_mount_t *mp,
1069 xfs_buf_t *bp,
1070 xfs_dqid_t id,
1071 uint type)
1072 {
1073 struct xfs_dqblk *dqb;
1074 int j;
1075
1076 trace_xfs_reset_dqcounts(bp, _RET_IP_);
1077
1078 /*
1079 * Reset all counters and timers. They'll be
1080 * started afresh by xfs_qm_quotacheck.
1081 */
1082 #ifdef DEBUG
1083 j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
1084 do_div(j, sizeof(xfs_dqblk_t));
1085 ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
1086 #endif
1087 dqb = bp->b_addr;
1088 for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
1089 struct xfs_disk_dquot *ddq;
1090
1091 ddq = (struct xfs_disk_dquot *)&dqb[j];
1092
1093 /*
1094 * Do a sanity check, and if needed, repair the dqblk. Don't
1095 * output any warnings because it's perfectly possible to
1096 * find uninitialised dquot blks. See comment in xfs_qm_dqcheck.
1097 */
1098 (void) xfs_qm_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
1099 "xfs_quotacheck");
1100 ddq->d_bcount = 0;
1101 ddq->d_icount = 0;
1102 ddq->d_rtbcount = 0;
1103 ddq->d_btimer = 0;
1104 ddq->d_itimer = 0;
1105 ddq->d_rtbtimer = 0;
1106 ddq->d_bwarns = 0;
1107 ddq->d_iwarns = 0;
1108 ddq->d_rtbwarns = 0;
1109
1110 if (xfs_sb_version_hascrc(&mp->m_sb)) {
1111 xfs_update_cksum((char *)&dqb[j],
1112 sizeof(struct xfs_dqblk),
1113 XFS_DQUOT_CRC_OFF);
1114 }
1115 }
1116 }
1117
1118 STATIC int
1119 xfs_qm_dqiter_bufs(
1120 struct xfs_mount *mp,
1121 xfs_dqid_t firstid,
1122 xfs_fsblock_t bno,
1123 xfs_filblks_t blkcnt,
1124 uint flags,
1125 struct list_head *buffer_list)
1126 {
1127 struct xfs_buf *bp;
1128 int error;
1129 int type;
1130
1131 ASSERT(blkcnt > 0);
1132 type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
1133 (flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
1134 error = 0;
1135
1136 /*
1137 * Blkcnt arg can be a very big number, and might even be
1138 * larger than the log itself. So, we have to break it up into
1139 * manageable-sized transactions.
1140 * Note that we don't start a permanent transaction here; we might
1141 * not be able to get a log reservation for the whole thing up front,
1142 * and we don't really care to either, because we just discard
1143 * everything if we were to crash in the middle of this loop.
1144 */
1145 while (blkcnt--) {
1146 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1147 XFS_FSB_TO_DADDR(mp, bno),
1148 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1149 &xfs_dquot_buf_ops);
1150
1151 /*
1152	 * CRC and validation errors will return an EFSCORRUPTED here. If
1153 * this occurs, re-read without CRC validation so that we can
1154 * repair the damage via xfs_qm_reset_dqcounts(). This process
1155 * will leave a trace in the log indicating corruption has
1156 * been detected.
1157 */
1158 if (error == EFSCORRUPTED) {
1159 error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
1160 XFS_FSB_TO_DADDR(mp, bno),
1161 mp->m_quotainfo->qi_dqchunklen, 0, &bp,
1162 NULL);
1163 }
1164
1165 if (error)
1166 break;
1167
1168 xfs_qm_reset_dqcounts(mp, bp, firstid, type);
1169 xfs_buf_delwri_queue(bp, buffer_list);
1170 xfs_buf_relse(bp);
1171
1172	 /* go on to the next block. */
1173 bno++;
1174 firstid += mp->m_quotainfo->qi_dqperchunk;
1175 }
1176
1177 return error;
1178 }
1179
1180 /*
1181 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
1182 * caller supplied function for every chunk of dquots that we find.
1183 */
1184 STATIC int
1185 xfs_qm_dqiterate(
1186 struct xfs_mount *mp,
1187 struct xfs_inode *qip,
1188 uint flags,
1189 struct list_head *buffer_list)
1190 {
1191 struct xfs_bmbt_irec *map;
1192 int i, nmaps; /* number of map entries */
1193 int error; /* return value */
1194 xfs_fileoff_t lblkno;
1195 xfs_filblks_t maxlblkcnt;
1196 xfs_dqid_t firstid;
1197 xfs_fsblock_t rablkno;
1198 xfs_filblks_t rablkcnt;
1199
1200 error = 0;
1201 /*
1202 * This looks racy, but we can't keep an inode lock across a
1203 * trans_reserve. But, this gets called during quotacheck, and that
1204 * happens only at mount time which is single threaded.
1205 */
1206 if (qip->i_d.di_nblocks == 0)
1207 return 0;
1208
1209 map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);
1210
1211 lblkno = 0;
1212 maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
1213 do {
1214 nmaps = XFS_DQITER_MAP_SIZE;
1215 /*
1216 * We aren't changing the inode itself. Just changing
1217 * some of its data. No new blocks are added here, and
1218 * the inode is never added to the transaction.
1219 */
1220 xfs_ilock(qip, XFS_ILOCK_SHARED);
1221 error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
1222 map, &nmaps, 0);
1223 xfs_iunlock(qip, XFS_ILOCK_SHARED);
1224 if (error)
1225 break;
1226
1227 ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
1228 for (i = 0; i < nmaps; i++) {
1229 ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
1230 ASSERT(map[i].br_blockcount);
1231
1232
1233 lblkno += map[i].br_blockcount;
1234
1235 if (map[i].br_startblock == HOLESTARTBLOCK)
1236 continue;
1237
1238 firstid = (xfs_dqid_t) map[i].br_startoff *
1239 mp->m_quotainfo->qi_dqperchunk;
1240 /*
1241 * Do a read-ahead on the next extent.
1242 */
1243 if ((i+1 < nmaps) &&
1244 (map[i+1].br_startblock != HOLESTARTBLOCK)) {
1245 rablkcnt = map[i+1].br_blockcount;
1246 rablkno = map[i+1].br_startblock;
1247 while (rablkcnt--) {
1248 xfs_buf_readahead(mp->m_ddev_targp,
1249 XFS_FSB_TO_DADDR(mp, rablkno),
1250 mp->m_quotainfo->qi_dqchunklen,
1251 NULL);
1252 rablkno++;
1253 }
1254 }
1255 /*
1256 * Iterate thru all the blks in the extent and
1257 * reset the counters of all the dquots inside them.
1258 */
1259 error = xfs_qm_dqiter_bufs(mp, firstid,
1260 map[i].br_startblock,
1261 map[i].br_blockcount,
1262 flags, buffer_list);
1263 if (error)
1264 goto out;
1265 }
1266 } while (nmaps > 0);
1267
1268 out:
1269 kmem_free(map);
1270 return error;
1271 }
1272
1273 /*
1274 * Called by dqusage_adjust in doing a quotacheck.
1275 *
1276	 * Given the inode and a dquot id, this updates both the incore dquot as well
1277 * as the buffer copy. This is so that once the quotacheck is done, we can
1278 * just log all the buffers, as opposed to logging numerous updates to
1279 * individual dquots.
1280 */
1281 STATIC int
1282 xfs_qm_quotacheck_dqadjust(
1283 struct xfs_inode *ip,
1284 xfs_dqid_t id,
1285 uint type,
1286 xfs_qcnt_t nblks,
1287 xfs_qcnt_t rtblks)
1288 {
1289 struct xfs_mount *mp = ip->i_mount;
1290 struct xfs_dquot *dqp;
1291 int error;
1292
1293 error = xfs_qm_dqget(mp, ip, id, type,
1294 XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
1295 if (error) {
1296 /*
1297 * Shouldn't be able to turn off quotas here.
1298 */
1299 ASSERT(error != ESRCH);
1300 ASSERT(error != ENOENT);
1301 return error;
1302 }
1303
1304 trace_xfs_dqadjust(dqp);
1305
1306 /*
1307 * Adjust the inode count and the block count to reflect this inode's
1308 * resource usage.
1309 */
1310 be64_add_cpu(&dqp->q_core.d_icount, 1);
1311 dqp->q_res_icount++;
1312 if (nblks) {
1313 be64_add_cpu(&dqp->q_core.d_bcount, nblks);
1314 dqp->q_res_bcount += nblks;
1315 }
1316 if (rtblks) {
1317 be64_add_cpu(&dqp->q_core.d_rtbcount, rtblks);
1318 dqp->q_res_rtbcount += rtblks;
1319 }
1320
1321 /*
1322 * Set default limits, adjust timers (since we changed usages)
1323 *
1324 * There are no timers for the default values set in the root dquot.
1325 */
1326 if (dqp->q_core.d_id) {
1327 xfs_qm_adjust_dqlimits(mp, dqp);
1328 xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
1329 }
1330
1331 dqp->dq_flags |= XFS_DQ_DIRTY;
1332 xfs_qm_dqput(dqp);
1333 return 0;
1334 }
1335
1336 STATIC int
1337 xfs_qm_get_rtblks(
1338 xfs_inode_t *ip,
1339 xfs_qcnt_t *O_rtblks)
1340 {
1341 xfs_filblks_t rtblks; /* total rt blks */
1342 xfs_extnum_t idx; /* extent record index */
1343 xfs_ifork_t *ifp; /* inode fork pointer */
1344 xfs_extnum_t nextents; /* number of extent entries */
1345 int error;
1346
1347 ASSERT(XFS_IS_REALTIME_INODE(ip));
1348 ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
1349 if (!(ifp->if_flags & XFS_IFEXTENTS)) {
1350 if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
1351 return error;
1352 }
1353 rtblks = 0;
1354 nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
1355 for (idx = 0; idx < nextents; idx++)
1356 rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
1357 *O_rtblks = (xfs_qcnt_t)rtblks;
1358 return 0;
1359 }
1360
1361 /*
1362 * callback routine supplied to bulkstat(). Given an inumber, find its
1363 * dquots and update them to account for resources taken by that inode.
1364 */
1365 /* ARGSUSED */
1366 STATIC int
1367 xfs_qm_dqusage_adjust(
1368 xfs_mount_t *mp, /* mount point for filesystem */
1369 xfs_ino_t ino, /* inode number to get data for */
1370 void __user *buffer, /* not used */
1371 int ubsize, /* not used */
1372 int *ubused, /* not used */
1373 int *res) /* result code value */
1374 {
1375 xfs_inode_t *ip;
1376 xfs_qcnt_t nblks, rtblks = 0;
1377 int error;
1378
1379 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1380
1381 /*
1382 * rootino must have its resources accounted for, not so with the quota
1383 * inodes.
1384 */
1385 if (xfs_is_quota_inode(&mp->m_sb, ino)) {
1386 *res = BULKSTAT_RV_NOTHING;
1387 return XFS_ERROR(EINVAL);
1388 }
1389
1390 /*
1391 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
1392 * interface expects the inode to be exclusively locked because that's
1393 * the case in all other instances. It's OK that we do this because
1394 * quotacheck is done only at mount time.
1395 */
1396 error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
1397 if (error) {
1398 *res = BULKSTAT_RV_NOTHING;
1399 return error;
1400 }
1401
1402 ASSERT(ip->i_delayed_blks == 0);
1403
1404 if (XFS_IS_REALTIME_INODE(ip)) {
1405 /*
1406 * Walk thru the extent list and count the realtime blocks.
1407 */
1408 error = xfs_qm_get_rtblks(ip, &rtblks);
1409 if (error)
1410 goto error0;
1411 }
1412
1413 nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
1414
1415 /*
1416 * Add the (disk blocks and inode) resources occupied by this
1417 * inode to its dquots. We do this adjustment in the incore dquot,
1418 * and also copy the changes to its buffer.
1419 * We don't care about putting these changes in a transaction
1420 * envelope because if we crash in the middle of a 'quotacheck'
1421 * we have to start from the beginning anyway.
1422 * Once we're done, we'll log all the dquot bufs.
1423 *
1424 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
1425 * and quotaoffs don't race. (Quotachecks happen at mount time only).
1426 */
1427 if (XFS_IS_UQUOTA_ON(mp)) {
1428 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
1429 XFS_DQ_USER, nblks, rtblks);
1430 if (error)
1431 goto error0;
1432 }
1433
1434 if (XFS_IS_GQUOTA_ON(mp)) {
1435 error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
1436 XFS_DQ_GROUP, nblks, rtblks);
1437 if (error)
1438 goto error0;
1439 }
1440
1441 if (XFS_IS_PQUOTA_ON(mp)) {
1442 error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
1443 XFS_DQ_PROJ, nblks, rtblks);
1444 if (error)
1445 goto error0;
1446 }
1447
1448 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1449 IRELE(ip);
1450 *res = BULKSTAT_RV_DIDONE;
1451 return 0;
1452
1453 error0:
1454 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1455 IRELE(ip);
1456 *res = BULKSTAT_RV_GIVEUP;
1457 return error;
1458 }
1459
1460 STATIC int
1461 xfs_qm_flush_one(
1462 struct xfs_dquot *dqp,
1463 void *data)
1464 {
1465 struct list_head *buffer_list = data;
1466 struct xfs_buf *bp = NULL;
1467 int error = 0;
1468
1469 xfs_dqlock(dqp);
1470 if (dqp->dq_flags & XFS_DQ_FREEING)
1471 goto out_unlock;
1472 if (!XFS_DQ_IS_DIRTY(dqp))
1473 goto out_unlock;
1474
1475 xfs_dqflock(dqp);
1476 error = xfs_qm_dqflush(dqp, &bp);
1477 if (error)
1478 goto out_unlock;
1479
1480 xfs_buf_delwri_queue(bp, buffer_list);
1481 xfs_buf_relse(bp);
1482 out_unlock:
1483 xfs_dqunlock(dqp);
1484 return error;
1485 }
1486
1487 /*
1488 * Walk thru all the filesystem inodes and construct a consistent view
1489 * of the disk quota world. If the quotacheck fails, disable quotas.
1490 */
1491 int
1492 xfs_qm_quotacheck(
1493 xfs_mount_t *mp)
1494 {
1495 int done, count, error, error2;
1496 xfs_ino_t lastino;
1497 size_t structsz;
1498 uint flags;
1499 LIST_HEAD (buffer_list);
1500 struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
1501 struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
1502 struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
1503
1504 count = INT_MAX;
1505 structsz = 1;
1506 lastino = 0;
1507 flags = 0;
1508
1509 ASSERT(uip || gip || pip);
1510 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1511
1512 xfs_notice(mp, "Quotacheck needed: Please wait.");
1513
1514 /*
1515 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
1516 * their counters to zero. We need a clean slate.
1517 * We don't log our changes till later.
1518 */
1519 if (uip) {
1520 error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
1521 &buffer_list);
1522 if (error)
1523 goto error_return;
1524 flags |= XFS_UQUOTA_CHKD;
1525 }
1526
1527 if (gip) {
1528 error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
1529 &buffer_list);
1530 if (error)
1531 goto error_return;
1532 flags |= XFS_GQUOTA_CHKD;
1533 }
1534
1535 if (pip) {
1536 error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
1537 &buffer_list);
1538 if (error)
1539 goto error_return;
1540 flags |= XFS_PQUOTA_CHKD;
1541 }
1542
1543 do {
1544 /*
1545 * Iterate thru all the inodes in the file system,
1546 * adjusting the corresponding dquot counters in core.
1547 */
1548 error = xfs_bulkstat(mp, &lastino, &count,
1549 xfs_qm_dqusage_adjust,
1550 structsz, NULL, &done);
1551 if (error)
1552 break;
1553
1554 } while (!done);
1555
1556 /*
1557 * We've made all the changes that we need to make incore. Flush them
1558 * down to disk buffers if everything was updated successfully.
1559 */
1560 if (XFS_IS_UQUOTA_ON(mp)) {
1561 error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
1562 &buffer_list);
1563 }
1564 if (XFS_IS_GQUOTA_ON(mp)) {
1565 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
1566 &buffer_list);
1567 if (!error)
1568 error = error2;
1569 }
1570 if (XFS_IS_PQUOTA_ON(mp)) {
1571 error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
1572 &buffer_list);
1573 if (!error)
1574 error = error2;
1575 }
1576
1577 error2 = xfs_buf_delwri_submit(&buffer_list);
1578 if (!error)
1579 error = error2;
1580
1581 /*
1582 * We can get this error if we couldn't do a dquot allocation inside
1583 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
1584	 * dirty dquots that might be cached; we just want to get rid of them
1585 * and turn quotaoff. The dquots won't be attached to any of the inodes
1586 * at this point (because we intentionally didn't in dqget_noattach).
1587 */
1588 if (error) {
1589 xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
1590 goto error_return;
1591 }
1592
1593 /*
1594	 * If one type of quota is off, then it will lose its
1595 * quotachecked status, since we won't be doing accounting for
1596 * that type anymore.
1597 */
1598 mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
1599 mp->m_qflags |= flags;
1600
1601 error_return:
1602 while (!list_empty(&buffer_list)) {
1603 struct xfs_buf *bp =
1604 list_first_entry(&buffer_list, struct xfs_buf, b_list);
1605 list_del_init(&bp->b_list);
1606 xfs_buf_relse(bp);
1607 }
1608
1609 if (error) {
1610 xfs_warn(mp,
1611 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1612 error);
1613 /*
1614 * We must turn off quotas.
1615 */
1616 ASSERT(mp->m_quotainfo != NULL);
1617 xfs_qm_destroy_quotainfo(mp);
1618 if (xfs_mount_reset_sbqflags(mp)) {
1619 xfs_warn(mp,
1620 "Quotacheck: Failed to reset quota flags.");
1621 }
1622 } else
1623 xfs_notice(mp, "Quotacheck: Done.");
1624 return (error);
1625 }
1626
1627 /*
1628 * This is called after the superblock has been read in and we're ready to
1629 * iget the quota inodes.
1630 */
1631 STATIC int
1632 xfs_qm_init_quotainos(
1633 xfs_mount_t *mp)
1634 {
1635 struct xfs_inode *uip = NULL;
1636 struct xfs_inode *gip = NULL;
1637 struct xfs_inode *pip = NULL;
1638 int error;
1639 __int64_t sbflags = 0;
1640 uint flags = 0;
1641
1642 ASSERT(mp->m_quotainfo);
1643
1644 /*
1645 * Get the uquota and gquota inodes
1646 */
1647 if (xfs_sb_version_hasquota(&mp->m_sb)) {
1648 if (XFS_IS_UQUOTA_ON(mp) &&
1649 mp->m_sb.sb_uquotino != NULLFSINO) {
1650 ASSERT(mp->m_sb.sb_uquotino > 0);
1651 error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
1652 0, 0, &uip);
1653 if (error)
1654 return XFS_ERROR(error);
1655 }
1656 if (XFS_IS_GQUOTA_ON(mp) &&
1657 mp->m_sb.sb_gquotino != NULLFSINO) {
1658 ASSERT(mp->m_sb.sb_gquotino > 0);
1659 error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
1660 0, 0, &gip);
1661 if (error)
1662 goto error_rele;
1663 }
1664 if (XFS_IS_PQUOTA_ON(mp) &&
1665 mp->m_sb.sb_pquotino != NULLFSINO) {
1666 ASSERT(mp->m_sb.sb_pquotino > 0);
1667 error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
1668 0, 0, &pip);
1669 if (error)
1670 goto error_rele;
1671 }
1672 } else {
1673 flags |= XFS_QMOPT_SBVERSION;
1674 sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
1675 XFS_SB_GQUOTINO | XFS_SB_PQUOTINO |
1676 XFS_SB_QFLAGS);
1677 }
1678
1679 /*
1680 * Create the three inodes, if they don't exist already. The changes
1681 * made above will get added to a transaction and logged in one of
1682 * the qino_alloc calls below. If the device is readonly,
1683 * temporarily switch to read-write to do this.
1684 */
1685 if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
1686 error = xfs_qm_qino_alloc(mp, &uip,
1687 sbflags | XFS_SB_UQUOTINO,
1688 flags | XFS_QMOPT_UQUOTA);
1689 if (error)
1690 goto error_rele;
1691
1692 flags &= ~XFS_QMOPT_SBVERSION;
1693 }
1694 if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
1695 error = xfs_qm_qino_alloc(mp, &gip,
1696 sbflags | XFS_SB_GQUOTINO,
1697 flags | XFS_QMOPT_GQUOTA);
1698 if (error)
1699 goto error_rele;
1700
1701 flags &= ~XFS_QMOPT_SBVERSION;
1702 }
1703 if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
1704 error = xfs_qm_qino_alloc(mp, &pip,
1705 sbflags | XFS_SB_PQUOTINO,
1706 flags | XFS_QMOPT_PQUOTA);
1707 if (error)
1708 goto error_rele;
1709 }
1710
1711 mp->m_quotainfo->qi_uquotaip = uip;
1712 mp->m_quotainfo->qi_gquotaip = gip;
1713 mp->m_quotainfo->qi_pquotaip = pip;
1714
1715 return 0;
1716
1717 error_rele:
1718 if (uip)
1719 IRELE(uip);
1720 if (gip)
1721 IRELE(gip);
1722 if (pip)
1723 IRELE(pip);
1724 return XFS_ERROR(error);
1725 }
1726
1727 STATIC void
1728 xfs_qm_dqfree_one(
1729 struct xfs_dquot *dqp)
1730 {
1731 struct xfs_mount *mp = dqp->q_mount;
1732 struct xfs_quotainfo *qi = mp->m_quotainfo;
1733
1734 mutex_lock(&qi->qi_tree_lock);
1735 radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
1736 be32_to_cpu(dqp->q_core.d_id));
1737
1738 qi->qi_dquots--;
1739 mutex_unlock(&qi->qi_tree_lock);
1740
1741 xfs_qm_dqdestroy(dqp);
1742 }
1743
1744 /*
1745 * Start a transaction and write the incore superblock changes to
1746 * disk. flags parameter indicates which fields have changed.
1747 */
1748 int
1749 xfs_qm_write_sb_changes(
1750 xfs_mount_t *mp,
1751 __int64_t flags)
1752 {
1753 xfs_trans_t *tp;
1754 int error;
1755
1756 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
1757 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
1758 if (error) {
1759 xfs_trans_cancel(tp, 0);
1760 return error;
1761 }
1762
1763 xfs_mod_sb(tp, flags);
1764 error = xfs_trans_commit(tp, 0);
1765
1766 return error;
1767 }
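/*
 * Usage sketch: xfs_qm_mount_quotas() above uses this helper to push a
 * qflags change to disk:
 *
 *	if (xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS))
 *		xfs_alert(mp, "Superblock update failed!");
 */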
1768
1769
1770 /* --------------- utility functions for vnodeops ---------------- */
1771
1772
1773 /*
1774	 * Given an inode, a uid, gid and prid, make sure that we have
1775 * allocated relevant dquot(s) on disk, and that we won't exceed inode
1776 * quotas by creating this file.
1777 * This also attaches dquot(s) to the given inode after locking it,
1778 * and returns the dquots corresponding to the uid and/or gid.
1779 *
1780 * in : inode (unlocked)
1781 * out : udquot, gdquot with references taken and unlocked
1782 */
1783 int
1784 xfs_qm_vop_dqalloc(
1785 struct xfs_inode *ip,
1786 xfs_dqid_t uid,
1787 xfs_dqid_t gid,
1788 prid_t prid,
1789 uint flags,
1790 struct xfs_dquot **O_udqpp,
1791 struct xfs_dquot **O_gdqpp,
1792 struct xfs_dquot **O_pdqpp)
1793 {
1794 struct xfs_mount *mp = ip->i_mount;
1795 struct xfs_dquot *uq = NULL;
1796 struct xfs_dquot *gq = NULL;
1797 struct xfs_dquot *pq = NULL;
1798 int error;
1799 uint lockflags;
1800
1801 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
1802 return 0;
1803
1804 lockflags = XFS_ILOCK_EXCL;
1805 xfs_ilock(ip, lockflags);
1806
1807 if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
1808 gid = ip->i_d.di_gid;
1809
1810 /*
1811 * Attach the dquot(s) to this inode, doing a dquot allocation
1812 * if necessary. The dquot(s) will not be locked.
1813 */
1814 if (XFS_NOT_DQATTACHED(mp, ip)) {
1815 error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
1816 if (error) {
1817 xfs_iunlock(ip, lockflags);
1818 return error;
1819 }
1820 }
1821
1822 if ((flags & XFS_QMOPT_UQUOTA) && XFS_IS_UQUOTA_ON(mp)) {
1823 if (ip->i_d.di_uid != uid) {
1824 /*
1825 * What we need is the dquot that has this uid, and
1826 * if we send the inode to dqget, the uid of the inode
1827 * takes priority over what's sent in the uid argument.
1828 * We must unlock inode here before calling dqget if
1829 * we're not sending the inode, because otherwise
1830 * we'll deadlock by doing trans_reserve while
1831 * holding ilock.
1832 */
1833 xfs_iunlock(ip, lockflags);
1834 error = xfs_qm_dqget(mp, NULL, uid,
1835 XFS_DQ_USER,
1836 XFS_QMOPT_DQALLOC |
1837 XFS_QMOPT_DOWARN,
1838 &uq);
1839 if (error) {
1840 ASSERT(error != ENOENT);
1841 return error;
1842 }
1843 /*
1844 * Get the ilock in the right order.
1845 */
1846 xfs_dqunlock(uq);
1847 lockflags = XFS_ILOCK_SHARED;
1848 xfs_ilock(ip, lockflags);
1849 } else {
1850 /*
1851 * Take an extra reference, because we'll return
1852 * this to caller
1853 */
1854 ASSERT(ip->i_udquot);
1855 uq = xfs_qm_dqhold(ip->i_udquot);
1856 }
1857 }
1858 if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
1859 if (ip->i_d.di_gid != gid) {
1860 xfs_iunlock(ip, lockflags);
1861 error = xfs_qm_dqget(mp, NULL, gid,
1862 XFS_DQ_GROUP,
1863 XFS_QMOPT_DQALLOC |
1864 XFS_QMOPT_DOWARN,
1865 &gq);
1866 if (error) {
1867 ASSERT(error != ENOENT);
1868 goto error_rele;
1869 }
1870 xfs_dqunlock(gq);
1871 lockflags = XFS_ILOCK_SHARED;
1872 xfs_ilock(ip, lockflags);
1873 } else {
1874 ASSERT(ip->i_gdquot);
1875 gq = xfs_qm_dqhold(ip->i_gdquot);
1876 }
1877 }
1878 if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
1879 if (xfs_get_projid(ip) != prid) {
1880 xfs_iunlock(ip, lockflags);
1881 error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
1882 XFS_DQ_PROJ,
1883 XFS_QMOPT_DQALLOC |
1884 XFS_QMOPT_DOWARN,
1885 &pq);
1886 if (error) {
1887 ASSERT(error != ENOENT);
1888 goto error_rele;
1889 }
1890 xfs_dqunlock(pq);
1891 lockflags = XFS_ILOCK_SHARED;
1892 xfs_ilock(ip, lockflags);
1893 } else {
1894 ASSERT(ip->i_pdquot);
1895 pq = xfs_qm_dqhold(ip->i_pdquot);
1896 }
1897 }
1898 if (uq)
1899 trace_xfs_dquot_dqalloc(ip);
1900
1901 xfs_iunlock(ip, lockflags);
1902 if (O_udqpp)
1903 *O_udqpp = uq;
1904 else if (uq)
1905 xfs_qm_dqrele(uq);
1906 if (O_gdqpp)
1907 *O_gdqpp = gq;
1908 else if (gq)
1909 xfs_qm_dqrele(gq);
1910 if (O_pdqpp)
1911 *O_pdqpp = pq;
1912 else if (pq)
1913 xfs_qm_dqrele(pq);
1914 return 0;
1915
1916 error_rele:
1917 if (gq)
1918 xfs_qm_dqrele(gq);
1919 if (uq)
1920 xfs_qm_dqrele(uq);
1921 return error;
1922 }
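/*
 * Typical create-path usage (condensed sketch; uid/gid/prid come from
 * the caller and the details vary):
 *
 *	error = xfs_qm_vop_dqalloc(dp, uid, gid, prid,
 *				   XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
 *				   &udqp, &gdqp, &pdqp);
 *	...allocate the inode under a transaction...
 *	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
 *	...commit, then drop the caller's references...
 *	xfs_qm_dqrele(udqp);
 *	xfs_qm_dqrele(gdqp);
 *	xfs_qm_dqrele(pdqp);
 */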
1923
1924 /*
1925 * Actually transfer ownership, and do dquot modifications.
1926 * These were already reserved.
1927 */
1928 xfs_dquot_t *
1929 xfs_qm_vop_chown(
1930 xfs_trans_t *tp,
1931 xfs_inode_t *ip,
1932 xfs_dquot_t **IO_olddq,
1933 xfs_dquot_t *newdq)
1934 {
1935 xfs_dquot_t *prevdq;
1936 uint bfield = XFS_IS_REALTIME_INODE(ip) ?
1937 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
1938
1939
1940 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
1941 ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));
1942
1943 /* old dquot */
1944 prevdq = *IO_olddq;
1945 ASSERT(prevdq);
1946 ASSERT(prevdq != newdq);
1947
1948 xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
1949 xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
1950
1951 /* the sparkling new dquot */
1952 xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
1953 xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
1954
1955 /*
1956 * Take an extra reference, because the inode is going to keep
1957 * this dquot pointer even after the trans_commit.
1958 */
1959 *IO_olddq = xfs_qm_dqhold(newdq);
1960
1961 return prevdq;
1962 }
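/*
 * Caller sketch (as in the setattr path): the returned old dquot still
 * carries the caller's original reference and must be released once the
 * transaction commits:
 *
 *	olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);
 *	...
 *	xfs_qm_dqrele(olddquot);
 */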
1963
1964 /*
1965 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
1966 */
1967 int
1968 xfs_qm_vop_chown_reserve(
1969 struct xfs_trans *tp,
1970 struct xfs_inode *ip,
1971 struct xfs_dquot *udqp,
1972 struct xfs_dquot *gdqp,
1973 struct xfs_dquot *pdqp,
1974 uint flags)
1975 {
1976 struct xfs_mount *mp = ip->i_mount;
1977 uint delblks, blkflags, prjflags = 0;
1978 struct xfs_dquot *udq_unres = NULL;
1979 struct xfs_dquot *gdq_unres = NULL;
1980 struct xfs_dquot *pdq_unres = NULL;
1981 struct xfs_dquot *udq_delblks = NULL;
1982 struct xfs_dquot *gdq_delblks = NULL;
1983 struct xfs_dquot *pdq_delblks = NULL;
1984 int error;
1985
1986
1987 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
1988 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
1989
1990 delblks = ip->i_delayed_blks;
1991 blkflags = XFS_IS_REALTIME_INODE(ip) ?
1992 XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
1993
1994 if (XFS_IS_UQUOTA_ON(mp) && udqp &&
1995 ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
1996 udq_delblks = udqp;
1997 /*
1998 * If there are delayed allocation blocks, then we have to
1999 * unreserve those from the old dquot, and add them to the
2000 * new dquot.
2001 */
2002 if (delblks) {
2003 ASSERT(ip->i_udquot);
2004 udq_unres = ip->i_udquot;
2005 }
2006 }
2007 if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
2008 ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
2009 gdq_delblks = gdqp;
2010 if (delblks) {
2011 ASSERT(ip->i_gdquot);
2012 gdq_unres = ip->i_gdquot;
2013 }
2014 }
2015
2016 if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
2017 xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
2018 prjflags = XFS_QMOPT_ENOSPC;
2019 pdq_delblks = pdqp;
2020 if (delblks) {
2021 ASSERT(ip->i_pdquot);
2022 pdq_unres = ip->i_pdquot;
2023 }
2024 }
2025
2026 error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
2027 udq_delblks, gdq_delblks, pdq_delblks,
2028 ip->i_d.di_nblocks, 1,
2029 flags | blkflags | prjflags);
2030 if (error)
2031 return error;
2032
2033 /*
2034	 * Do the delayed blks reservations/unreservations now. Since these
2035 * are done without the help of a transaction, if a reservation fails
2036 * its previous reservations won't be automatically undone by trans
2037 * code. So, we have to do it manually here.
2038 */
2039 if (delblks) {
2040 /*
2041 * Do the reservations first. Unreservation can't fail.
2042 */
2043 ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
2044 ASSERT(udq_unres || gdq_unres || pdq_unres);
2045 error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2046 udq_delblks, gdq_delblks, pdq_delblks,
2047 (xfs_qcnt_t)delblks, 0,
2048 flags | blkflags | prjflags);
2049 if (error)
2050 return error;
2051 xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
2052 udq_unres, gdq_unres, pdq_unres,
2053 -((xfs_qcnt_t)delblks), 0, blkflags);
2054 }
2055
2056 return (0);
2057 }
2058
2059 int
2060 xfs_qm_vop_rename_dqattach(
2061 struct xfs_inode **i_tab)
2062 {
2063 struct xfs_mount *mp = i_tab[0]->i_mount;
2064 int i;
2065
2066 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2067 return 0;
2068
2069 for (i = 0; (i < 4 && i_tab[i]); i++) {
2070 struct xfs_inode *ip = i_tab[i];
2071 int error;
2072
2073 /*
2074 * Watch out for duplicate entries in the table.
2075 */
2076 if (i == 0 || ip != i_tab[i-1]) {
2077 if (XFS_NOT_DQATTACHED(mp, ip)) {
2078 error = xfs_qm_dqattach(ip, 0);
2079 if (error)
2080 return error;
2081 }
2082 }
2083 }
2084 return 0;
2085 }
2086
2087 void
2088 xfs_qm_vop_create_dqattach(
2089 struct xfs_trans *tp,
2090 struct xfs_inode *ip,
2091 struct xfs_dquot *udqp,
2092 struct xfs_dquot *gdqp,
2093 struct xfs_dquot *pdqp)
2094 {
2095 struct xfs_mount *mp = tp->t_mountp;
2096
2097 if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
2098 return;
2099
2100 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
2101 ASSERT(XFS_IS_QUOTA_RUNNING(mp));
2102
2103 if (udqp) {
2104 ASSERT(ip->i_udquot == NULL);
2105 ASSERT(XFS_IS_UQUOTA_ON(mp));
2106 ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
2107
2108 ip->i_udquot = xfs_qm_dqhold(udqp);
2109 xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
2110 }
2111 if (gdqp) {
2112 ASSERT(ip->i_gdquot == NULL);
2113 ASSERT(XFS_IS_GQUOTA_ON(mp));
2114 ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
2115 ip->i_gdquot = xfs_qm_dqhold(gdqp);
2116 xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
2117 }
2118 if (pdqp) {
2119 ASSERT(ip->i_pdquot == NULL);
2120 ASSERT(XFS_IS_PQUOTA_ON(mp));
2121 ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
2122
2123 ip->i_pdquot = xfs_qm_dqhold(pdqp);
2124 xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
2125 }
2126 }
2127