/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_ialloc.h"
#include "xfs_itable.h"
#include "xfs_quota.h"
#include "xfs_error.h"
#include "xfs_bmap_btree.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_cksum.h"
/*
 * The global quota manager. There is only one of these for the entire
 * system, _not_ one per file system. XQM keeps track of the overall
 * quota functionality, including maintaining the freelist and hash
 * tables of dquots.
 */
STATIC int	xfs_qm_init_quotainos(xfs_mount_t *);
STATIC int	xfs_qm_init_quotainfo(xfs_mount_t *);
STATIC void	xfs_qm_dqfree_one(struct xfs_dquot *dqp);
/*
 * We use the batch lookup interface to iterate over the dquots as it
 * currently is the only interface into the radix tree code that allows
 * fuzzy lookups instead of exact matches.  Holding the lock over multiple
 * operations is fine as all callers are used either during mount/umount
 * or quotaoff.
 */
#define XFS_DQ_LOOKUP_BATCH	32
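/*
 * Illustrative sketch (not in the original source): a walk proceeds in
 * gang lookups of up to XFS_DQ_LOOKUP_BATCH dquots at a time, roughly:
 *
 *	next_index = 0;
 *	while ((nr_found = radix_tree_gang_lookup(tree, (void **)batch,
 *			next_index, XFS_DQ_LOOKUP_BATCH)) > 0) {
 *		process batch[0] .. batch[nr_found - 1];
 *		next_index = be32_to_cpu(last->q_core.d_id) + 1;
 *	}
 *
 * The real iteration in xfs_qm_dquot_walk() below additionally handles
 * -EAGAIN restarts and filesystem corruption.
 */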
STATIC int
xfs_qm_dquot_walk(
	struct xfs_mount	*mp,
	int			type,
	int			(*execute)(struct xfs_dquot *dqp, void *data),
	void			*data)
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
	uint32_t		next_index;
	int			last_error = 0;
	int			skipped;
	int			nr_found;

restart:
	skipped = 0;
	next_index = 0;

	while (1) {
		struct xfs_dquot *batch[XFS_DQ_LOOKUP_BATCH];
		int		error = 0;
		int		i;
		mutex_lock(&qi->qi_tree_lock);
		nr_found = radix_tree_gang_lookup(tree, (void **)batch,
					next_index, XFS_DQ_LOOKUP_BATCH);
		if (!nr_found) {
			mutex_unlock(&qi->qi_tree_lock);
			break;
		}
		for (i = 0; i < nr_found; i++) {
			struct xfs_dquot *dqp = batch[i];

			next_index = be32_to_cpu(dqp->q_core.d_id) + 1;

			error = execute(batch[i], data);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}
		mutex_unlock(&qi->qi_tree_lock);

		/* bail out if the filesystem is corrupted. */
		if (last_error == -EFSCORRUPTED) {
			skipped = 0;
			break;
		}
	}
/*
 * Purge a dquot from all tracking data structures and free it.
 */
STATIC int
xfs_qm_dqpurge(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_dqlock(dqp);
	if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
		xfs_dqunlock(dqp);
		return -EAGAIN;
	}

	dqp->dq_flags |= XFS_DQ_FREEING;
	/*
	 * If we are turning this type of quotas off, we don't care
	 * about the dirty metadata sitting in this dquot. OTOH, if
	 * we're unmounting, we do care, so we flush it and wait.
	 */
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;
		/*
		 * We don't care about getting disk errors here. We need
		 * to purge this dquot anyway, so we go ahead regardless.
		 */
		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(mp, "%s: dquot %p flush failed",
				__func__, dqp);
		} else {
			error = xfs_bwrite(bp);
			xfs_buf_relse(bp);
		}
	ASSERT(atomic_read(&dqp->q_pincount) == 0);
	ASSERT(XFS_FORCED_SHUTDOWN(mp) ||
	       !(dqp->q_logitem.qli_item.li_flags & XFS_LI_IN_AIL));
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	/*
	 * We move dquots to the freelist as soon as their reference count
	 * hits zero, so it really should be on the freelist here.
	 */
	ASSERT(!list_empty(&dqp->q_lru));
	list_lru_del(&qi->qi_lru, &dqp->q_lru);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	xfs_qm_dqdestroy(dqp);
	return 0;
}
/*
 * Purge the dquot cache.
 */
void
xfs_qm_dqpurge_all(
	struct xfs_mount	*mp,
	uint			flags)
{
	if (flags & XFS_QMOPT_UQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_GQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
	if (flags & XFS_QMOPT_PQUOTA)
		xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_dqpurge, NULL);
}
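/*
 * Callers pass XFS_QMOPT_QUOTALL to purge every quota type's radix tree
 * at once; see xfs_qm_unmount() below.
 */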
/*
 * Just destroy the quotainfo structure.
 */
void
xfs_qm_unmount(
	struct xfs_mount	*mp)
{
	if (mp->m_quotainfo) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		xfs_qm_destroy_quotainfo(mp);
	}
}
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);
	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_pquotaip) {
			IRELE(mp->m_quotainfo->qi_pquotaip);
			mp->m_quotainfo->qi_pquotaip = NULL;
		}
	}
}
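/*
 * Each IRELE drops the reference taken by xfs_iget when the quota inodes
 * were looked up, and the pointers are cleared so nothing can reference
 * the inodes afterwards.
 */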
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a
	 * lot simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;
	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
static bool
xfs_qm_need_dqattach(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;

	if (!XFS_IS_QUOTA_RUNNING(mp))
		return false;
	if (!XFS_IS_QUOTA_ON(mp))
		return false;
	if (!XFS_NOT_DQATTACHED(mp, ip))
		return false;
	if (xfs_is_quota_inode(&mp->m_sb, ip->i_ino))
		return false;
	return true;
}
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}
	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}
	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}
done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}
/*
 * Release dquots (and their references) if any.
 * The inode should be locked EXCL except when this's called by
 * ddelete.
 */
void
xfs_qm_dqdetach(
	xfs_inode_t	*ip)
{
	if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
		return;

	trace_xfs_dquot_dqdetach(ip);

	ASSERT(!xfs_is_quota_inode(&ip->i_mount->m_sb, ip->i_ino));
	if (ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	if (ip->i_pdquot) {
		xfs_qm_dqrele(ip->i_pdquot);
		ip->i_pdquot = NULL;
	}
}
struct xfs_qm_isolate {
	struct list_head	buffers;
	struct list_head	dispose;
};
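/*
 * "buffers" collects dirty dquot buffers queued for delayed write by the
 * isolate callback below; "dispose" collects dquots pulled off the LRU
 * that are ready to be freed.
 */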
static enum lru_status
xfs_qm_dquot_isolate(
	struct list_head	*item,
	spinlock_t		*lru_lock,
	void			*arg)
		__releases(lru_lock) __acquires(lru_lock)
{
	struct xfs_dquot	*dqp = container_of(item,
						struct xfs_dquot, q_lru);
	struct xfs_qm_isolate	*isol = arg;
	if (!xfs_dqlock_nowait(dqp))
		goto out_miss_busy;

	/*
	 * This dquot has acquired a reference in the meantime; remove it from
	 * the freelist and try again.
	 */
	if (dqp->q_nrefs) {
		xfs_dqunlock(dqp);
		XFS_STATS_INC(xs_qm_dqwants);

		trace_xfs_dqreclaim_want(dqp);
		list_del_init(&dqp->q_lru);
		XFS_STATS_DEC(xs_qm_dquot_unused);
		return LRU_REMOVED;
	}
	/*
	 * If the dquot is dirty, flush it. If it's already being flushed, just
	 * skip it so there is time for the IO to complete before we try to
	 * reclaim it again on the next LRU pass.
	 */
	if (!xfs_dqflock_nowait(dqp)) {
		xfs_dqunlock(dqp);
		goto out_miss_busy;
	}
	if (XFS_DQ_IS_DIRTY(dqp)) {
		struct xfs_buf	*bp = NULL;
		int		error;

		trace_xfs_dqreclaim_dirty(dqp);

		/* we have to drop the LRU lock to flush the dquot */
		spin_unlock(lru_lock);

		error = xfs_qm_dqflush(dqp, &bp);
		if (error) {
			xfs_warn(dqp->q_mount, "%s: dquot %p flush failed",
				 __func__, dqp);
			goto out_unlock_dirty;
		}

		xfs_buf_delwri_queue(bp, &isol->buffers);
		xfs_buf_relse(bp);
		goto out_unlock_dirty;
	}
	xfs_dqfunlock(dqp);

	/*
	 * Prevent lookups now that we are past the point of no return.
	 */
	dqp->dq_flags |= XFS_DQ_FREEING;
	xfs_dqunlock(dqp);
	ASSERT(dqp->q_nrefs == 0);
	list_move_tail(&dqp->q_lru, &isol->dispose);
	XFS_STATS_DEC(xs_qm_dquot_unused);

	trace_xfs_dqreclaim_done(dqp);
	XFS_STATS_INC(xs_qm_dqreclaims);
	return LRU_REMOVED;
out_miss_busy:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	return LRU_SKIP;

out_unlock_dirty:
	trace_xfs_dqreclaim_busy(dqp);
	XFS_STATS_INC(xs_qm_dqreclaim_misses);
	xfs_dqunlock(dqp);
	return LRU_RETRY;
}
static unsigned long
xfs_qm_shrink_scan(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);
	struct xfs_qm_isolate	isol;
	unsigned long		freed;
	int			error;
	unsigned long		nr_to_scan = sc->nr_to_scan;
	if ((sc->gfp_mask & (__GFP_FS|__GFP_WAIT)) != (__GFP_FS|__GFP_WAIT))
		return 0;

	INIT_LIST_HEAD(&isol.buffers);
	INIT_LIST_HEAD(&isol.dispose);
	freed = list_lru_walk_node(&qi->qi_lru, sc->nid, xfs_qm_dquot_isolate,
				   &isol, &nr_to_scan);

	error = xfs_buf_delwri_submit(&isol.buffers);
	if (error)
		xfs_warn(NULL, "%s: dquot reclaim failed", __func__);
	while (!list_empty(&isol.dispose)) {
		struct xfs_dquot	*dqp;

		dqp = list_first_entry(&isol.dispose, struct xfs_dquot, q_lru);
		list_del_init(&dqp->q_lru);
		xfs_qm_dqfree_one(dqp);
	}
static unsigned long
xfs_qm_shrink_count(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_quotainfo	*qi = container_of(shrink,
					struct xfs_quotainfo, qi_shrinker);

	return list_lru_count_node(&qi->qi_lru, sc->nid);
}
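/*
 * The shrinker contract splits counting from reclaim: count_objects
 * (above) only reports the per-node LRU length, while scan_objects
 * (xfs_qm_shrink_scan) does the actual isolation and freeing.
 */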
/*
 * This initializes all the quota information that's kept in the
 * mount structure
 */
STATIC int
xfs_qm_init_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qinf;
	int		error;
	xfs_dquot_t	*dqp;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);

	error = list_lru_init(&qinf->qi_lru);
	if (error)
		goto out_free_qinf;
	/*
	 * See if quotainodes are setup, and if not, allocate them,
	 * and change the superblock accordingly.
	 */
	error = xfs_qm_init_quotainos(mp);
	if (error)
		goto out_free_lru;
	INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
	mutex_init(&qinf->qi_tree_lock);

	/* mutex used to serialize quotaoffs */
	mutex_init(&qinf->qi_quotaofflock);
	/* Precalc some constants */
	qinf->qi_dqchunklen = XFS_FSB_TO_BB(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	qinf->qi_dqperchunk = xfs_calc_dquots_per_chunk(qinf->qi_dqchunklen);

	mp->m_qflags |= (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_CHKD);
	/*
	 * We try to get the limits from the superuser's limits fields.
	 * This is quite hacky, but it is standard quota practice.
	 *
	 * We look at the USR dquot with id == 0 first, but if user quotas
	 * are not enabled we goto the GRP dquot with id == 0.
	 * We don't really care to keep separate default limits for user
	 * and group quotas, at least not at this point.
	 *
	 * Since we may not have done a quotacheck by this point, just read
	 * the dquot without attaching it to any hashtables or lists.
	 */
	error = xfs_qm_dqread(mp, 0,
			XFS_IS_UQUOTA_RUNNING(mp) ? XFS_DQ_USER :
			 (XFS_IS_GQUOTA_RUNNING(mp) ? XFS_DQ_GROUP :
			  XFS_DQ_PROJ),
			XFS_QMOPT_DOWARN, &dqp);
	if (error == 0) {
		xfs_disk_dquot_t	*ddqp = &dqp->q_core;

		/*
		 * The warnings and timers set the grace period given to
		 * a user or group before he or she can not perform any
		 * more writing. If it is zero, a default is used.
		 */
		qinf->qi_btimelimit = ddqp->d_btimer ?
			be32_to_cpu(ddqp->d_btimer) : XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = ddqp->d_itimer ?
			be32_to_cpu(ddqp->d_itimer) : XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = ddqp->d_rtbtimer ?
			be32_to_cpu(ddqp->d_rtbtimer) : XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = ddqp->d_bwarns ?
			be16_to_cpu(ddqp->d_bwarns) : XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = ddqp->d_iwarns ?
			be16_to_cpu(ddqp->d_iwarns) : XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = ddqp->d_rtbwarns ?
			be16_to_cpu(ddqp->d_rtbwarns) : XFS_QM_RTBWARNLIMIT;
		qinf->qi_bhardlimit = be64_to_cpu(ddqp->d_blk_hardlimit);
		qinf->qi_bsoftlimit = be64_to_cpu(ddqp->d_blk_softlimit);
		qinf->qi_ihardlimit = be64_to_cpu(ddqp->d_ino_hardlimit);
		qinf->qi_isoftlimit = be64_to_cpu(ddqp->d_ino_softlimit);
		qinf->qi_rtbhardlimit = be64_to_cpu(ddqp->d_rtb_hardlimit);
		qinf->qi_rtbsoftlimit = be64_to_cpu(ddqp->d_rtb_softlimit);
		xfs_qm_dqdestroy(dqp);
	} else {
		qinf->qi_btimelimit = XFS_QM_BTIMELIMIT;
		qinf->qi_itimelimit = XFS_QM_ITIMELIMIT;
		qinf->qi_rtbtimelimit = XFS_QM_RTBTIMELIMIT;
		qinf->qi_bwarnlimit = XFS_QM_BWARNLIMIT;
		qinf->qi_iwarnlimit = XFS_QM_IWARNLIMIT;
		qinf->qi_rtbwarnlimit = XFS_QM_RTBWARNLIMIT;
	}
	qinf->qi_shrinker.count_objects = xfs_qm_shrink_count;
	qinf->qi_shrinker.scan_objects = xfs_qm_shrink_scan;
	qinf->qi_shrinker.seeks = DEFAULT_SEEKS;
	qinf->qi_shrinker.flags = SHRINKER_NUMA_AWARE;
	register_shrinker(&qinf->qi_shrinker);
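/*
 * SHRINKER_NUMA_AWARE makes the VM invoke this shrinker once per NUMA
 * node, matching the per-node dquot LRU maintained in qi_lru.
 */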
out_free_lru:
	list_lru_destroy(&qinf->qi_lru);
out_free_qinf:
	kmem_free(qinf);
	mp->m_quotainfo = NULL;
	return error;
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t	*qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);
	unregister_shrinker(&qi->qi_shrinker);
	list_lru_destroy(&qi->qi_lru);
	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
/*
 * Create an inode and return with a reference already taken, but unlocked.
 * This is how we create quota inodes.
 */
STATIC int
xfs_qm_qino_alloc(
	xfs_mount_t	*mp,
	xfs_inode_t	**ip,
	uint		flags)
{
	xfs_trans_t	*tp;
	int		error;
	int		committed;

	/*
	 * With superblock that doesn't have separate pquotino, we
	 * share an inode between gquota and pquota. If the on-disk
	 * superblock has GQUOTA and the filesystem is now mounted
	 * with PQUOTA, just use sb_gquotino for sb_pquotino and
	 * vice-versa.
	 */
	if (!xfs_sb_version_has_pquotino(&mp->m_sb) &&
			(flags & (XFS_QMOPT_PQUOTA|XFS_QMOPT_GQUOTA))) {
		xfs_ino_t ino = NULLFSINO;
		if ((flags & XFS_QMOPT_PQUOTA) &&
			     (mp->m_sb.sb_gquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_gquotino;
			ASSERT(mp->m_sb.sb_pquotino == NULLFSINO);
		} else if ((flags & XFS_QMOPT_GQUOTA) &&
			     (mp->m_sb.sb_pquotino != NULLFSINO)) {
			ino = mp->m_sb.sb_pquotino;
			ASSERT(mp->m_sb.sb_gquotino == NULLFSINO);
		}
		if (ino != NULLFSINO) {
			error = xfs_iget(mp, NULL, ino, 0, 0, ip);
			if (error)
				return error;
			mp->m_sb.sb_gquotino = NULLFSINO;
			mp->m_sb.sb_pquotino = NULLFSINO;
		}
	}
	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QINOCREATE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_create,
				  XFS_QM_QINOCREATE_SPACE_RES(mp), 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}
	error = xfs_dir_ialloc(&tp, NULL, S_IFREG, 1, 0, 0, 1, ip,
			       &committed);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT);
		return error;
	}
	/*
	 * Make the changes in the superblock, and log those too.
	 * sbfields arg may contain fields other than *QUOTINO;
	 * VERSIONNUM for example.
	 */
	spin_lock(&mp->m_sb_lock);
	if (flags & XFS_QMOPT_SBVERSION) {
		ASSERT(!xfs_sb_version_hasquota(&mp->m_sb));

		xfs_sb_version_addquota(&mp->m_sb);
		mp->m_sb.sb_uquotino = NULLFSINO;
		mp->m_sb.sb_gquotino = NULLFSINO;
		mp->m_sb.sb_pquotino = NULLFSINO;
		/* qflags will get updated fully _after_ quotacheck */
		mp->m_sb.sb_qflags = mp->m_qflags & XFS_ALL_QUOTA_ACCT;
	}
	if (flags & XFS_QMOPT_UQUOTA)
		mp->m_sb.sb_uquotino = (*ip)->i_ino;
	else if (flags & XFS_QMOPT_GQUOTA)
		mp->m_sb.sb_gquotino = (*ip)->i_ino;
	else
		mp->m_sb.sb_pquotino = (*ip)->i_ino;
	spin_unlock(&mp->m_sb_lock);
	if ((error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES))) {
		xfs_alert(mp, "%s failed (error %d)!", __func__, error);
		return error;
	}
	return 0;
}
STATIC void
xfs_qm_reset_dqcounts(
	xfs_mount_t	*mp,
	xfs_buf_t	*bp,
	xfs_dqid_t	id,
	uint		type)
{
	struct xfs_dqblk	*dqb;
	int			j;

	trace_xfs_reset_dqcounts(bp, _RET_IP_);
	/*
	 * Reset all counters and timers. They'll be
	 * started afresh by xfs_qm_quotacheck.
	 */
#ifdef DEBUG
	j = XFS_FSB_TO_B(mp, XFS_DQUOT_CLUSTER_SIZE_FSB);
	do_div(j, sizeof(xfs_dqblk_t));
	ASSERT(mp->m_quotainfo->qi_dqperchunk == j);
#endif
	dqb = bp->b_addr;
	for (j = 0; j < mp->m_quotainfo->qi_dqperchunk; j++) {
		struct xfs_disk_dquot	*ddq;

		ddq = (struct xfs_disk_dquot *)&dqb[j];
		/*
		 * Do a sanity check, and if needed, repair the dqblk. Don't
		 * output any warnings because it's perfectly possible to
		 * find uninitialised dquot blks. See comment in xfs_dqcheck.
		 */
		xfs_dqcheck(mp, ddq, id+j, type, XFS_QMOPT_DQREPAIR,
			    "xfs_quotacheck");
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			xfs_update_cksum((char *)&dqb[j],
					 sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}
}
STATIC int
xfs_qm_dqiter_bufs(
	struct xfs_mount	*mp,
	xfs_dqid_t		firstid,
	xfs_fsblock_t		bno,
	xfs_filblks_t		blkcnt,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp;
	int			error;
	int			type;
	type = flags & XFS_QMOPT_UQUOTA ? XFS_DQ_USER :
		(flags & XFS_QMOPT_PQUOTA ? XFS_DQ_PROJ : XFS_DQ_GROUP);
	/*
	 * Blkcnt arg can be a very big number, and might even be
	 * larger than the log itself. So, we have to break it up into
	 * manageable-sized transactions.
	 * Note that we don't start a permanent transaction here; we might
	 * not be able to get a log reservation for the whole thing up front,
	 * and we don't really care to either, because we just discard
	 * everything if we were to crash in the middle of this loop.
	 */
	while (blkcnt--) {
		error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
			      XFS_FSB_TO_DADDR(mp, bno),
			      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
			      &xfs_dquot_buf_ops);
		/*
		 * CRC and validation errors will return a EFSCORRUPTED here. If
		 * this occurs, re-read without CRC validation so that we can
		 * repair the damage via xfs_qm_reset_dqcounts(). This process
		 * will leave a trace in the log indicating corruption has
		 * been detected.
		 */
		if (error == -EFSCORRUPTED) {
			error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp,
				      XFS_FSB_TO_DADDR(mp, bno),
				      mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				      NULL);
		}
		if (error)
			break;
		/*
		 * A corrupt buffer might not have a verifier attached, so
		 * make sure we have the correct one attached before writeback
		 * occurs.
		 */
		bp->b_ops = &xfs_dquot_buf_ops;
		xfs_qm_reset_dqcounts(mp, bp, firstid, type);
		xfs_buf_delwri_queue(bp, buffer_list);
		xfs_buf_relse(bp);
		/* goto the next block. */
		bno++;
		firstid += mp->m_quotainfo->qi_dqperchunk;
	}
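/*
 * Each dquot cluster holds qi_dqperchunk dquots, so stepping firstid by
 * that amount yields the id of the first dquot in the next block.
 */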
/*
 * Iterate over all allocated USR/GRP/PRJ dquots in the system, calling a
 * caller supplied function for every chunk of dquots that we find.
 */
STATIC int
xfs_qm_dqiterate(
	struct xfs_mount	*mp,
	struct xfs_inode	*qip,
	uint			flags,
	struct list_head	*buffer_list)
{
	struct xfs_bmbt_irec	*map;
	int			i, nmaps;	/* number of map entries */
	int			error;		/* return value */
	xfs_fileoff_t		lblkno;
	xfs_filblks_t		maxlblkcnt;
	xfs_dqid_t		firstid;
	xfs_fsblock_t		rablkno;
	xfs_filblks_t		rablkcnt;
	/*
	 * This looks racy, but we can't keep an inode lock across a
	 * trans_reserve. But, this gets called during quotacheck, and that
	 * happens only at mount time which is single threaded.
	 */
	if (qip->i_d.di_nblocks == 0)
		return 0;
	map = kmem_alloc(XFS_DQITER_MAP_SIZE * sizeof(*map), KM_SLEEP);

	lblkno = 0;
	maxlblkcnt = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
	do {
		uint		lock_mode;

		nmaps = XFS_DQITER_MAP_SIZE;
		/*
		 * We aren't changing the inode itself. Just changing
		 * some of its data. No new blocks are added here, and
		 * the inode is never added to the transaction.
		 */
		lock_mode = xfs_ilock_data_map_shared(qip);
		error = xfs_bmapi_read(qip, lblkno, maxlblkcnt - lblkno,
				       map, &nmaps, 0);
		xfs_iunlock(qip, lock_mode);
		if (error)
			break;

		ASSERT(nmaps <= XFS_DQITER_MAP_SIZE);
		for (i = 0; i < nmaps; i++) {
			ASSERT(map[i].br_startblock != DELAYSTARTBLOCK);
			ASSERT(map[i].br_blockcount);
			lblkno += map[i].br_blockcount;

			if (map[i].br_startblock == HOLESTARTBLOCK)
				continue;
= (xfs_dqid_t
) map
[i
].br_startoff
*
986 mp
->m_quotainfo
->qi_dqperchunk
;
			/*
			 * Do a read-ahead on the next extent.
			 */
			if ((i+1 < nmaps) &&
			    (map[i+1].br_startblock != HOLESTARTBLOCK)) {
				rablkcnt = map[i+1].br_blockcount;
				rablkno = map[i+1].br_startblock;
				while (rablkcnt--) {
					xfs_buf_readahead(mp->m_ddev_targp,
					       XFS_FSB_TO_DADDR(mp, rablkno),
					       mp->m_quotainfo->qi_dqchunklen,
					       NULL);
					rablkno++;
				}
			}
			/*
			 * Iterate thru all the blks in the extent and
			 * reset the counters of all the dquots inside them.
			 */
			error = xfs_qm_dqiter_bufs(mp, firstid,
						   map[i].br_startblock,
						   map[i].br_blockcount,
						   flags, buffer_list);
			if (error)
				goto out;
		}
	} while (nmaps > 0);
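	/*
	 * The do-while above re-reads mappings from the current offset until
	 * xfs_bmapi_read returns no further extents for the quota inode.
	 */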
/*
 * Called by dqusage_adjust in doing a quotacheck.
 *
 * Given the inode, and a dquot id this updates both the incore dquot as
 * well as the buffer copy. This is so that once the quotacheck is done, we
 * can just log all the buffers, as opposed to logging numerous updates to
 * individual dquots.
 */
STATIC int
xfs_qm_quotacheck_dqadjust(
	struct xfs_inode	*ip,
	xfs_dqid_t		id,
	uint			type,
	xfs_qcnt_t		nblks,
	xfs_qcnt_t		rtblks)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*dqp;
	int			error;
	error = xfs_qm_dqget(mp, ip, id, type,
			     XFS_QMOPT_DQALLOC | XFS_QMOPT_DOWARN, &dqp);
	if (error) {
		/*
		 * Shouldn't be able to turn off quotas here.
		 */
		ASSERT(error != -ESRCH);
		ASSERT(error != -ENOENT);
		return error;
	}
);
1054 * Adjust the inode count and the block count to reflect this inode's
1057 be64_add_cpu(&dqp
->q_core
.d_icount
, 1);
1058 dqp
->q_res_icount
++;
1060 be64_add_cpu(&dqp
->q_core
.d_bcount
, nblks
);
1061 dqp
->q_res_bcount
+= nblks
;
1064 be64_add_cpu(&dqp
->q_core
.d_rtbcount
, rtblks
);
1065 dqp
->q_res_rtbcount
+= rtblks
;
	/*
	 * Set default limits, adjust timers (since we changed usages)
	 *
	 * There are no timers for the default values set in the root dquot.
	 */
	if (dqp->q_core.d_id) {
		xfs_qm_adjust_dqlimits(mp, dqp);
		xfs_qm_adjust_dqtimers(mp, &dqp->q_core);
	}

	dqp->dq_flags |= XFS_DQ_DIRTY;
	xfs_qm_dqput(dqp);
	return 0;
}
STATIC int
xfs_qm_get_rtblks(
	xfs_inode_t	*ip,
	xfs_qcnt_t	*O_rtblks)
{
	xfs_filblks_t	rtblks;			/* total rt blks */
	xfs_extnum_t	idx;			/* extent record index */
	xfs_ifork_t	*ifp;			/* inode fork pointer */
	xfs_extnum_t	nextents;		/* number of extent entries */
	int		error;

	ASSERT(XFS_IS_REALTIME_INODE(ip));
	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
	if (!(ifp->if_flags & XFS_IFEXTENTS)) {
		if ((error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK)))
			return error;
	}
	rtblks = 0;
	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
	for (idx = 0; idx < nextents; idx++)
		rtblks += xfs_bmbt_get_blockcount(xfs_iext_get_ext(ifp, idx));
	*O_rtblks = (xfs_qcnt_t)rtblks;
	return 0;
}
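/*
 * The realtime block count is taken purely from the in-core data fork
 * extents; xfs_iread_extents is called with a NULL transaction since
 * quotacheck runs single-threaded at mount time.
 */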
/*
 * callback routine supplied to bulkstat(). Given an inumber, find its
 * dquots and update them to account for resources taken by that inode.
 */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (xfs_is_quota_inode(&mp->m_sb, ino)) {
		*res = BULKSTAT_RV_NOTHING;
		return -EINVAL;
	}
	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}
	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;
	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
STATIC int
xfs_qm_flush_one(
	struct xfs_dquot	*dqp,
	void			*data)
{
	struct list_head	*buffer_list = data;
	struct xfs_buf		*bp = NULL;
	int			error = 0;
	xfs_dqlock(dqp);
	if (dqp->dq_flags & XFS_DQ_FREEING)
		goto out_unlock;
	if (!XFS_DQ_IS_DIRTY(dqp))
		goto out_unlock;

	xfs_dqflock(dqp);
	error = xfs_qm_dqflush(dqp, &bp);
	if (error)
		goto out_unlock;

	xfs_buf_delwri_queue(bp, buffer_list);
	xfs_buf_relse(bp);
out_unlock:
	xfs_dqunlock(dqp);
	return error;
}
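/*
 * Note: xfs_qm_flush_one() only queues the dirty buffer; the whole delwri
 * list is submitted in a single batch by the quotacheck below.
 */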
/*
 * Walk thru all the filesystem inodes and construct a consistent view
 * of the disk quota world. If the quotacheck fails, disable quotas.
 */
int
xfs_qm_quotacheck(
	xfs_mount_t	*mp)
{
	int			done, count, error, error2;
	xfs_ino_t		lastino;
	size_t			structsz;
	uint			flags;
	LIST_HEAD		(buffer_list);
	struct xfs_inode	*uip = mp->m_quotainfo->qi_uquotaip;
	struct xfs_inode	*gip = mp->m_quotainfo->qi_gquotaip;
	struct xfs_inode	*pip = mp->m_quotainfo->qi_pquotaip;
	count = INT_MAX;
	structsz = 1;
	lastino = 0;
	flags = 0;

	ASSERT(uip || gip || pip);
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	xfs_notice(mp, "Quotacheck needed: Please wait.");
	/*
	 * First we go thru all the dquots on disk, USR and GRP/PRJ, and reset
	 * their counters to zero. We need a clean slate.
	 * We don't log our changes till later.
	 */
	if (uip) {
		error = xfs_qm_dqiterate(mp, uip, XFS_QMOPT_UQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_UQUOTA_CHKD;
	}
	if (gip) {
		error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_GQUOTA_CHKD;
	}
	if (pip) {
		error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
					 &buffer_list);
		if (error)
			goto error_return;
		flags |= XFS_PQUOTA_CHKD;
	}
	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters in core.
		 */
		error = xfs_bulkstat(mp, &lastino, &count,
				     xfs_qm_dqusage_adjust,
				     structsz, NULL, &done);
		if (error)
			break;
	} while (!done);
	/*
	 * We've made all the changes that we need to make incore. Flush them
	 * down to disk buffers if everything was updated successfully.
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_flush_one,
					  &buffer_list);
	}
	if (XFS_IS_GQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}
	if (XFS_IS_PQUOTA_ON(mp)) {
		error2 = xfs_qm_dquot_walk(mp, XFS_DQ_PROJ, xfs_qm_flush_one,
					   &buffer_list);
		if (!error)
			error = error2;
	}

	error2 = xfs_buf_delwri_submit(&buffer_list);
	if (!error)
		error = error2;
	/*
	 * We can get this error if we couldn't do a dquot allocation inside
	 * xfs_qm_dqusage_adjust (via bulkstat). We don't care about the
	 * dirty dquots that might be cached, we just want to get rid of them
	 * and turn quotaoff. The dquots won't be attached to any of the inodes
	 * at this point (because we intentionally didn't in dqget_noattach).
	 */
	if (error) {
		xfs_qm_dqpurge_all(mp, XFS_QMOPT_QUOTALL);
		goto error_return;
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	mp->m_qflags &= ~XFS_ALL_QUOTA_CHKD;
	mp->m_qflags |= flags;
error_return:
	while (!list_empty(&buffer_list)) {
		struct xfs_buf *bp =
			list_first_entry(&buffer_list, struct xfs_buf, b_list);
		list_del_init(&bp->b_list);
		xfs_buf_relse(bp);
	}
1358 "Quotacheck: Unsuccessful (Error %d): Disabling quotas.",
1361 * We must turn off quotas.
1363 ASSERT(mp
->m_quotainfo
!= NULL
);
1364 xfs_qm_destroy_quotainfo(mp
);
1365 if (xfs_mount_reset_sbqflags(mp
)) {
1367 "Quotacheck: Failed to reset quota flags.");
1370 xfs_notice(mp
, "Quotacheck: Done.");
/*
 * This is called from xfs_mountfs to start quotas and initialize all
 * necessary data structures like quotainfo.  This is also responsible for
 * running a quotacheck as necessary.  We are guaranteed that the superblock
 * is consistently read in at this point.
 *
 * If we fail here, the mount will continue with quota turned off. We don't
 * need to indicate success or failure at all.
 */
void
xfs_qm_mount_quotas(
	struct xfs_mount	*mp)
{
	int			error = 0;
	uint			sbf;
	/*
	 * If quotas on realtime volumes is not supported, we disable
	 * quotas immediately.
	 */
	if (mp->m_sb.sb_rextents) {
		xfs_notice(mp, "Cannot turn on quotas for realtime filesystem");
		mp->m_qflags = 0;
		goto write_changes;
	}
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * Allocate the quotainfo structure inside the mount struct, and
	 * create quotainode(s), and change/rev superblock if necessary.
	 */
	error = xfs_qm_init_quotainfo(mp);
	if (error) {
		/*
		 * We must turn off quotas.
		 */
		ASSERT(mp->m_quotainfo == NULL);
		mp->m_qflags = 0;
		goto write_changes;
	}
	/*
	 * If any of the quotas are not consistent, do a quotacheck.
	 */
	if (XFS_QM_NEED_QUOTACHECK(mp)) {
		error = xfs_qm_quotacheck(mp);
		if (error) {
			/* Quotacheck failed and disabled quotas. */
			return;
		}
	}
	/*
	 * If one type of quotas is off, then it will lose its
	 * quotachecked status, since we won't be doing accounting for
	 * that type anymore.
	 */
	if (!XFS_IS_UQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_UQUOTA_CHKD;
	if (!XFS_IS_GQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_GQUOTA_CHKD;
	if (!XFS_IS_PQUOTA_ON(mp))
		mp->m_qflags &= ~XFS_PQUOTA_CHKD;
write_changes:
	/*
	 * We actually don't have to acquire the m_sb_lock at all.
	 * This can only be called from mount, and that's single threaded. XXX
	 */
	spin_lock(&mp->m_sb_lock);
	sbf = mp->m_sb.sb_qflags;
	mp->m_sb.sb_qflags = mp->m_qflags & XFS_MOUNT_QUOTA_ALL;
	spin_unlock(&mp->m_sb_lock);
	if (sbf != (mp->m_qflags & XFS_MOUNT_QUOTA_ALL)) {
		if (xfs_qm_write_sb_changes(mp)) {
			/*
			 * We could only have been turning quotas off.
			 * We aren't in very good shape actually because
			 * the incore structures are convinced that quotas are
			 * off, but the on disk superblock doesn't know that !
			 */
			ASSERT(!(XFS_IS_QUOTA_RUNNING(mp)));
			xfs_alert(mp, "%s: Superblock update failed!",
				__func__);
		}
	}

	if (error) {
		xfs_warn(mp, "Failed to initialize disk quotas.");
		return;
	}
}
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	struct xfs_inode	*uip = NULL;
	struct xfs_inode	*gip = NULL;
	struct xfs_inode	*pip = NULL;
	int			error;
	uint			flags = 0;

	ASSERT(mp->m_quotainfo);
	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip);
			if (error)
				return error;
		}
		if (XFS_IS_GQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip);
			if (error)
				goto error_rele;
		}
		if (XFS_IS_PQUOTA_ON(mp) &&
		    mp->m_sb.sb_pquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_pquotino > 0);
			error = xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
					     0, 0, &pip);
			if (error)
				goto error_rele;
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
	}
	/*
	 * Create the three inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		error = xfs_qm_qino_alloc(mp, &uip,
					  flags | XFS_QMOPT_UQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
		error = xfs_qm_qino_alloc(mp, &gip,
					  flags | XFS_QMOPT_GQUOTA);
		if (error)
			goto error_rele;

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
		error = xfs_qm_qino_alloc(mp, &pip,
					  flags | XFS_QMOPT_PQUOTA);
		if (error)
			goto error_rele;
	}
	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;
	mp->m_quotainfo->qi_pquotaip = pip;

	return 0;
STATIC void
xfs_qm_dqfree_one(
	struct xfs_dquot	*dqp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	mutex_lock(&qi->qi_tree_lock);
	radix_tree_delete(xfs_dquot_tree(qi, dqp->q_core.d_flags),
			  be32_to_cpu(dqp->q_core.d_id));
	mutex_unlock(&qi->qi_tree_lock);

	xfs_qm_dqdestroy(dqp);
}
/*
 * Start a transaction and write the incore superblock changes to
 * disk. flags parameter indicates which fields have changed.
 */
int
xfs_qm_write_sb_changes(
	struct xfs_mount	*mp)
{
	xfs_trans_t	*tp;
	int		error;

	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_sbchange, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_mod_sb(tp, XFS_SB_QFLAGS);
	return xfs_trans_commit(tp, 0);
}
/* --------------- utility functions for vnodeops ---------------- */
/*
 * Given an inode, a uid, gid and prid make sure that we have
 * allocated relevant dquot(s) on disk, and that we won't exceed inode
 * quotas by creating this file.
 * This also attaches dquot(s) to the given inode after locking it,
 * and returns the dquots corresponding to the uid and/or gid.
 *
 * in	: inode (unlocked)
 * out	: udquot, gdquot with references taken and unlocked
 */
int
xfs_qm_vop_dqalloc(
	struct xfs_inode	*ip,
	xfs_dqid_t		uid,
	xfs_dqid_t		gid,
	prid_t			prid,
	uint			flags,
	struct xfs_dquot	**O_udqpp,
	struct xfs_dquot	**O_gdqpp,
	struct xfs_dquot	**O_pdqpp)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_dquot	*uq = NULL;
	struct xfs_dquot	*gq = NULL;
	struct xfs_dquot	*pq = NULL;
	int			error;
	uint			lockflags;
	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;

	lockflags = XFS_ILOCK_EXCL;
	xfs_ilock(ip, lockflags);
	if ((flags & XFS_QMOPT_INHERIT) && XFS_INHERIT_GID(ip))
		gid = ip->i_d.di_gid;
	/*
	 * Attach the dquot(s) to this inode, doing a dquot allocation
	 * if necessary. The dquot(s) will not be locked.
	 */
	if (XFS_NOT_DQATTACHED(mp, ip)) {
		error = xfs_qm_dqattach_locked(ip, XFS_QMOPT_DQALLOC);
		if (error) {
			xfs_iunlock(ip, lockflags);
			return error;
		}
	}
& XFS_QMOPT_UQUOTA
) && XFS_IS_UQUOTA_ON(mp
)) {
1653 if (ip
->i_d
.di_uid
!= uid
) {
1655 * What we need is the dquot that has this uid, and
1656 * if we send the inode to dqget, the uid of the inode
1657 * takes priority over what's sent in the uid argument.
1658 * We must unlock inode here before calling dqget if
1659 * we're not sending the inode, because otherwise
1660 * we'll deadlock by doing trans_reserve while
1663 xfs_iunlock(ip
, lockflags
);
1664 error
= xfs_qm_dqget(mp
, NULL
, uid
,
1670 ASSERT(error
!= -ENOENT
);
			/*
			 * Get the ilock in the right order.
			 */
			xfs_dqunlock(uq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			/*
			 * Take an extra reference, because we'll return
			 * this to caller
			 */
			ASSERT(ip->i_udquot);
			uq = xfs_qm_dqhold(ip->i_udquot);
		}
	}
	if ((flags & XFS_QMOPT_GQUOTA) && XFS_IS_GQUOTA_ON(mp)) {
		if (ip->i_d.di_gid != gid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, gid,
						 XFS_DQ_GROUP,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &gq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(gq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_gdquot);
			gq = xfs_qm_dqhold(ip->i_gdquot);
		}
	}
	if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
		if (xfs_get_projid(ip) != prid) {
			xfs_iunlock(ip, lockflags);
			error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
						 XFS_DQ_PROJ,
						 XFS_QMOPT_DQALLOC |
						 XFS_QMOPT_DOWARN,
						 &pq);
			if (error) {
				ASSERT(error != -ENOENT);
				goto error_rele;
			}
			xfs_dqunlock(pq);
			lockflags = XFS_ILOCK_SHARED;
			xfs_ilock(ip, lockflags);
		} else {
			ASSERT(ip->i_pdquot);
			pq = xfs_qm_dqhold(ip->i_pdquot);
		}
	}
	if (uq)
		trace_xfs_dquot_dqalloc(ip);

	xfs_iunlock(ip, lockflags);
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);
	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);
	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);
	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
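/*
 * The old dquot is handed back to the caller with its reference intact;
 * releasing it (xfs_qm_dqrele) is the caller's job once the transaction
 * commits.
 */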
/*
 * Quota reservations for setattr(AT_UID|AT_GID|AT_PROJID).
 */
int
xfs_qm_vop_chown_reserve(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp,
	uint			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	uint			delblks, blkflags, prjflags = 0;
	struct xfs_dquot	*udq_unres = NULL;
	struct xfs_dquot	*gdq_unres = NULL;
	struct xfs_dquot	*pdq_unres = NULL;
	struct xfs_dquot	*udq_delblks = NULL;
	struct xfs_dquot	*gdq_delblks = NULL;
	struct xfs_dquot	*pdq_delblks = NULL;
	int			error;
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	delblks = ip->i_delayed_blks;
	blkflags = XFS_IS_REALTIME_INODE(ip) ?
			XFS_QMOPT_RES_RTBLKS : XFS_QMOPT_RES_REGBLKS;
	if (XFS_IS_UQUOTA_ON(mp) && udqp &&
	    ip->i_d.di_uid != be32_to_cpu(udqp->q_core.d_id)) {
		udq_delblks = udqp;
		/*
		 * If there are delayed allocation blocks, then we have to
		 * unreserve those from the old dquot, and add them to the
		 * new dquot.
		 */
		if (delblks) {
			ASSERT(ip->i_udquot);
			udq_unres = ip->i_udquot;
		}
	}
	if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
	    ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
		gdq_delblks = gdqp;
		if (delblks) {
			ASSERT(ip->i_gdquot);
			gdq_unres = ip->i_gdquot;
		}
	}
	if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
	    xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
		prjflags = XFS_QMOPT_ENOSPC;
		pdq_delblks = pdqp;
		if (delblks) {
			ASSERT(ip->i_pdquot);
			pdq_unres = ip->i_pdquot;
		}
	}
	error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
				udq_delblks, gdq_delblks, pdq_delblks,
				ip->i_d.di_nblocks, 1,
				flags | blkflags | prjflags);
	if (error)
		return error;
	/*
	 * Do the delayed blks reservations/unreservations now. Since, these
	 * are done without the help of a transaction, if a reservation fails
	 * its previous reservations won't be automatically undone by trans
	 * code. So, we have to do it manually here.
	 */
	if (delblks) {
1871 ASSERT(udq_delblks
|| gdq_delblks
|| pdq_delblks
);
1872 ASSERT(udq_unres
|| gdq_unres
|| pdq_unres
);
1873 error
= xfs_trans_reserve_quota_bydquots(NULL
, ip
->i_mount
,
1874 udq_delblks
, gdq_delblks
, pdq_delblks
,
1875 (xfs_qcnt_t
)delblks
, 0,
1876 flags
| blkflags
| prjflags
);
		xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
				udq_unres, gdq_unres, pdq_unres,
				-((xfs_qcnt_t)delblks), 0, blkflags);
	}

	return 0;
}
int
xfs_qm_vop_rename_dqattach(
	struct xfs_inode	**i_tab)
{
	struct xfs_mount	*mp = i_tab[0]->i_mount;
	int			i;
	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return 0;
	for (i = 0; (i < 4 && i_tab[i]); i++) {
		struct xfs_inode	*ip = i_tab[i];
		int			error;
		/*
		 * Watch out for duplicate entries in the table.
		 */
		if (i == 0 || ip != i_tab[i-1]) {
			if (XFS_NOT_DQATTACHED(mp, ip)) {
				error = xfs_qm_dqattach(ip, 0);
				if (error)
					return error;
			}
		}
	}
	return 0;
}
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp,
	struct xfs_dquot	*pdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;
	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if (udqp && XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
		ASSERT(ip->i_pdquot == NULL);
		ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));

		ip->i_pdquot = xfs_qm_dqhold(pdqp);
		xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}