fs/xfs/xfs_qm_syscalls.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"

STATIC int      xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int      xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
                                        uint);
STATIC uint     xfs_qm_export_flags(uint);
STATIC uint     xfs_qm_export_qtype_flags(uint);
STATIC void     xfs_qm_export_dquot(xfs_mount_t *, xfs_disk_dquot_t *,
                                        fs_disk_quota_t *);


/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
        xfs_mount_t             *mp,
        uint                    flags)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        uint                    dqtype;
        int                     error;
        uint                    inactivate_flags;
        xfs_qoff_logitem_t      *qoffstart;
        int                     nculprits;

        /*
         * No file system can have quotas enabled on disk but not in core.
         * Note that quota utilities (like quotaoff) _expect_
         * errno == EEXIST here.
         */
        if ((mp->m_qflags & flags) == 0)
                return XFS_ERROR(EEXIST);
        error = 0;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

        /*
         * We don't want to deal with two quotaoffs messing up each other,
         * so we're going to serialize it. quotaoff isn't exactly a performance
         * critical thing.
         * If quotaoff, then we must be dealing with the root filesystem.
         */
        ASSERT(q);
        mutex_lock(&q->qi_quotaofflock);

        /*
         * If we're just turning off quota enforcement, change mp and go.
         */
        if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
                mp->m_qflags &= ~(flags);

                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = mp->m_qflags;
                spin_unlock(&mp->m_sb_lock);
                mutex_unlock(&q->qi_quotaofflock);

                /* XXX what to do if error ? Revert back to old vals incore ? */
                error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
                return (error);
        }

        dqtype = 0;
        inactivate_flags = 0;
        /*
         * If accounting is off, we must turn enforcement off and clear the
         * quota 'CHKD' certificate to make it known that we have to
         * do a quotacheck the next time this quota is turned on.
         */
        if (flags & XFS_UQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_UQUOTA;
                flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
                inactivate_flags |= XFS_UQUOTA_ACTIVE;
        }
        if (flags & XFS_GQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_GQUOTA;
                flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
                inactivate_flags |= XFS_GQUOTA_ACTIVE;
        } else if (flags & XFS_PQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_PQUOTA;
                flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
                inactivate_flags |= XFS_PQUOTA_ACTIVE;
        }

        /*
         * Nothing to do? Don't complain. This happens when we're just
         * turning off quota enforcement.
         */
        if ((mp->m_qflags & flags) == 0)
                goto out_unlock;

        /*
         * Write the LI_QUOTAOFF log record, and do SB changes atomically,
         * and synchronously. If we fail to write, we should abort the
         * operation as it cannot be recovered safely if we crash.
         */
        error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
        if (error)
                goto out_unlock;

        /*
         * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
         * to take care of the race between dqget and quotaoff. We don't take
         * any special locks to reset these bits. All processes need to check
         * these bits *after* taking inode lock(s) to see if the particular
         * quota type is in the process of being turned off. If *ACTIVE, it is
         * guaranteed that all dquot structures and quotainode ptrs will
         * stay valid as long as that inode is kept locked.
         *
         * There is no turning back after this.
         */
        mp->m_qflags &= ~inactivate_flags;

        /*
         * Give back all the dquot reference(s) held by inodes.
         * Here we go through every single incore inode in this file system, and
         * do a dqrele on the i_udquot/i_gdquot that it may have.
         * Essentially, as long as somebody has an inode locked, this guarantees
         * that quotas will not be turned off. This is handy because in a
         * transaction once we lock the inode(s) and check for quotaon, we can
         * depend on the quota inodes (and other things) being valid as long as
         * we keep the lock(s).
         */
        xfs_qm_dqrele_all_inodes(mp, flags);

        /*
         * Next we make the changes in the quota flag in the mount struct.
         * This isn't protected by a particular lock directly, because we
         * don't want to take a mrlock every time we depend on quotas being on.
         */
        mp->m_qflags &= ~(flags);

        /*
         * Go through all the dquots of this file system and purge them,
         * according to what was turned off. We may not be able to get rid
         * of all dquots, because dquots can have temporary references that
         * are not attached to inodes, e.g. xfs_setattr, xfs_create.
         * So, if we couldn't purge all the dquots from the filesystem,
         * we can't get rid of the incore data structures.
         */
        while ((nculprits = xfs_qm_dqpurge_all(mp, dqtype)))
                delay(10 * nculprits);

        /*
         * Transactions that had started before the ACTIVE state bit was cleared
         * could have logged many dquots, so they'd have higher LSNs than
         * the first QUOTAOFF log record does. If we happen to crash when
         * the tail of the log has gone past the QUOTAOFF record, but
         * before the last dquot modification, those dquots __will__
         * recover, and that's not good.
         *
         * So, we have QUOTAOFF start and end logitems; the start
         * logitem won't get overwritten until the end logitem appears...
         */
        error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
        if (error) {
                /* We're screwed now. Shutdown is the only option. */
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                goto out_unlock;
        }

        /*
         * If quotas are completely disabled, close shop.
         */
        if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
            ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
                mutex_unlock(&q->qi_quotaofflock);
                xfs_qm_destroy_quotainfo(mp);
                return (0);
        }

        /*
         * Release our quotainode references if we don't need them anymore.
         */
        if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
                IRELE(q->qi_uquotaip);
                q->qi_uquotaip = NULL;
        }
        if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
                IRELE(q->qi_gquotaip);
                q->qi_gquotaip = NULL;
        }

out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

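/*
 * Truncate one on-disk quota inode back to zero length.  Called from
 * xfs_qm_scall_trunc_qfiles() below when userspace asks for the quota
 * files to be cleared.  The truncation runs in its own permanent
 * transaction; the caller only supplies the inode number.
 */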
STATIC int
xfs_qm_scall_trunc_qfile(
        struct xfs_mount        *mp,
        xfs_ino_t               ino)
{
        struct xfs_inode        *ip;
        struct xfs_trans        *tp;
        int                     error;

        if (ino == NULLFSINO)
                return 0;

        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
        if (error)
                return error;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
        error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
                                  XFS_TRANS_PERM_LOG_RES,
                                  XFS_ITRUNCATE_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                goto out_put;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        ip->i_d.di_size = 0;
        ip->i_size = 0;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                     XFS_TRANS_ABORT);
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_nextents == 0);

        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
        IRELE(ip);
        return error;
}

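/*
 * Truncate the on-disk quota files selected by @flags.  User quota data
 * lives in sb_uquotino; group and project quota share sb_gquotino, which
 * is why XFS_DQ_GROUP and XFS_DQ_PROJ are handled together here.
 */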
int
xfs_qm_scall_trunc_qfiles(
        xfs_mount_t     *mp,
        uint            flags)
{
        int             error = 0, error2 = 0;

        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
                xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
                        __func__, flags, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }

        if (flags & XFS_DQ_USER)
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
        if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
                error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);

        return error ? error : error2;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
        xfs_mount_t     *mp,
        uint            flags)
{
        int             error;
        uint            qf;
        __int64_t       sbflags;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
         * Switching on quota accounting must be done at mount time.
         */
        flags &= ~(XFS_ALL_QUOTA_ACCT);

        sbflags = 0;

        if (flags == 0) {
                xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
                        __func__, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }

        /* No fs can turn on quotas with a delayed effect */
        ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

        /*
         * Can't enforce without accounting. We check the superblock
         * qflags here instead of m_qflags because rootfs can have
         * quota acct on ondisk without m_qflags' knowing.
         */
        if (((flags & XFS_UQUOTA_ACCT) == 0 &&
            (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
            (flags & XFS_UQUOTA_ENFD))
            ||
            ((flags & XFS_PQUOTA_ACCT) == 0 &&
            (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
            (flags & XFS_GQUOTA_ACCT) == 0 &&
            (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
            (flags & XFS_OQUOTA_ENFD))) {
                xfs_debug(mp,
                        "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
                        __func__, flags, mp->m_sb.sb_qflags);
                return XFS_ERROR(EINVAL);
        }
        /*
         * If everything's up to date incore, then don't waste time.
         */
        if ((mp->m_qflags & flags) == flags)
                return XFS_ERROR(EEXIST);

        /*
         * Change sb_qflags on disk but not incore mp->m_qflags
         * if this is the root filesystem.
         */
        spin_lock(&mp->m_sb_lock);
        qf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = qf | flags;
        spin_unlock(&mp->m_sb_lock);

        /*
         * There's nothing to change if it's the same.
         */
        if ((qf & flags) == flags && sbflags == 0)
                return XFS_ERROR(EEXIST);
        sbflags |= XFS_SB_QFLAGS;

        if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
                return (error);
        /*
         * If we aren't trying to switch on quota enforcement, we are done.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
             (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
             (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
            (flags & XFS_ALL_QUOTA_ENFD) == 0)
                return (0);

        if (! XFS_IS_QUOTA_RUNNING(mp))
                return XFS_ERROR(ESRCH);

        /*
         * Switch on quota enforcement in core.
         */
        mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
        mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
        mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

        return (0);
}


/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 */
int
xfs_qm_scall_getqstat(
        struct xfs_mount        *mp,
        struct fs_quota_stat    *out)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        struct xfs_inode        *uip, *gip;
        boolean_t               tempuqip, tempgqip;

        uip = gip = NULL;
        tempuqip = tempgqip = B_FALSE;
        memset(out, 0, sizeof(fs_quota_stat_t));

        out->qs_version = FS_QSTAT_VERSION;
        if (!xfs_sb_version_hasquota(&mp->m_sb)) {
                out->qs_uquota.qfs_ino = NULLFSINO;
                out->qs_gquota.qfs_ino = NULLFSINO;
                return (0);
        }
        out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                                                        (XFS_ALL_QUOTA_ACCT|
                                                         XFS_ALL_QUOTA_ENFD));
        out->qs_pad = 0;
        out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
        out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;

        if (q) {
                uip = q->qi_uquotaip;
                gip = q->qi_gquotaip;
        }
        if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                        0, 0, &uip) == 0)
                        tempuqip = B_TRUE;
        }
        if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                        0, 0, &gip) == 0)
                        tempgqip = B_TRUE;
        }
        if (uip) {
                out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
                out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
                if (tempuqip)
                        IRELE(uip);
        }
        if (gip) {
                out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
                out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
                if (tempgqip)
                        IRELE(gip);
        }
        if (q) {
                out->qs_incoredqs = q->qi_dquots;
                out->qs_btimelimit = q->qi_btimelimit;
                out->qs_itimelimit = q->qi_itimelimit;
                out->qs_rtbtimelimit = q->qi_rtbtimelimit;
                out->qs_bwarnlimit = q->qi_bwarnlimit;
                out->qs_iwarnlimit = q->qi_iwarnlimit;
        }
        return 0;
}

#define XFS_DQ_MASK \
        (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
        xfs_mount_t             *mp,
        xfs_dqid_t              id,
        uint                    type,
        fs_disk_quota_t         *newlim)
{
        struct xfs_quotainfo    *q = mp->m_quotainfo;
        xfs_disk_dquot_t        *ddq;
        xfs_dquot_t             *dqp;
        xfs_trans_t             *tp;
        int                     error;
        xfs_qcnt_t              hard, soft;

        if (newlim->d_fieldmask & ~XFS_DQ_MASK)
                return EINVAL;
        if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
                return 0;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
        if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
                                      0, 0, XFS_DEFAULT_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return (error);
        }

        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
         * (We don't hold an inode lock, so there's nothing else to stop
         * a quotaoff from happening). (XXX This doesn't currently happen
         * because we take the vfslock before calling xfs_qm_sysent).
         */
        mutex_lock(&q->qi_quotaofflock);

        /*
         * Get the dquot (locked), and join it to the transaction.
         * Allocate the dquot if this doesn't exist.
         */
        if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
                xfs_trans_cancel(tp, XFS_TRANS_ABORT);
                ASSERT(error != ENOENT);
                goto out_unlock;
        }
        xfs_trans_dqjoin(tp, dqp);
        ddq = &dqp->q_core;

        /*
         * Make sure that hard limits are >= soft limits before changing.
         */
        hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
                ddq->d_blk_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        q->qi_bhardlimit = hard;
                        q->qi_bsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
        }
        hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
                ddq->d_rtb_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        q->qi_rtbhardlimit = hard;
                        q->qi_rtbsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
        }

        hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_ino_hardlimit = cpu_to_be64(hard);
                ddq->d_ino_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        q->qi_ihardlimit = hard;
                        q->qi_isoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
        }

        /*
         * Update warnings counter(s) if requested
         */
        if (newlim->d_fieldmask & FS_DQ_BWARNS)
                ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
        if (newlim->d_fieldmask & FS_DQ_IWARNS)
                ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
        if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
                ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

        if (id == 0) {
                /*
                 * Timelimits for the super user set the relative time
                 * the other users can be over quota for this file system.
                 * If it is zero a default is used. Ditto for the default
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
                if (newlim->d_fieldmask & FS_DQ_BTIMER) {
                        q->qi_btimelimit = newlim->d_btimer;
                        ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
                }
                if (newlim->d_fieldmask & FS_DQ_ITIMER) {
                        q->qi_itimelimit = newlim->d_itimer;
                        ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
                }
                if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
                        q->qi_rtbtimelimit = newlim->d_rtbtimer;
                        ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
                }
                if (newlim->d_fieldmask & FS_DQ_BWARNS)
                        q->qi_bwarnlimit = newlim->d_bwarns;
                if (newlim->d_fieldmask & FS_DQ_IWARNS)
                        q->qi_iwarnlimit = newlim->d_iwarns;
                if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
                        q->qi_rtbwarnlimit = newlim->d_rtbwarns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
                 * The user will not be 'warned'.
                 * Note that we keep the timers ticking, whether enforcement
                 * is on or off. We don't really want to bother with iterating
                 * over all ondisk dquots and turning the timers on/off.
                 */
                xfs_qm_adjust_dqtimers(mp, ddq);
        }
        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_trans_log_dquot(tp, dqp);

        error = xfs_trans_commit(tp, 0);
        xfs_qm_dqrele(dqp);

out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

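/*
 * Return the limits and usage of one dquot, identified by id and type, in
 * the exportable fs_disk_quota format.  A dquot whose core is still all
 * zeroes is treated as nonexistent and reported as ENOENT rather than as
 * an empty record.
 */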
int
xfs_qm_scall_getquota(
        xfs_mount_t     *mp,
        xfs_dqid_t      id,
        uint            type,
        fs_disk_quota_t *out)
{
        xfs_dquot_t     *dqp;
        int             error;

        /*
         * Try to get the dquot. We don't want it allocated on disk, so
         * we aren't passing the XFS_QMOPT_DQALLOC flag. If it doesn't
         * exist, we'll get ENOENT back.
         */
        if ((error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp))) {
                return (error);
        }

        /*
         * If everything's NULL, this dquot doesn't quite exist as far as
         * our utility programs are concerned.
         */
        if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                xfs_qm_dqput(dqp);
                return XFS_ERROR(ENOENT);
        }
        /*
         * Convert the disk dquot to the exportable format
         */
        xfs_qm_export_dquot(mp, &dqp->q_core, out);
        xfs_qm_dqput(dqp);
        return (error ? XFS_ERROR(EFAULT) : 0);
}


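/*
 * Log the QUOTAOFF end record that pairs with the start record written by
 * xfs_qm_log_quotaoff().  The transaction is committed synchronously so
 * the record is safely on disk before quota accounting actually stops.
 */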
STATIC int
xfs_qm_log_quotaoff_end(
        xfs_mount_t             *mp,
        xfs_qoff_logitem_t      *startqoff,
        uint                    flags)
{
        xfs_trans_t             *tp;
        int                     error;
        xfs_qoff_logitem_t      *qoffi;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

        if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
                                       0, 0, XFS_DEFAULT_LOG_COUNT))) {
                xfs_trans_cancel(tp, 0);
                return (error);
        }

        qoffi = xfs_trans_get_qoff_item(tp, startqoff,
                                        flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);
        return (error);
}


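/*
 * Log the QUOTAOFF start record and update the superblock quota flags in
 * the same synchronous transaction.  On failure the old sb_qflags value is
 * restored; the caller still holds qi_quotaofflock, so nothing else can be
 * modifying sb_qflags concurrently.
 */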
STATIC int
xfs_qm_log_quotaoff(
        xfs_mount_t             *mp,
        xfs_qoff_logitem_t      **qoffstartp,
        uint                    flags)
{
        xfs_trans_t             *tp;
        int                     error;
        xfs_qoff_logitem_t      *qoffi = NULL;
        uint                    oldsbqflag = 0;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
        if ((error = xfs_trans_reserve(tp, 0,
                                       sizeof(xfs_qoff_logitem_t) * 2 +
                                       mp->m_sb.sb_sectsize + 128,
                                       0,
                                       0,
                                       XFS_DEFAULT_LOG_COUNT))) {
                goto error0;
        }

        qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        spin_lock(&mp->m_sb_lock);
        oldsbqflag = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        xfs_mod_sb(tp, XFS_SB_QFLAGS);

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);

error0:
        if (error) {
                xfs_trans_cancel(tp, 0);
                /*
                 * No one else is modifying sb_qflags, so this is OK.
                 * We still hold the quotaofflock.
                 */
                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = oldsbqflag;
                spin_unlock(&mp->m_sb_lock);
        }
        *qoffstartp = qoffi;
        return (error);
}


/*
 * Translate an internal style on-disk-dquot to the exportable format.
 * The main differences are that the counters/limits are all in Basic
 * Blocks (BBs) instead of the internal FSBs, and all on-disk data has
 * to be converted to the native endianness.
 */
STATIC void
xfs_qm_export_dquot(
        xfs_mount_t             *mp,
        xfs_disk_dquot_t        *src,
        struct fs_disk_quota    *dst)
{
        memset(dst, 0, sizeof(*dst));
        dst->d_version = FS_DQUOT_VERSION;  /* different from src->d_version */
        dst->d_flags = xfs_qm_export_qtype_flags(src->d_flags);
        dst->d_id = be32_to_cpu(src->d_id);
        dst->d_blk_hardlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_hardlimit));
        dst->d_blk_softlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(src->d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(src->d_ino_softlimit);
        dst->d_bcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_bcount));
        dst->d_icount = be64_to_cpu(src->d_icount);
        dst->d_btimer = be32_to_cpu(src->d_btimer);
        dst->d_itimer = be32_to_cpu(src->d_itimer);
        dst->d_iwarns = be16_to_cpu(src->d_iwarns);
        dst->d_bwarns = be16_to_cpu(src->d_bwarns);
        dst->d_rtb_hardlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_hardlimit));
        dst->d_rtb_softlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtb_softlimit));
        dst->d_rtbcount = XFS_FSB_TO_BB(mp, be64_to_cpu(src->d_rtbcount));
        dst->d_rtbtimer = be32_to_cpu(src->d_rtbtimer);
        dst->d_rtbwarns = be16_to_cpu(src->d_rtbwarns);

        /*
         * Internally, we don't reset all the timers when quota enforcement
         * gets turned off. No need to confuse the user level code,
         * so return zeroes in that case.
         */
        if ((!XFS_IS_UQUOTA_ENFORCED(mp) && src->d_flags == XFS_DQ_USER) ||
            (!XFS_IS_OQUOTA_ENFORCED(mp) &&
                        (src->d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
                dst->d_btimer = 0;
                dst->d_itimer = 0;
                dst->d_rtbtimer = 0;
        }

#ifdef DEBUG
        if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
             (XFS_IS_OQUOTA_ENFORCED(mp) &&
                        (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
            dst->d_id != 0) {
                if (((int) dst->d_bcount >= (int) dst->d_blk_softlimit) &&
                    (dst->d_blk_softlimit > 0)) {
                        ASSERT(dst->d_btimer != 0);
                }
                if (((int) dst->d_icount >= (int) dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
                        ASSERT(dst->d_itimer != 0);
                }
        }
#endif
}

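/*
 * Map an XFS dquot type (XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP) to the
 * corresponding FS_*_QUOTA value used in the exported fs_disk_quota.
 */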
STATIC uint
xfs_qm_export_qtype_flags(
        uint flags)
{
        /*
         * Can't be more than one, or none.
         */
        ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
                (FS_PROJ_QUOTA | FS_USER_QUOTA));
        ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
                (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
        ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
                (FS_USER_QUOTA | FS_GROUP_QUOTA));
        ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);

        return (flags & XFS_DQ_USER) ?
                FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
                        FS_PROJ_QUOTA : FS_GROUP_QUOTA;
}

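/*
 * Convert the internal XFS_*QUOTA_ACCT/ENFD mount flags to the generic
 * FS_QUOTA_* flags reported to userspace in fs_quota_stat.
 */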
STATIC uint
xfs_qm_export_flags(
        uint flags)
{
        uint uflags;

        uflags = 0;
        if (flags & XFS_UQUOTA_ACCT)
                uflags |= FS_QUOTA_UDQ_ACCT;
        if (flags & XFS_PQUOTA_ACCT)
                uflags |= FS_QUOTA_PDQ_ACCT;
        if (flags & XFS_GQUOTA_ACCT)
                uflags |= FS_QUOTA_GDQ_ACCT;
        if (flags & XFS_UQUOTA_ENFD)
                uflags |= FS_QUOTA_UDQ_ENFD;
        if (flags & (XFS_OQUOTA_ENFD)) {
                uflags |= (flags & XFS_GQUOTA_ACCT) ?
                        FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
        }
        return (uflags);
}


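/*
 * Per-inode callback used by xfs_qm_dqrele_all_inodes(): drop the dquot
 * references attached to one inode.  The quota inodes themselves are
 * skipped; they never carry dquot references.
 */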
STATIC int
xfs_dqrele_inode(
        struct xfs_inode        *ip,
        struct xfs_perag        *pag,
        int                     flags)
{
        /* skip quota inodes */
        if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(ip->i_gdquot == NULL);
                return 0;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
        if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}


/*
 * Go through all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
        struct xfs_mount        *mp,
        uint                    flags)
{
        ASSERT(mp->m_quotainfo);
        xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags);
}