/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a quota check
 * program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the frequency
 * of quota file syncs increases as the user moves closer to their limit.  The
 * more frequent the syncs, the more accurate the quota enforcement, but that
 * means that there is more contention between the nodes for the quota file.
 * The default value is one.  This sets the maximum theoretical quota overrun
 * (with an infinite number of nodes, each with infinite bandwidth) to twice
 * the user's limit.  (In practice, the maximum overrun you see should be much
 * less.)  A "quota_scale" number greater than one makes quota syncs more
 * frequent and reduces the maximum overrun.  Numbers less than one (but
 * greater than zero) make quota syncs less frequent.
 *
 * GFS2 quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents
 * of the quota file, so it is not being constantly read.
 */

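/*
 * A worked example of the bound above (illustrative numbers, not from the
 * code): with the default quota_scale of one, a user with a 100GB limit
 * could in theory reach 200GB before every node notices, because each node
 * may locally accumulate changes until its own projection of cluster-wide
 * usage crosses the limit.  Raising quota_scale to two halves the local
 * headroom that projection allows before a sync is triggered, at the cost
 * of more frequent quota file syncs; see need_sync() below for the
 * projection itself.
 */
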
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/quota.h>
#include <linux/dqblk_xfs.h>
#include <linux/lockref.h>
#include <linux/list_lru.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <linux/bit_spinlock.h>
#include <linux/jhash.h>
#include <linux/vmalloc.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "inode.h"
#include "util.h"

#define GFS2_QD_HASH_SHIFT      12
#define GFS2_QD_HASH_SIZE       (1 << GFS2_QD_HASH_SHIFT)
#define GFS2_QD_HASH_MASK       (GFS2_QD_HASH_SIZE - 1)

/* Lock order: qd_lock -> bucket lock -> qd->lockref.lock -> lru lock */
/*                     -> sd_bitmap_lock                              */

static DEFINE_SPINLOCK(qd_lock);
struct list_lru gfs2_qd_lru;

static struct hlist_bl_head qd_hash_table[GFS2_QD_HASH_SIZE];

static unsigned int gfs2_qd_hash(const struct gfs2_sbd *sdp,
				 const struct kqid qid)
{
	unsigned int h;

	h = jhash(&sdp, sizeof(struct gfs2_sbd *), 0);
	h = jhash(&qid, sizeof(struct kqid), h);

	return h & GFS2_QD_HASH_MASK;
}

static inline void spin_lock_bucket(unsigned int hash)
{
	hlist_bl_lock(&qd_hash_table[hash]);
}

static inline void spin_unlock_bucket(unsigned int hash)
{
	hlist_bl_unlock(&qd_hash_table[hash]);
}

static void gfs2_qd_dealloc(struct rcu_head *rcu)
{
	struct gfs2_quota_data *qd = container_of(rcu, struct gfs2_quota_data, qd_rcu);
	kmem_cache_free(gfs2_quotad_cachep, qd);
}

static void gfs2_qd_dispose(struct list_head *list)
{
	struct gfs2_quota_data *qd;
	struct gfs2_sbd *sdp;

	while (!list_empty(list)) {
		qd = list_entry(list->next, struct gfs2_quota_data, qd_lru);
		sdp = qd->qd_gl->gl_sbd;

		list_del(&qd->qd_lru);

		/* Free from the filesystem-specific list */
		spin_lock(&qd_lock);
		list_del(&qd->qd_list);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		atomic_dec(&sdp->sd_quota_count);

		/* Delete it from the common reclaim list */
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);
	}
}

static enum lru_status gfs2_qd_isolate(struct list_head *item, spinlock_t *lock, void *arg)
{
	struct list_head *dispose = arg;
	struct gfs2_quota_data *qd = list_entry(item, struct gfs2_quota_data, qd_lru);

	if (!spin_trylock(&qd->qd_lockref.lock))
		return LRU_SKIP;

	if (qd->qd_lockref.count == 0) {
		lockref_mark_dead(&qd->qd_lockref);
		list_move(&qd->qd_lru, dispose);
	}

	spin_unlock(&qd->qd_lockref.lock);
	return LRU_REMOVED;
}

static unsigned long gfs2_qd_shrink_scan(struct shrinker *shrink,
					 struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;

	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	freed = list_lru_walk_node(&gfs2_qd_lru, sc->nid, gfs2_qd_isolate,
				   &dispose, &sc->nr_to_scan);

	gfs2_qd_dispose(&dispose);

	return freed;
}

static unsigned long gfs2_qd_shrink_count(struct shrinker *shrink,
					  struct shrink_control *sc)
{
	return vfs_pressure_ratio(list_lru_count_node(&gfs2_qd_lru, sc->nid));
}

struct shrinker gfs2_qd_shrinker = {
	.count_objects = gfs2_qd_shrink_count,
	.scan_objects = gfs2_qd_shrink_scan,
	.seeks = DEFAULT_SEEKS,
	.flags = SHRINKER_NUMA_AWARE,
};

static u64 qd2index(struct gfs2_quota_data *qd)
{
	struct kqid qid = qd->qd_id;
	return (2 * (u64)from_kqid(&init_user_ns, qid)) +
		((qid.type == USRQUOTA) ? 0 : 1);
}

static u64 qd2offset(struct gfs2_quota_data *qd)
{
	u64 offset;

	offset = qd2index(qd);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}

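/*
 * Layout note (this follows directly from qd2index()/qd2offset() above):
 * user and group entries interleave in the quota file.  A USRQUOTA ID n
 * lives at index 2n and a GRPQUOTA ID n at index 2n + 1, so for example
 * UID 1000 occupies index 2000 and GID 1000 index 2001, each at
 * index * sizeof(struct gfs2_quota) bytes into the file.
 */
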
static struct gfs2_quota_data *qd_alloc(unsigned hash, struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kmem_cache_zalloc(gfs2_quotad_cachep, GFP_NOFS);
	if (qd == NULL)
		return NULL;

	qd->qd_sbd = sdp;
	qd->qd_lockref.count = 1;
	spin_lock_init(&qd->qd_lockref.lock);
	qd->qd_id = qid;
	qd->qd_slot = -1;
	INIT_LIST_HEAD(&qd->qd_lru);
	qd->qd_hash = hash;

	error = gfs2_glock_get(sdp, qd2index(qd),
			      &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	return qd;

fail:
	kmem_cache_free(gfs2_quotad_cachep, qd);
	return NULL;
}

static struct gfs2_quota_data *gfs2_qd_search_bucket(unsigned int hash,
						     const struct gfs2_sbd *sdp,
						     struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct hlist_bl_node *h;

	hlist_bl_for_each_entry_rcu(qd, h, &qd_hash_table[hash], qd_hlist) {
		if (!qid_eq(qd->qd_id, qid))
			continue;
		if (qd->qd_sbd != sdp)
			continue;
		if (lockref_get_not_dead(&qd->qd_lockref)) {
			list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
			return qd;
		}
	}

	return NULL;
}

static int qd_get(struct gfs2_sbd *sdp, struct kqid qid,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd, *new_qd;
	unsigned int hash = gfs2_qd_hash(sdp, qid);

	rcu_read_lock();
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	rcu_read_unlock();

	if (qd)
		return 0;

	new_qd = qd_alloc(hash, sdp, qid);
	if (!new_qd)
		return -ENOMEM;

	spin_lock(&qd_lock);
	spin_lock_bucket(hash);
	*qdp = qd = gfs2_qd_search_bucket(hash, sdp, qid);
	if (qd == NULL) {
		*qdp = new_qd;
		list_add(&new_qd->qd_list, &sdp->sd_quota_list);
		hlist_bl_add_head_rcu(&new_qd->qd_hlist, &qd_hash_table[hash]);
		atomic_inc(&sdp->sd_quota_count);
	}
	spin_unlock_bucket(hash);
	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_glock_put(new_qd->qd_gl);
		kmem_cache_free(gfs2_quotad_cachep, new_qd);
	}

	return 0;
}

static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	gfs2_assert(sdp, !__lockref_is_dead(&qd->qd_lockref));
	lockref_get(&qd->qd_lockref);
}

static void qd_put(struct gfs2_quota_data *qd)
{
	if (lockref_put_or_lock(&qd->qd_lockref))
		return;

	qd->qd_lockref.count = 0;
	list_lru_add(&gfs2_qd_lru, &qd->qd_lru);
	spin_unlock(&qd->qd_lockref.lock);
}

static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;
	unsigned int bit;
	int error = 0;

	spin_lock(&sdp->sd_bitmap_lock);
	if (qd->qd_slot_count != 0)
		goto out;

	error = -ENOSPC;
	bit = find_first_zero_bit(sdp->sd_quota_bitmap, sdp->sd_quota_slots);
	if (bit < sdp->sd_quota_slots) {
		set_bit(bit, sdp->sd_quota_bitmap);
		qd->qd_slot = bit;
		error = 0;
out:
		qd->qd_slot_count++;
	}
	spin_unlock(&sdp->sd_bitmap_lock);

	return error;
}

static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_bitmap_lock);
}

static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_sbd;

	spin_lock(&sdp->sd_bitmap_lock);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		BUG_ON(!test_and_clear_bit(qd->qd_slot, sdp->sd_quota_bitmap));
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_bitmap_lock);
}

static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	struct buffer_head *bh;
	int error;
	struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	bh_map.b_size = 1 << ip->i_inode.i_blkbits;
	error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, bh_map.b_blocknr, DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		(bh->b_data + sizeof(struct gfs2_meta_header) +
		 offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}

static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}

static int qd_check_sync(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd,
			 u64 *sync_gen)
{
	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
	    (sync_gen && (qd->qd_sync_gen >= *sync_gen)))
		return 0;

	if (!lockref_get_not_dead(&qd->qd_lockref))
		return 0;

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
	set_bit(QDF_LOCKED, &qd->qd_flags);
	qd->qd_change_sync = qd->qd_change;
	slot_hold(qd);
	return 1;
}

static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&qd_lock);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		found = qd_check_sync(sdp, qd, &sdp->sd_quota_sync_gen);
		if (found)
			break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&qd_lock);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}

static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

static int qdsb_get(struct gfs2_sbd *sdp, struct kqid qid,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, qid, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}

static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}

int gfs2_quota_hold(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data **qd;
	int error;

	if (ip->i_res == NULL) {
		error = gfs2_rs_alloc(ip);
		if (error)
			return error;
	}

	qd = ip->i_res->rs_qa_qd;

	if (gfs2_assert_warn(sdp, !ip->i_res->rs_qa_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd);
	if (error)
		goto out;
	ip->i_res->rs_qa_qd_num++;
	qd++;

	if (!uid_eq(uid, NO_UID_QUOTA_CHANGE) &&
	    !uid_eq(uid, ip->i_inode.i_uid)) {
		error = qdsb_get(sdp, make_kqid_uid(uid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

	if (!gid_eq(gid, NO_GID_QUOTA_CHANGE) &&
	    !gid_eq(gid, ip->i_inode.i_gid)) {
		error = qdsb_get(sdp, make_kqid_gid(gid), qd);
		if (error)
			goto out;
		ip->i_res->rs_qa_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}

void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int x;

	if (ip->i_res == NULL)
		return;
	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qdsb_put(ip->i_res->rs_qa_qd[x]);
		ip->i_res->rs_qa_qd[x] = NULL;
	}
	ip->i_res->rs_qa_qd_num = 0;
}

static int sort_qd(const void *a, const void *b)
{
	const struct gfs2_quota_data *qd_a = *(const struct gfs2_quota_data **)a;
	const struct gfs2_quota_data *qd_b = *(const struct gfs2_quota_data **)b;

	if (qid_lt(qd_a->qd_id, qd_b->qd_id))
		return -1;
	if (qid_lt(qd_b->qd_id, qd_a->qd_id))
		return 1;
	return 0;
}

static void do_qc(struct gfs2_quota_data *qd, s64 change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	s64 x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_meta(ip->i_gl, qd->qd_bh);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (qd->qd_id.type == USRQUOTA)
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(from_kqid(&init_user_ns, qd->qd_id));
	}

	x = be64_to_cpu(qc->qc_change) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&qd_lock);
	qd->qd_change = x;
	spin_unlock(&qd_lock);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}

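/*
 * Note on the reference pairing in do_qc() above: the first nonzero change
 * for an ID sets QDF_CHANGE and takes a qd reference plus a slot reference,
 * which keep the gfs2_quota_data and its slot in the per-node quota change
 * file pinned.  When the accumulated change returns to zero the tag is no
 * longer needed, so QDF_CHANGE is cleared and both references are dropped.
 */
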
/**
 * gfs2_adjust_quota - adjust record of current block usage
 * @ip: The quota inode
 * @loc: Offset of the entry in the quota file
 * @change: The amount of usage change to record
 * @qd: The quota data
 * @fdq: The updated limits to record
 *
 * This function was mostly borrowed from gfs2_block_truncate_page which was
 * in turn mostly borrowed from ext3
 *
 * Returns: 0 or -ve on error
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     s64 change, struct gfs2_quota_data *qd,
			     struct fs_disk_quota *fdq)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr, *ptr;
	struct gfs2_quota q;
	int err, nbytes;
	u64 size;

	if (gfs2_is_stuffed(ip)) {
		err = gfs2_unstuff_dinode(ip, NULL);
		if (err)
			return err;
	}

	memset(&q, 0, sizeof(struct gfs2_quota));
	err = gfs2_internal_read(ip, (char *)&q, &loc, sizeof(q));
	if (err < 0)
		return err;

	err = -EIO;
	be64_add_cpu(&q.qu_value, change);
	qd->qd_qb.qb_value = q.qu_value;
	if (fdq) {
		if (fdq->d_fieldmask & FS_DQ_BSOFT) {
			q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_warn = q.qu_warn;
		}
		if (fdq->d_fieldmask & FS_DQ_BHARD) {
			q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_limit = q.qu_limit;
		}
		if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
			q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
			qd->qd_qb.qb_value = q.qu_value;
		}
	}

	/* Write the quota into the quota file on disk */
	ptr = &q;
	nbytes = sizeof(struct gfs2_quota);
get_a_page:
	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_block_map(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock_out;
		/* If it's a newly allocated disk block for quota, zero it */
		if (buffer_new(bh))
			zero_user(page, pos - blocksize, bh->b_size);
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ | REQ_META, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock_out;
	}

	gfs2_trans_add_data(ip->i_gl, bh);

	kaddr = kmap_atomic(page);
	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
		nbytes = PAGE_CACHE_SIZE - offset;
	memcpy(kaddr + offset, ptr, nbytes);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	unlock_page(page);
	page_cache_release(page);

	/* If quota straddles page boundary, we need to update the rest of the
	 * quota at the beginning of the next page */
	if ((offset + sizeof(struct gfs2_quota)) > PAGE_CACHE_SIZE) {
		ptr = ptr + nbytes;
		nbytes = sizeof(struct gfs2_quota) - nbytes;
		offset = 0;
		index++;
		goto get_a_page;
	}

	size = loc + sizeof(struct gfs2_quota);
	if (size > inode->i_size)
		i_size_write(inode, size);
	inode->i_mtime = inode->i_atime = CURRENT_TIME;
	mark_inode_dirty(inode);
	return 0;

unlock_out:
	unlock_page(page);
	page_cache_release(page);
	return err;
}

static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_alloc_parms ap = { .aflags = 0, };
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	unsigned reserved;
	loff_t offset;
	unsigned int nalloc = 0, blocks;
	int error;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			      &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_NOFS);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	mutex_lock(&ip->i_inode.i_mutex);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		offset = qd2offset(qda[x]);
		if (gfs2_write_alloc_required(ip, offset,
					      sizeof(struct gfs2_quota)))
			nalloc++;
	}

	/*
	 * 1 blk for unstuffing inode if stuffed. We add this extra
	 * block to the reservation unconditionally. If the inode
	 * doesn't need unstuffing, the block will be released to the
	 * rgrp since it won't be allocated during the transaction
	 */
	/* +3 in the end for unstuffing block, inode size update block
	 * and another block in case quota straddles page boundary and
	 * two blocks need to be updated instead of 1 */
	blocks = num_qd * data_blocks + RES_DINODE + num_qd + 3;

	reserved = 1 + (nalloc * (data_blocks + ind_blocks));
	ap.target = reserved;
	error = gfs2_inplace_reserve(ip, &ap);
	if (error)
		goto out_alloc;

	if (nalloc)
		blocks += gfs2_rg_blocks(ip, reserved) + nalloc * ind_blocks + RES_STATFS;

	error = gfs2_trans_begin(sdp, blocks, 0);
	if (error)
		goto out_ipres;

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync, qd, NULL);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
		set_bit(QDF_REFRESH, &qd->qd_flags);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);

out_ipres:
	gfs2_inplace_release(ip);

out_alloc:
	gfs2_glock_dq_uninit(&i_gh);

out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	mutex_unlock(&ip->i_inode.i_mutex);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}

static int update_qd(struct gfs2_sbd *sdp, struct gfs2_quota_data *qd)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota q;
	struct gfs2_quota_lvb *qlvb;
	loff_t pos;
	int error;

	memset(&q, 0, sizeof(struct gfs2_quota));
	pos = qd2offset(qd);
	error = gfs2_internal_read(ip, (char *)&q, &pos, sizeof(q));
	if (error < 0)
		return error;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
	qlvb->__pad = 0;
	qlvb->qb_limit = q.qu_limit;
	qlvb->qb_warn = q.qu_warn;
	qlvb->qb_value = q.qu_value;
	qd->qd_qb = *qlvb;

	return 0;
}

static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	int error;

restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	qd->qd_qb = *(struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;

	if (force_refresh || qd->qd_qb.qb_magic != cpu_to_be32(GFS2_MAGIC)) {
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE,
					   GL_NOCACHE, q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &i_gh);
		if (error)
			goto fail;

		error = update_qd(sdp, qd);
		if (error)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);
		gfs2_glock_dq_uninit(q_gh);
		force_refresh = 0;
		goto restart;
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}

int gfs2_quota_lock(struct gfs2_inode *ip, kuid_t uid, kgid_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qd;
	unsigned int x;
	int error = 0;

	error = gfs2_quota_hold(ip, uid, gid);
	if (error)
		return error;

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(ip->i_res->rs_qa_qd, ip->i_res->rs_qa_qd_num,
	     sizeof(struct gfs2_quota_data *), sort_qd, NULL);

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		int force = NO_FORCE;
		qd = ip->i_res->rs_qa_qd[x];
		if (test_and_clear_bit(QDF_REFRESH, &qd->qd_flags))
			force = FORCE;
		error = do_glock(qd, force, &ip->i_res->rs_qa_qd_ghs[x]);
		if (error)
			goto out;
	}

	set_bit(GIF_QD_LOCKED, &ip->i_flags);
	return 0;

out:
	while (x--)
		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
	gfs2_quota_unhold(ip);
	return error;
}

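/*
 * need_sync() below decides whether an ID's local change is worth syncing
 * by projecting cluster-wide usage: the local change is scaled by the
 * number of journals (a stand-in for the number of nodes) and by
 * quota_scale (gt_quota_scale_num/gt_quota_scale_den), then added to the
 * LVB-cached value.  A worked example with illustrative numbers: with 4
 * journals, the default scale of 1/1, a cached value of 980 blocks, a
 * limit of 1000 and a local change of +10, the projection is
 * 980 + 10 * 4 = 1020 >= 1000, so a sync is triggered.
 */
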
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	s64 value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&qd_lock);
	value = qd->qd_change;
	spin_unlock(&qd_lock);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if ((s64)be64_to_cpu(qd->qd_qb.qb_value) >=
		 (s64)be64_to_cpu(qd->qd_qb.qb_limit))
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		value = div_s64(value, den);
		value += (s64)be64_to_cpu(qd->qd_qb.qb_value);
		if (value < (s64)be64_to_cpu(qd->qd_qb.qb_limit))
			do_sync = 0;
	}

	return do_sync;
}

void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;
	int found;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = ip->i_res->rs_qa_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&ip->i_res->rs_qa_qd_ghs[x]);
		if (!sync)
			continue;

		spin_lock(&qd_lock);
		found = qd_check_sync(sdp, qd, NULL);
		spin_unlock(&qd_lock);

		if (!found)
			continue;

		gfs2_assert_warn(sdp, qd->qd_change_sync);
		if (bh_get(qd)) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			continue;
		}

		qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}

#define MAX_LINE 256

static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	pr_info("GFS2: fsid=%s: quota %s for %s %u\n",
		sdp->sd_fsname, type,
		(qd->qd_id.type == USRQUOTA) ? "user" : "group",
		from_kqid(&init_user_ns, qd->qd_id));

	return 0;
}

*ip
, kuid_t uid
, kgid_t gid
)
1094 struct gfs2_sbd
*sdp
= GFS2_SB(&ip
->i_inode
);
1095 struct gfs2_quota_data
*qd
;
1100 if (!test_bit(GIF_QD_LOCKED
, &ip
->i_flags
))
1103 if (sdp
->sd_args
.ar_quota
!= GFS2_QUOTA_ON
)
1106 for (x
= 0; x
< ip
->i_res
->rs_qa_qd_num
; x
++) {
1107 qd
= ip
->i_res
->rs_qa_qd
[x
];
1109 if (!(qid_eq(qd
->qd_id
, make_kqid_uid(uid
)) ||
1110 qid_eq(qd
->qd_id
, make_kqid_gid(gid
))))
1113 value
= (s64
)be64_to_cpu(qd
->qd_qb
.qb_value
);
1114 spin_lock(&qd_lock
);
1115 value
+= qd
->qd_change
;
1116 spin_unlock(&qd_lock
);
1118 if (be64_to_cpu(qd
->qd_qb
.qb_limit
) && (s64
)be64_to_cpu(qd
->qd_qb
.qb_limit
) < value
) {
1119 print_message(qd
, "exceeded");
1120 quota_send_warning(qd
->qd_id
,
1121 sdp
->sd_vfs
->s_dev
, QUOTA_NL_BHARDWARN
);
1125 } else if (be64_to_cpu(qd
->qd_qb
.qb_warn
) &&
1126 (s64
)be64_to_cpu(qd
->qd_qb
.qb_warn
) < value
&&
1127 time_after_eq(jiffies
, qd
->qd_last_warn
+
1129 gt_quota_warn_period
) * HZ
)) {
1130 quota_send_warning(qd
->qd_id
,
1131 sdp
->sd_vfs
->s_dev
, QUOTA_NL_BSOFTWARN
);
1132 error
= print_message(qd
, "warning");
1133 qd
->qd_last_warn
= jiffies
;
void gfs2_quota_change(struct gfs2_inode *ip, s64 change,
		       kuid_t uid, kgid_t gid)
{
	struct gfs2_quota_data *qd;
	unsigned int x;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_diskflags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < ip->i_res->rs_qa_qd_num; x++) {
		qd = ip->i_res->rs_qa_qd[x];

		if (qid_eq(qd->qd_id, make_kqid_uid(uid)) ||
		    qid_eq(qd->qd_id, make_kqid_gid(gid))) {
			do_qc(qd, change);
		}
	}
}

int gfs2_quota_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_data **qda;
	unsigned int max_qd = PAGE_SIZE/sizeof(struct gfs2_holder);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	mutex_lock(&sdp->sd_quota_sync_mutex);
	sdp->sd_quota_sync_gen++;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	mutex_unlock(&sdp->sd_quota_sync_mutex);
	kfree(qda);

	return error;
}

int gfs2_quota_refresh(struct gfs2_sbd *sdp, struct kqid qid)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);
	return error;
}

int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	u64 size = i_size_read(sdp->sd_qc_inode);
	unsigned int blocks = size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	unsigned int hash;
	unsigned int bm_size;
	u64 dblock;
	u32 extlen = 0;
	int error;

	if (gfs2_check_internal_file_size(sdp->sd_qc_inode, 1, 64 << 20))
		return -EIO;

	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	bm_size = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * sizeof(unsigned long));
	bm_size *= sizeof(unsigned long);
	error = -ENOMEM;
	sdp->sd_quota_bitmap = kzalloc(bm_size, GFP_NOFS | __GFP_NOWARN);
	if (sdp->sd_quota_bitmap == NULL)
		sdp->sd_quota_bitmap = __vmalloc(bm_size, GFP_NOFS |
						 __GFP_ZERO, PAGE_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		const struct gfs2_quota_change *qc;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new, &dblock, &extlen);
			if (error)
				goto fail;
		}
		error = -EIO;
		bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
		if (!bh)
			goto fail;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		qc = (const struct gfs2_quota_change *)(bh->b_data + sizeof(struct gfs2_meta_header));
		for (y = 0; y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_data *qd;
			s64 qc_change = be64_to_cpu(qc->qc_change);
			u32 qc_flags = be32_to_cpu(qc->qc_flags);
			enum quota_type qtype = (qc_flags & GFS2_QCF_USER) ?
						USRQUOTA : GRPQUOTA;
			struct kqid qc_id = make_kqid(&init_user_ns, qtype,
						      be32_to_cpu(qc->qc_id));
			qc++;
			if (!qc_change)
				continue;

			hash = gfs2_qd_hash(sdp, qc_id);
			qd = qd_alloc(hash, sdp, qc_id);
			if (qd == NULL) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;

			spin_lock(&qd_lock);
			BUG_ON(test_and_set_bit(slot, sdp->sd_quota_bitmap));
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&qd_lock);

			spin_lock_bucket(hash);
			hlist_bl_add_head_rcu(&qd->qd_hlist, &qd_hash_table[hash]);
			spin_unlock_bucket(hash);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}

void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;

	spin_lock(&qd_lock);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		list_del(&qd->qd_list);

		/* Also remove if this qd exists in the reclaim list */
		list_lru_del(&gfs2_qd_lru, &qd->qd_lru);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&qd_lock);

		spin_lock_bucket(qd->qd_hash);
		hlist_bl_del_rcu(&qd->qd_hlist);
		spin_unlock_bucket(qd->qd_hash);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_glock_put(qd->qd_gl);
		call_rcu(&qd->qd_rcu, gfs2_qd_dealloc);

		spin_lock(&qd_lock);
	}
	spin_unlock(&qd_lock);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		if (is_vmalloc_addr(sdp->sd_quota_bitmap))
			vfree(sdp->sd_quota_bitmap);
		else
			kfree(sdp->sd_quota_bitmap);
		sdp->sd_quota_bitmap = NULL;
	}
}

static void quotad_error(struct gfs2_sbd *sdp, const char *msg, int error)
{
	if (error == 0 || error == -EROFS)
		return;
	if (!test_bit(SDF_SHUTDOWN, &sdp->sd_flags))
		fs_err(sdp, "gfs2_quotad: %s error %d\n", msg, error);
}

static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
			       int (*fxn)(struct super_block *sb, int type),
			       unsigned long t, unsigned long *timeo,
			       unsigned int *new_timeo)
{
	if (t >= *timeo) {
		int error = fxn(sdp->sd_vfs, 0);
		quotad_error(sdp, msg, error);
		*timeo = gfs2_tune_get_i(&sdp->sd_tune, new_timeo) * HZ;
	} else {
		*timeo -= t;
	}
}

static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;

	while (1) {
		ip = NULL;
		spin_lock(&sdp->sd_trunc_lock);
		if (!list_empty(&sdp->sd_trunc_list)) {
			ip = list_entry(sdp->sd_trunc_list.next,
					struct gfs2_inode, i_trunc_list);
			list_del_init(&ip->i_trunc_list);
		}
		spin_unlock(&sdp->sd_trunc_lock);
		if (ip == NULL)
			return;
		gfs2_glock_finish_truncate(ip);
	}
}

void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
	if (!sdp->sd_statfs_force_sync) {
		sdp->sd_statfs_force_sync = 1;
		wake_up(&sdp->sd_quota_wait);
	}
}

/**
 * gfs2_quotad - Write cached quota changes into the quota file
 * @sdp: Pointer to GFS2 superblock
 *
 */
int gfs2_quotad(void *data)
{
	struct gfs2_sbd *sdp = data;
	struct gfs2_tune *tune = &sdp->sd_tune;
	unsigned long statfs_timeo = 0;
	unsigned long quotad_timeo = 0;
	unsigned long t = 0;
	DEFINE_WAIT(wait);
	int empty;

	while (!kthread_should_stop()) {

		/* Update the master statfs file */
		if (sdp->sd_statfs_force_sync) {
			int error = gfs2_statfs_sync(sdp->sd_vfs, 0);
			quotad_error(sdp, "statfs", error);
			statfs_timeo = gfs2_tune_get(sdp, gt_statfs_quantum) * HZ;
		}
		else
			quotad_check_timeo(sdp, "statfs", gfs2_statfs_sync, t,
					   &statfs_timeo,
					   &tune->gt_statfs_quantum);

		/* Update quota file */
		quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
				   &quotad_timeo, &tune->gt_quota_quantum);

		/* Check for & recover partially truncated inodes */
		quotad_check_trunc_list(sdp);

		try_to_freeze();

		t = min(quotad_timeo, statfs_timeo);

		prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
		spin_lock(&sdp->sd_trunc_lock);
		empty = list_empty(&sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		if (empty && !sdp->sd_statfs_force_sync)
			t -= schedule_timeout(t);
		else
			t = 0;
		finish_wait(&sdp->sd_quota_wait, &wait);
	}

	return 0;
}

static int gfs2_quota_get_xstate(struct super_block *sb,
				 struct fs_quota_stat *fqs)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	memset(fqs, 0, sizeof(struct fs_quota_stat));
	fqs->qs_version = FS_QSTAT_VERSION;

	switch (sdp->sd_args.ar_quota) {
	case GFS2_QUOTA_ON:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ENFD | FS_QUOTA_GDQ_ENFD);
		/*FALLTHRU*/
	case GFS2_QUOTA_ACCOUNT:
		fqs->qs_flags |= (FS_QUOTA_UDQ_ACCT | FS_QUOTA_GDQ_ACCT);
		break;
	case GFS2_QUOTA_OFF:
		break;
	}

	if (sdp->sd_quota_inode) {
		fqs->qs_uquota.qfs_ino = GFS2_I(sdp->sd_quota_inode)->i_no_addr;
		fqs->qs_uquota.qfs_nblks = sdp->sd_quota_inode->i_blocks;
	}

	fqs->qs_uquota.qfs_nextents = 1; /* unsupported */
	fqs->qs_gquota = fqs->qs_uquota; /* it's the same inode in both cases */
	fqs->qs_incoredqs = list_lru_count(&gfs2_qd_lru);
	return 0;
}

static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_quota_lvb *qlvb;
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	memset(fdq, 0, sizeof(struct fs_disk_quota));

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;
	error = do_glock(qd, FORCE, &q_gh);
	if (error)
		goto out;

	qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
	fdq->d_version = FS_DQUOT_VERSION;
	fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
	fdq->d_id = from_kqid_munged(current_user_ns(), qid);
	fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
	fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
	fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;

	gfs2_glock_dq_uninit(&q_gh);
out:
	qd_put(qd);
	return error;
}

1540 #define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
			  struct fs_disk_quota *fdq)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh, i_gh;
	unsigned int data_blocks, ind_blocks;
	unsigned int blocks = 0;
	int alloc_required;
	loff_t offset;
	int error;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return -ESRCH; /* Crazy XFS error code */

	if ((qid.type != USRQUOTA) &&
	    (qid.type != GRPQUOTA))
		return -EINVAL;

	if (fdq->d_fieldmask & ~GFS2_FIELDMASK)
		return -EINVAL;

	error = qd_get(sdp, qid, &qd);
	if (error)
		return error;

	error = gfs2_rs_alloc(ip);
	if (error)
		goto out_put;

	mutex_lock(&ip->i_inode.i_mutex);
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_EXCLUSIVE, 0, &q_gh);
	if (error)
		goto out_unlockput;
	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out_q;

	/* Check for existing entry, if none then alloc new blocks */
	error = update_qd(sdp, qd);
	if (error)
		goto out_i;

	/* If nothing has changed, this is a no-op */
	if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
	    ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
		fdq->d_fieldmask ^= FS_DQ_BSOFT;

	if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
	    ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
		fdq->d_fieldmask ^= FS_DQ_BHARD;

	if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
	    ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
		fdq->d_fieldmask ^= FS_DQ_BCOUNT;

	if (fdq->d_fieldmask == 0)
		goto out_i;

	offset = qd2offset(qd);
	alloc_required = gfs2_write_alloc_required(ip, offset, sizeof(struct gfs2_quota));
	if (gfs2_is_stuffed(ip))
		alloc_required = 1;
	if (alloc_required) {
		struct gfs2_alloc_parms ap = { .aflags = 0, };
		gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
				       &data_blocks, &ind_blocks);
		blocks = 1 + data_blocks + ind_blocks;
		ap.target = blocks;
		error = gfs2_inplace_reserve(ip, &ap);
		if (error)
			goto out_i;
		blocks += gfs2_rg_blocks(ip, blocks);
	}

	/* Some quotas span block boundaries and can update two blocks,
	   adding an extra block to the transaction to handle such quotas */
	error = gfs2_trans_begin(sdp, blocks + RES_DINODE + 2, 0);
	if (error)
		goto out_release;

	/* Apply changes */
	error = gfs2_adjust_quota(ip, offset, 0, qd, fdq);

	gfs2_trans_end(sdp);
out_release:
	if (alloc_required)
		gfs2_inplace_release(ip);
out_i:
	gfs2_glock_dq_uninit(&i_gh);
out_q:
	gfs2_glock_dq_uninit(&q_gh);
out_unlockput:
	mutex_unlock(&ip->i_inode.i_mutex);
out_put:
	qd_put(qd);
	return error;
}

const struct quotactl_ops gfs2_quotactl_ops = {
	.quota_sync     = gfs2_quota_sync,
	.get_xstate     = gfs2_quota_get_xstate,
	.get_dqblk	= gfs2_get_dqblk,
	.set_dqblk	= gfs2_set_dqblk,
};

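/*
 * Usage note (a sketch of the call path, not defined in this file): these
 * are XFS-style quotactl handlers, which is why they traffic in
 * fs_disk_quota/fs_quota_stat and return XFS-flavoured error codes above.
 * Userspace reaches them through quotactl(2) commands such as Q_XGETQSTAT,
 * Q_XGETQUOTA and Q_XSETQLIM, e.g. via the xfs_quota tool.
 */
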
void __init gfs2_quota_hash_init(void)
{
	unsigned i;

	for(i = 0; i < GFS2_QD_HASH_SIZE; i++)
		INIT_HLIST_BL_HEAD(&qd_hash_table[i]);
}