/*
 * Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
/*
 * Quota change tags are associated with each transaction that allocates or
 * deallocates space.  Those changes are accumulated locally to each node (in a
 * per-node file) and then are periodically synced to the quota file.  This
 * avoids the bottleneck of constantly touching the quota file, but introduces
 * fuzziness in the current usage value of IDs that are being used on different
 * nodes in the cluster simultaneously.  So, it is possible for a user on
 * multiple nodes to overrun their quota, but that overrun is controllable.
 * Since quota tags are part of transactions, there is no need for a
 * quota-check program to be run on node crashes or anything like that.
 *
 * There are a couple of knobs that let the administrator manage the quota
 * fuzziness.  "quota_quantum" sets the maximum time a quota change can be
 * sitting on one node before being synced to the quota file.  (The default is
 * 60 seconds.)  Another knob, "quota_scale", controls how quickly the
 * frequency of quota file syncs increases as the user moves closer to their
 * limit.  The more frequent the syncs, the more accurate the quota
 * enforcement, but that means that there is more contention between the nodes
 * for the quota file.  The default value is one.  This sets the maximum
 * theoretical quota overrun (with an infinite number of nodes, each with
 * infinite bandwidth) to twice the user's limit.  (In practice, the maximum
 * overrun you see should be much less.)  A "quota_scale" greater than one
 * makes quota syncs more frequent and reduces the maximum overrun.  Numbers
 * less than one (but greater than zero) make quota syncs less frequent.
 *
 * GFS quotas also use per-ID Lock Value Blocks (LVBs) to cache the contents of
 * the quota file, so it is not constantly being read.
 */
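
/*
 * Illustrative sketch, not a normative formula: the sync decision in
 * need_sync() below scales a node's locally accumulated change by the
 * number of journals (i.e. nodes) times quota_scale (num/den), adds the
 * last synced value, and triggers a sync once that sum would reach the
 * limit.  With the default scale of one, each node therefore syncs before
 * the cluster-wide worst case (every node carrying a similar unsynced
 * change) could push the user far past the limit.
 */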
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>

#include "lm_interface.h"
#include "ops_address.h"
static uint64_t qd2offset(struct gfs2_quota_data *qd)
{
	uint64_t offset;

	offset = 2 * (uint64_t)qd->qd_id + !test_bit(QDF_USER, &qd->qd_flags);
	offset *= sizeof(struct gfs2_quota);

	return offset;
}
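
/*
 * Example of the mapping above: user and group records interleave in the
 * quota file, so user ID 10 lives at offset 20 * sizeof(struct gfs2_quota)
 * and group ID 10 at offset 21 * sizeof(struct gfs2_quota).
 */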
static int qd_alloc(struct gfs2_sbd *sdp, int user, uint32_t id,
		    struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd;
	int error;

	qd = kzalloc(sizeof(struct gfs2_quota_data), GFP_KERNEL);
	if (!qd)
		return -ENOMEM;

	qd->qd_count = 1;
	qd->qd_id = id;
	if (user)
		set_bit(QDF_USER, &qd->qd_flags);

	error = gfs2_glock_get(sdp, 2 * (uint64_t)id + !user,
			       &gfs2_quota_glops, CREATE, &qd->qd_gl);
	if (error)
		goto fail;

	error = gfs2_lvb_hold(qd->qd_gl);
	gfs2_glock_put(qd->qd_gl);
	if (error)
		goto fail;

	*qdp = qd;

	return 0;

fail:
	kfree(qd);
	return error;
}
static int qd_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
		  struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL, *new_qd = NULL;
	int error, found;

	*qdp = NULL;

	for (;;) {
		found = 0;
		spin_lock(&sdp->sd_quota_spin);
		list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
			if (qd->qd_id == id &&
			    !test_bit(QDF_USER, &qd->qd_flags) == !user) {
				qd->qd_count++;
				found = 1;
				break;
			}
		}

		if (!found)
			qd = NULL;

		if (!qd && new_qd) {
			qd = new_qd;
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			new_qd = NULL;
		}

		spin_unlock(&sdp->sd_quota_spin);

		if (qd || !create) {
			if (new_qd) {
				gfs2_lvb_unhold(new_qd->qd_gl);
				kfree(new_qd);
			}
			*qdp = qd;
			return 0;
		}

		error = qd_alloc(sdp, user, id, &new_qd);
		if (error)
			return error;
	}
}
static void qd_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	qd->qd_count++;
	spin_unlock(&sdp->sd_quota_spin);
}
static void qd_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_count);
	if (!--qd->qd_count)
		qd->qd_last_touched = jiffies;
	spin_unlock(&sdp->sd_quota_spin);
}
static int slot_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	unsigned int c, o = 0, b;
	unsigned char byte = 0;

	spin_lock(&sdp->sd_quota_spin);

	if (qd->qd_slot_count++) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	/* Find the first byte in the bitmap with a free bit. */
	for (c = 0; c < sdp->sd_quota_chunks; c++)
		for (o = 0; o < PAGE_SIZE; o++) {
			byte = sdp->sd_quota_bitmap[c][o];
			if (byte != 0xFF)
				goto found;
		}

	goto fail;

found:
	for (b = 0; b < 8; b++)
		if (!(byte & (1 << b)))
			break;
	qd->qd_slot = c * (8 * PAGE_SIZE) + o * 8 + b;

	if (qd->qd_slot >= sdp->sd_quota_slots)
		goto fail;

	sdp->sd_quota_bitmap[c][o] |= 1 << b;

	spin_unlock(&sdp->sd_quota_spin);

	return 0;

fail:
	qd->qd_slot_count--;
	spin_unlock(&sdp->sd_quota_spin);
	return -ENOSPC;
}
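
/*
 * Note on the bitmap geometry used by slot_get(): each chunk is one page,
 * so it covers 8 * PAGE_SIZE slots, and a slot number decomposes into
 * (chunk c, byte o, bit b) as c * (8 * PAGE_SIZE) + o * 8 + b.
 */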
static void slot_hold(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;
	spin_unlock(&sdp->sd_quota_spin);
}
static void slot_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	spin_lock(&sdp->sd_quota_spin);
	gfs2_assert(sdp, qd->qd_slot_count);
	if (!--qd->qd_slot_count) {
		gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
		qd->qd_slot = -1;
	}
	spin_unlock(&sdp->sd_quota_spin);
}
static int bh_get(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int block, offset;
	uint64_t dblock;
	int new = 0;
	int boundary;
	struct buffer_head *bh;
	int error;

	mutex_lock(&sdp->sd_quota_mutex);

	if (qd->qd_bh_count++) {
		mutex_unlock(&sdp->sd_quota_mutex);
		return 0;
	}

	block = qd->qd_slot / sdp->sd_qc_per_block;
	offset = qd->qd_slot % sdp->sd_qc_per_block;

	error = gfs2_block_map(&ip->i_inode, block, &new, &dblock, &boundary);
	if (error)
		goto fail;
	error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT, &bh);
	if (error)
		goto fail;
	error = -EIO;
	if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC))
		goto fail_brelse;

	qd->qd_bh = bh;
	qd->qd_bh_qc = (struct gfs2_quota_change *)
		       (bh->b_data + sizeof(struct gfs2_meta_header) +
			offset * sizeof(struct gfs2_quota_change));

	mutex_unlock(&sdp->sd_quota_mutex);

	return 0;

fail_brelse:
	brelse(bh);
fail:
	qd->qd_bh_count--;
	mutex_unlock(&sdp->sd_quota_mutex);
	return error;
}
static void bh_put(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_assert(sdp, qd->qd_bh_count);
	if (!--qd->qd_bh_count) {
		brelse(qd->qd_bh);
		qd->qd_bh = NULL;
		qd->qd_bh_qc = NULL;
	}
	mutex_unlock(&sdp->sd_quota_mutex);
}
static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
{
	struct gfs2_quota_data *qd = NULL;
	int error;
	int found = 0;

	*qdp = NULL;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
		if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
		    !test_bit(QDF_CHANGE, &qd->qd_flags) ||
		    qd->qd_sync_gen >= sdp->sd_quota_sync_gen)
			continue;

		list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

		set_bit(QDF_LOCKED, &qd->qd_flags);
		gfs2_assert_warn(sdp, qd->qd_count);
		qd->qd_count++;
		qd->qd_change_sync = qd->qd_change;
		gfs2_assert_warn(sdp, qd->qd_slot_count);
		qd->qd_slot_count++;

		found = 1;
		break;
	}

	if (!found)
		qd = NULL;

	spin_unlock(&sdp->sd_quota_spin);

	if (qd) {
		gfs2_assert_warn(sdp, qd->qd_change_sync);
		error = bh_get(qd);
		if (error) {
			clear_bit(QDF_LOCKED, &qd->qd_flags);
			slot_put(qd);
			qd_put(qd);
			return error;
		}
	}

	*qdp = qd;

	return 0;
}
static int qd_trylock(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	if (sdp->sd_vfs->s_flags & MS_RDONLY)
		return 0;

	spin_lock(&sdp->sd_quota_spin);

	if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
	    !test_bit(QDF_CHANGE, &qd->qd_flags)) {
		spin_unlock(&sdp->sd_quota_spin);
		return 0;
	}

	list_move_tail(&qd->qd_list, &sdp->sd_quota_list);

	set_bit(QDF_LOCKED, &qd->qd_flags);
	gfs2_assert_warn(sdp, qd->qd_count);
	qd->qd_count++;
	qd->qd_change_sync = qd->qd_change;
	gfs2_assert_warn(sdp, qd->qd_slot_count);
	qd->qd_slot_count++;

	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, qd->qd_change_sync);
	if (bh_get(qd)) {
		clear_bit(QDF_LOCKED, &qd->qd_flags);
		slot_put(qd);
		qd_put(qd);
		return 0;
	}

	return 1;
}
static void qd_unlock(struct gfs2_quota_data *qd)
{
	gfs2_assert_warn(qd->qd_gl->gl_sbd,
			 test_bit(QDF_LOCKED, &qd->qd_flags));
	clear_bit(QDF_LOCKED, &qd->qd_flags);
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
static int qdsb_get(struct gfs2_sbd *sdp, int user, uint32_t id, int create,
		    struct gfs2_quota_data **qdp)
{
	int error;

	error = qd_get(sdp, user, id, create, qdp);
	if (error)
		return error;

	error = slot_get(*qdp);
	if (error)
		goto fail;

	error = bh_get(*qdp);
	if (error)
		goto fail_slot;

	return 0;

fail_slot:
	slot_put(*qdp);
fail:
	qd_put(*qdp);
	return error;
}
static void qdsb_put(struct gfs2_quota_data *qd)
{
	bh_put(qd);
	slot_put(qd);
	qd_put(qd);
}
int gfs2_quota_hold(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data **qd = al->al_qd;
	int error;

	if (gfs2_assert_warn(sdp, !al->al_qd_num) ||
	    gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags)))
		return -EIO;

	if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
		return 0;

	error = qdsb_get(sdp, QUOTA_USER, ip->i_di.di_uid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	error = qdsb_get(sdp, QUOTA_GROUP, ip->i_di.di_gid, CREATE, qd);
	if (error)
		goto out;
	al->al_qd_num++;
	qd++;

	if (uid != NO_QUOTA_CHANGE && uid != ip->i_di.di_uid) {
		error = qdsb_get(sdp, QUOTA_USER, uid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

	if (gid != NO_QUOTA_CHANGE && gid != ip->i_di.di_gid) {
		error = qdsb_get(sdp, QUOTA_GROUP, gid, CREATE, qd);
		if (error)
			goto out;
		al->al_qd_num++;
		qd++;
	}

out:
	if (error)
		gfs2_quota_unhold(ip);
	return error;
}
void gfs2_quota_unhold(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;

	gfs2_assert_warn(sdp, !test_bit(GIF_QD_LOCKED, &ip->i_flags));

	for (x = 0; x < al->al_qd_num; x++) {
		qdsb_put(al->al_qd[x]);
		al->al_qd[x] = NULL;
	}
	al->al_qd_num = 0;
}
static int sort_qd(const void *a, const void *b)
{
	struct gfs2_quota_data *qd_a = *(struct gfs2_quota_data **)a;
	struct gfs2_quota_data *qd_b = *(struct gfs2_quota_data **)b;
	int ret = 0;

	/* User quotas sort before group quotas, then by ID. */
	if (!test_bit(QDF_USER, &qd_a->qd_flags) !=
	    !test_bit(QDF_USER, &qd_b->qd_flags)) {
		if (test_bit(QDF_USER, &qd_a->qd_flags))
			ret = -1;
		else
			ret = 1;
	} else {
		if (qd_a->qd_id < qd_b->qd_id)
			ret = -1;
		else if (qd_a->qd_id > qd_b->qd_id)
			ret = 1;
	}

	return ret;
}
static void do_qc(struct gfs2_quota_data *qd, int64_t change)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	struct gfs2_quota_change *qc = qd->qd_bh_qc;
	int64_t x;

	mutex_lock(&sdp->sd_quota_mutex);
	gfs2_trans_add_bh(ip->i_gl, qd->qd_bh, 1);

	if (!test_bit(QDF_CHANGE, &qd->qd_flags)) {
		qc->qc_change = 0;
		qc->qc_flags = 0;
		if (test_bit(QDF_USER, &qd->qd_flags))
			qc->qc_flags = cpu_to_be32(GFS2_QCF_USER);
		qc->qc_id = cpu_to_be32(qd->qd_id);
	}

	x = qc->qc_change;
	x = be64_to_cpu(x) + change;
	qc->qc_change = cpu_to_be64(x);

	spin_lock(&sdp->sd_quota_spin);
	qd->qd_change = x;
	spin_unlock(&sdp->sd_quota_spin);

	if (!x) {
		gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
		clear_bit(QDF_CHANGE, &qd->qd_flags);
		qc->qc_flags = 0;
		qc->qc_id = 0;
		slot_put(qd);
		qd_put(qd);
	} else if (!test_and_set_bit(QDF_CHANGE, &qd->qd_flags)) {
		qd_hold(qd);
		slot_hold(qd);
	}

	mutex_unlock(&sdp->sd_quota_mutex);
}
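
/*
 * Summary of the lifetime rules implemented by do_qc(): the first nonzero
 * change sets QDF_CHANGE and pins the qd and its slot; when the
 * accumulated change returns to zero, the tag is cleared and those
 * references are dropped again.
 */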
/*
 * This function was mostly borrowed from gfs2_block_truncate_page, which was
 * in turn mostly borrowed from ext3.
 */
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
			     int64_t change, struct gfs2_quota_data *qd)
{
	struct inode *inode = &ip->i_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index = loc >> PAGE_CACHE_SHIFT;
	unsigned offset = loc & (PAGE_CACHE_SIZE - 1);
	unsigned blocksize, iblock, pos;
	struct buffer_head *bh;
	struct page *page;
	void *kaddr;
	__be64 *ptr;
	uint64_t value;
	int err = -EIO;

	page = grab_cache_page(mapping, index);
	if (!page)
		return -ENOMEM;

	blocksize = inode->i_sb->s_blocksize;
	iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);

	if (!page_has_buffers(page))
		create_empty_buffers(page, blocksize, 0);

	bh = page_buffers(page);
	pos = blocksize;
	while (offset >= pos) {
		bh = bh->b_this_page;
		iblock++;
		pos += blocksize;
	}

	if (!buffer_mapped(bh)) {
		gfs2_get_block(inode, iblock, bh, 1);
		if (!buffer_mapped(bh))
			goto unlock;
	}

	if (PageUptodate(page))
		set_buffer_uptodate(bh);

	if (!buffer_uptodate(bh)) {
		ll_rw_block(READ, 1, &bh);
		wait_on_buffer(bh);
		if (!buffer_uptodate(bh))
			goto unlock;
	}

	gfs2_trans_add_bh(ip->i_gl, bh, 0);

	kaddr = kmap_atomic(page, KM_USER0);
	ptr = (__be64 *)(kaddr + offset);
	value = *ptr = cpu_to_be64(be64_to_cpu(*ptr) + change);
	flush_dcache_page(page);
	kunmap_atomic(kaddr, KM_USER0);
	err = 0;

	qd->qd_qb.qb_magic = cpu_to_be32(GFS2_MAGIC);
	qd->qd_qb.qb_limit = cpu_to_be64(q.qu_limit);
	qd->qd_qb.qb_warn = cpu_to_be64(q.qu_warn);
	qd->qd_qb.qb_value = cpu_to_be64(value);

unlock:
	unlock_page(page);
	page_cache_release(page);
	return err;
}
static int do_sync(unsigned int num_qd, struct gfs2_quota_data **qda)
{
	struct gfs2_sbd *sdp = (*qda)->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	unsigned int data_blocks, ind_blocks;
	struct gfs2_holder *ghs, i_gh;
	unsigned int qx, x;
	struct gfs2_quota_data *qd;
	loff_t offset;
	unsigned int nalloc = 0;
	struct gfs2_alloc *al = NULL;
	int error;

	gfs2_write_calc_reserv(ip, sizeof(struct gfs2_quota),
			       &data_blocks, &ind_blocks);

	ghs = kcalloc(num_qd, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!ghs)
		return -ENOMEM;

	sort(qda, num_qd, sizeof(struct gfs2_quota_data *), sort_qd, NULL);
	for (qx = 0; qx < num_qd; qx++) {
		error = gfs2_glock_nq_init(qda[qx]->qd_gl,
					   LM_ST_EXCLUSIVE,
					   GL_NOCACHE, &ghs[qx]);
		if (error)
			goto out;
	}

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &i_gh);
	if (error)
		goto out;

	for (x = 0; x < num_qd; x++) {
		int alloc_required;

		offset = qd2offset(qda[x]);
		error = gfs2_write_alloc_required(ip, offset,
						  sizeof(struct gfs2_quota),
						  &alloc_required);
		if (error)
			goto out_gunlock;
		if (alloc_required)
			nalloc++;
	}

	if (nalloc) {
		al = gfs2_alloc_get(ip);

		al->al_requested = nalloc * (data_blocks + ind_blocks);

		error = gfs2_inplace_reserve(ip);
		if (error)
			goto out_alloc;

		error = gfs2_trans_begin(sdp,
					 al->al_rgd->rd_ri.ri_length +
					 num_qd * data_blocks +
					 nalloc * ind_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_ipres;
	} else {
		error = gfs2_trans_begin(sdp,
					 num_qd * data_blocks +
					 RES_DINODE + num_qd, 0);
		if (error)
			goto out_gunlock;
	}

	for (x = 0; x < num_qd; x++) {
		qd = qda[x];
		offset = qd2offset(qd);
		error = gfs2_adjust_quota(ip, offset, qd->qd_change_sync,
					  (struct gfs2_quota_data *)
					  qd->qd_gl->gl_lvb);
		if (error)
			goto out_end_trans;

		do_qc(qd, -qd->qd_change_sync);
	}

	error = 0;

out_end_trans:
	gfs2_trans_end(sdp);
out_ipres:
	if (nalloc)
		gfs2_inplace_release(ip);
out_alloc:
	if (nalloc)
		gfs2_alloc_put(ip);
out_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
out:
	while (qx--)
		gfs2_glock_dq_uninit(&ghs[qx]);
	kfree(ghs);
	gfs2_log_flush(ip->i_gl->gl_sbd, ip->i_gl);
	return error;
}
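
/*
 * The reservation arithmetic in do_sync(), spelled out: each quota record
 * costs data_blocks of quota-file data plus, when allocation is required,
 * ind_blocks of indirect metadata; the transaction additionally covers the
 * dinode, one quota-change block per record and, in the allocating case,
 * the resource group header.
 */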
static int do_glock(struct gfs2_quota_data *qd, int force_refresh,
		    struct gfs2_holder *q_gh)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
	struct gfs2_holder i_gh;
	struct gfs2_quota q;
	char buf[sizeof(struct gfs2_quota)];
	struct file_ra_state ra_state;
	int error;

	file_ra_state_init(&ra_state, sdp->sd_quota_inode->i_mapping);
restart:
	error = gfs2_glock_nq_init(qd->qd_gl, LM_ST_SHARED, 0, q_gh);
	if (error)
		return error;

	gfs2_quota_lvb_in(&qd->qd_qb, qd->qd_gl->gl_lvb);

	if (force_refresh || qd->qd_qb.qb_magic != GFS2_MAGIC) {
		loff_t pos;
		gfs2_glock_dq_uninit(q_gh);
		error = gfs2_glock_nq_init(qd->qd_gl,
					   LM_ST_EXCLUSIVE, GL_NOCACHE,
					   q_gh);
		if (error)
			return error;

		error = gfs2_glock_nq_init(ip->i_gl,
					   LM_ST_SHARED, 0,
					   &i_gh);
		if (error)
			goto fail;

		memset(buf, 0, sizeof(struct gfs2_quota));
		pos = qd2offset(qd);
		error = gfs2_internal_read(ip, &ra_state, buf,
					   &pos, sizeof(struct gfs2_quota));
		if (error < 0)
			goto fail_gunlock;

		gfs2_glock_dq_uninit(&i_gh);

		gfs2_quota_in(&q, buf);

		memset(&qd->qd_qb, 0, sizeof(struct gfs2_quota_lvb));
		qd->qd_qb.qb_magic = GFS2_MAGIC;
		qd->qd_qb.qb_limit = q.qu_limit;
		qd->qd_qb.qb_warn = q.qu_warn;
		qd->qd_qb.qb_value = q.qu_value;

		gfs2_quota_lvb_out(&qd->qd_qb, qd->qd_gl->gl_lvb);

		if (gfs2_glock_is_blocking(qd->qd_gl)) {
			gfs2_glock_dq_uninit(q_gh);
			force_refresh = 0;
			goto restart;
		}
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	gfs2_glock_dq_uninit(q_gh);
	return error;
}
int gfs2_quota_lock(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	unsigned int x;
	int error = 0;

	gfs2_quota_hold(ip, uid, gid);

	if (capable(CAP_SYS_RESOURCE) ||
	    sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	sort(al->al_qd, al->al_qd_num, sizeof(struct gfs2_quota_data *),
	     sort_qd, NULL);

	for (x = 0; x < al->al_qd_num; x++) {
		error = do_glock(al->al_qd[x], NO_FORCE, &al->al_qd_ghs[x]);
		if (error)
			break;
	}

	if (!error)
		set_bit(GIF_QD_LOCKED, &ip->i_flags);
	else {
		while (x--)
			gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);
		gfs2_quota_unhold(ip);
	}

	return error;
}
static int need_sync(struct gfs2_quota_data *qd)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
	struct gfs2_tune *gt = &sdp->sd_tune;
	int64_t value;
	unsigned int num, den;
	int do_sync = 1;

	if (!qd->qd_qb.qb_limit)
		return 0;

	spin_lock(&sdp->sd_quota_spin);
	value = qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	spin_lock(&gt->gt_spin);
	num = gt->gt_quota_scale_num;
	den = gt->gt_quota_scale_den;
	spin_unlock(&gt->gt_spin);

	if (value < 0)
		do_sync = 0;
	else if (qd->qd_qb.qb_value >= (int64_t)qd->qd_qb.qb_limit)
		do_sync = 0;
	else {
		value *= gfs2_jindex_size(sdp) * num;
		do_div(value, den);
		value += qd->qd_qb.qb_value;
		if (value < (int64_t)qd->qd_qb.qb_limit)
			do_sync = 0;
	}

	return do_sync;
}
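
/*
 * Worked example with made-up numbers: with 3 journals, the default
 * quota_scale of 1/1, a synced qb_value of 600 and a qb_limit of 1000, a
 * local change of 150 gives 150 * 3 * 1 / 1 + 600 = 1050 >= 1000, so the
 * change is synced; a local change of 100 (900 < 1000) is not.
 */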
void gfs2_quota_unlock(struct gfs2_inode *ip)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qda[4];
	unsigned int count = 0;
	unsigned int x;

	if (!test_and_clear_bit(GIF_QD_LOCKED, &ip->i_flags))
		goto out;

	for (x = 0; x < al->al_qd_num; x++) {
		struct gfs2_quota_data *qd;
		int sync;

		qd = al->al_qd[x];
		sync = need_sync(qd);

		gfs2_glock_dq_uninit(&al->al_qd_ghs[x]);

		if (sync && qd_trylock(qd))
			qda[count++] = qd;
	}

	if (count) {
		do_sync(count, qda);
		for (x = 0; x < count; x++)
			qd_unlock(qda[x]);
	}

out:
	gfs2_quota_unhold(ip);
}
static int print_message(struct gfs2_quota_data *qd, char *type)
{
	struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;

	printk(KERN_INFO "GFS2: fsid=%s: quota %s for %s %u\n",
	       sdp->sd_fsname, type,
	       (test_bit(QDF_USER, &qd->qd_flags)) ? "user" : "group",
	       qd->qd_id);

	return 0;
}
int gfs2_quota_check(struct gfs2_inode *ip, uint32_t uid, uint32_t gid)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	int64_t value;
	unsigned int x;
	int error = 0;

	if (!test_bit(GIF_QD_LOCKED, &ip->i_flags))
		return 0;

	if (sdp->sd_args.ar_quota != GFS2_QUOTA_ON)
		return 0;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if (!((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		      (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))))
			continue;

		value = qd->qd_qb.qb_value;
		spin_lock(&sdp->sd_quota_spin);
		value += qd->qd_change;
		spin_unlock(&sdp->sd_quota_spin);

		if (qd->qd_qb.qb_limit && (int64_t)qd->qd_qb.qb_limit < value) {
			print_message(qd, "exceeded");
			error = -EDQUOT;
			break;
		} else if (qd->qd_qb.qb_warn &&
			   (int64_t)qd->qd_qb.qb_warn < value &&
			   time_after_eq(jiffies, qd->qd_last_warn +
					 gfs2_tune_get(sdp,
						       gt_quota_warn_period)
					 * HZ)) {
			error = print_message(qd, "warning");
			qd->qd_last_warn = jiffies;
		}
	}

	return error;
}
void gfs2_quota_change(struct gfs2_inode *ip, int64_t change,
		       uint32_t uid, uint32_t gid)
{
	struct gfs2_alloc *al = &ip->i_alloc;
	struct gfs2_quota_data *qd;
	unsigned int x;
	unsigned int found = 0;

	if (gfs2_assert_warn(GFS2_SB(&ip->i_inode), change))
		return;
	if (ip->i_di.di_flags & GFS2_DIF_SYSTEM)
		return;

	for (x = 0; x < al->al_qd_num; x++) {
		qd = al->al_qd[x];

		if ((qd->qd_id == uid && test_bit(QDF_USER, &qd->qd_flags)) ||
		    (qd->qd_id == gid && !test_bit(QDF_USER, &qd->qd_flags))) {
			do_qc(qd, change);
			found++;
		}
	}
}
int gfs2_quota_sync(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data **qda;
	unsigned int max_qd = gfs2_tune_get(sdp, gt_quota_simul_sync);
	unsigned int num_qd;
	unsigned int x;
	int error = 0;

	sdp->sd_quota_sync_gen++;

	qda = kcalloc(max_qd, sizeof(struct gfs2_quota_data *), GFP_KERNEL);
	if (!qda)
		return -ENOMEM;

	do {
		num_qd = 0;

		for (;;) {
			error = qd_fish(sdp, qda + num_qd);
			if (error || !qda[num_qd])
				break;
			if (++num_qd == max_qd)
				break;
		}

		if (num_qd) {
			if (!error)
				error = do_sync(num_qd, qda);
			if (!error)
				for (x = 0; x < num_qd; x++)
					qda[x]->qd_sync_gen =
						sdp->sd_quota_sync_gen;

			for (x = 0; x < num_qd; x++)
				qd_unlock(qda[x]);
		}
	} while (!error && num_qd == max_qd);

	kfree(qda);

	return error;
}
int gfs2_quota_refresh(struct gfs2_sbd *sdp, int user, uint32_t id)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, FORCE, &q_gh);
	if (!error)
		gfs2_glock_dq_uninit(&q_gh);

	qd_put(qd);

	return error;
}
int gfs2_quota_read(struct gfs2_sbd *sdp, int user, uint32_t id,
		    struct gfs2_quota *q)
{
	struct gfs2_quota_data *qd;
	struct gfs2_holder q_gh;
	int error;

	if (((user) ? (id != current->fsuid) : (!in_group_p(id))) &&
	    !capable(CAP_SYS_ADMIN))
		return -EACCES;

	error = qd_get(sdp, user, id, CREATE, &qd);
	if (error)
		return error;

	error = do_glock(qd, NO_FORCE, &q_gh);
	if (error)
		goto out;

	memset(q, 0, sizeof(struct gfs2_quota));
	q->qu_limit = qd->qd_qb.qb_limit;
	q->qu_warn = qd->qd_qb.qb_warn;
	q->qu_value = qd->qd_qb.qb_value;

	spin_lock(&sdp->sd_quota_spin);
	q->qu_value += qd->qd_change;
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_glock_dq_uninit(&q_gh);

out:
	qd_put(qd);

	return error;
}
int gfs2_quota_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_qc_inode);
	unsigned int blocks = ip->i_di.di_size >> sdp->sd_sb.sb_bsize_shift;
	unsigned int x, slot = 0;
	unsigned int found = 0;
	uint64_t dblock;
	uint32_t extlen = 0;
	int error;

	if (!ip->i_di.di_size ||
	    ip->i_di.di_size > (64 << 20) ||
	    ip->i_di.di_size & (sdp->sd_sb.sb_bsize - 1)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}
	sdp->sd_quota_slots = blocks * sdp->sd_qc_per_block;
	sdp->sd_quota_chunks = DIV_ROUND_UP(sdp->sd_quota_slots, 8 * PAGE_SIZE);

	error = -ENOMEM;

	sdp->sd_quota_bitmap = kcalloc(sdp->sd_quota_chunks,
				       sizeof(unsigned char *), GFP_KERNEL);
	if (!sdp->sd_quota_bitmap)
		return error;

	for (x = 0; x < sdp->sd_quota_chunks; x++) {
		sdp->sd_quota_bitmap[x] = kzalloc(PAGE_SIZE, GFP_KERNEL);
		if (!sdp->sd_quota_bitmap[x])
			goto fail;
	}

	for (x = 0; x < blocks; x++) {
		struct buffer_head *bh;
		unsigned int y;

		if (!extlen) {
			int new = 0;
			error = gfs2_extent_map(&ip->i_inode, x, &new,
						&dblock, &extlen);
			if (error)
				goto fail;
		}
		gfs2_meta_ra(ip->i_gl, dblock, extlen);
		error = gfs2_meta_read(ip->i_gl, dblock, DIO_START | DIO_WAIT,
				       &bh);
		if (error)
			goto fail;
		error = -EIO;
		if (gfs2_metatype_check(sdp, bh, GFS2_METATYPE_QC)) {
			brelse(bh);
			goto fail;
		}

		for (y = 0;
		     y < sdp->sd_qc_per_block && slot < sdp->sd_quota_slots;
		     y++, slot++) {
			struct gfs2_quota_change qc;
			struct gfs2_quota_data *qd;

			gfs2_quota_change_in(&qc, bh->b_data +
					     sizeof(struct gfs2_meta_header) +
					     y * sizeof(struct gfs2_quota_change));
			if (!qc.qc_change)
				continue;

			error = qd_alloc(sdp, (qc.qc_flags & GFS2_QCF_USER),
					 qc.qc_id, &qd);
			if (error) {
				brelse(bh);
				goto fail;
			}

			set_bit(QDF_CHANGE, &qd->qd_flags);
			qd->qd_change = qc.qc_change;
			qd->qd_slot = slot;
			qd->qd_slot_count = 1;
			qd->qd_last_touched = jiffies;

			spin_lock(&sdp->sd_quota_spin);
			gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
			list_add(&qd->qd_list, &sdp->sd_quota_list);
			atomic_inc(&sdp->sd_quota_count);
			spin_unlock(&sdp->sd_quota_spin);

			found++;
		}

		brelse(bh);
		dblock++;
		extlen--;
	}

	if (found)
		fs_info(sdp, "found %u quota changes\n", found);

	return 0;

fail:
	gfs2_quota_cleanup(sdp);
	return error;
}
void gfs2_quota_scan(struct gfs2_sbd *sdp)
{
	struct gfs2_quota_data *qd, *safe;
	LIST_HEAD(dead);

	spin_lock(&sdp->sd_quota_spin);
	list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
		if (!qd->qd_count &&
		    time_after_eq(jiffies, qd->qd_last_touched +
				  gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
			list_move(&qd->qd_list, &dead);
			gfs2_assert_warn(sdp,
					 atomic_read(&sdp->sd_quota_count) > 0);
			atomic_dec(&sdp->sd_quota_count);
		}
	}
	spin_unlock(&sdp->sd_quota_spin);

	while (!list_empty(&dead)) {
		qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
		list_del(&qd->qd_list);

		gfs2_assert_warn(sdp, !qd->qd_change);
		gfs2_assert_warn(sdp, !qd->qd_slot_count);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);
	}
}
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_quota_list;
	struct gfs2_quota_data *qd;
	unsigned int x;

	spin_lock(&sdp->sd_quota_spin);
	while (!list_empty(head)) {
		qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);

		if (qd->qd_count > 1 ||
		    (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
			list_move(&qd->qd_list, head);
			spin_unlock(&sdp->sd_quota_spin);
			schedule();
			spin_lock(&sdp->sd_quota_spin);
			continue;
		}

		list_del(&qd->qd_list);
		atomic_dec(&sdp->sd_quota_count);
		spin_unlock(&sdp->sd_quota_spin);

		if (!qd->qd_count) {
			gfs2_assert_warn(sdp, !qd->qd_change);
			gfs2_assert_warn(sdp, !qd->qd_slot_count);
		} else
			gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
		gfs2_assert_warn(sdp, !qd->qd_bh_count);

		gfs2_lvb_unhold(qd->qd_gl);
		kfree(qd);

		spin_lock(&sdp->sd_quota_spin);
	}
	spin_unlock(&sdp->sd_quota_spin);

	gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));

	if (sdp->sd_quota_bitmap) {
		for (x = 0; x < sdp->sd_quota_chunks; x++)
			kfree(sdp->sd_quota_bitmap[x]);
		kfree(sdp->sd_quota_bitmap);
	}
}
);