/*
 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "xfs_trans.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_itable.h"
#include "xfs_buf_item.h"
#include "xfs_trans_space.h"
#include "xfs_trans_priv.h"
#include "xfs_cksum.h"
#include "xfs_trace.h"
/*
 * Lock order:
 *
 *	dquot->q_qlock (xfs_dqlock() and friends)
 *	dquot->q_flush (xfs_dqflock() and friends)
 *
 * If two dquots need to be locked the order is user before group/project,
 * otherwise by the lowest id first, see xfs_dqlock2.
 */
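/*
 * For example, a caller holding both the user and the group dquot of an
 * inode takes the locks in this order (xfs_dqlock2() at the bottom of
 * this file is the general helper for the two-dquot case):
 *
 *	xfs_dqlock(udqp);
 *	xfs_dqlock(gdqp);
 *	...
 *	xfs_dqunlock(gdqp);
 *	xfs_dqunlock(udqp);
 */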
xfs_buftarg_t		*xfs_dqerror_target;
int			xfs_do_dqerror;
int			xfs_dqreq_num;
int			xfs_dqerror_mod = 33;

struct kmem_zone		*xfs_qm_dqtrxzone;
static struct kmem_zone		*xfs_qm_dqzone;

static struct lock_class_key xfs_dquot_other_class;
/*
 * This is called to free all the memory associated with a dquot.
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}
/*
 * If default limits are in force, push them into the dquot now.
 * We overwrite the dquot limits only if they are zero and this
 * is not the root dquot.
 */
void
xfs_qm_adjust_dqlimits(
	struct xfs_mount	*mp,
	struct xfs_dquot	*dq)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	struct xfs_disk_dquot	*d = &dq->q_core;
	int			prealloc = 0;

	ASSERT(d->d_id);

	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
		prealloc = 1;
	}
	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
		prealloc = 1;
	}
	if (q->qi_isoftlimit && !d->d_ino_softlimit)
		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);

	if (prealloc)
		xfs_dquot_set_prealloc_limits(dq);
}
/*
 * Check the limits and timers of a dquot and start or reset timers
 * if necessary.
 * This gets called even when quota enforcement is OFF, which makes our
 * life a little less complicated. (We just don't reject any quota
 * reservations in that case, when enforcement is off.)
 * We also return 0 as the values of the timers in Q_GETQUOTA calls, when
 * enforcement's off.
 * In contrast, warnings are a little different in that they don't
 * 'automatically' get started when limits get exceeded. They do
 * get reset to zero, however, when we find the count to be under
 * the soft limit (they are only ever set non-zero via userspace).
 */
void
xfs_qm_adjust_dqtimers(
	xfs_mount_t		*mp,
	xfs_disk_dquot_t	*d)
{
	ASSERT(d->d_id);

#ifdef DEBUG
	if (d->d_blk_hardlimit)
		ASSERT(be64_to_cpu(d->d_blk_softlimit) <=
		       be64_to_cpu(d->d_blk_hardlimit));
	if (d->d_ino_hardlimit)
		ASSERT(be64_to_cpu(d->d_ino_softlimit) <=
		       be64_to_cpu(d->d_ino_hardlimit));
	if (d->d_rtb_hardlimit)
		ASSERT(be64_to_cpu(d->d_rtb_softlimit) <=
		       be64_to_cpu(d->d_rtb_hardlimit));
#endif

	if (!d->d_btimer) {
		if ((d->d_blk_softlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_softlimit))) ||
		    (d->d_blk_hardlimit &&
		     (be64_to_cpu(d->d_bcount) >
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_btimelimit);
		} else {
			d->d_bwarns = 0;
		}
	} else {
		if ((!d->d_blk_softlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_softlimit))) &&
		    (!d->d_blk_hardlimit ||
		     (be64_to_cpu(d->d_bcount) <=
		      be64_to_cpu(d->d_blk_hardlimit)))) {
			d->d_btimer = 0;
		}
	}

	if (!d->d_itimer) {
		if ((d->d_ino_softlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_softlimit))) ||
		    (d->d_ino_hardlimit &&
		     (be64_to_cpu(d->d_icount) >
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_itimelimit);
		} else {
			d->d_iwarns = 0;
		}
	} else {
		if ((!d->d_ino_softlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_softlimit))) &&
		    (!d->d_ino_hardlimit ||
		     (be64_to_cpu(d->d_icount) <=
		      be64_to_cpu(d->d_ino_hardlimit)))) {
			d->d_itimer = 0;
		}
	}

	if (!d->d_rtbtimer) {
		if ((d->d_rtb_softlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_softlimit))) ||
		    (d->d_rtb_hardlimit &&
		     (be64_to_cpu(d->d_rtbcount) >
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = cpu_to_be32(get_seconds() +
					mp->m_quotainfo->qi_rtbtimelimit);
		} else {
			d->d_rtbwarns = 0;
		}
	} else {
		if ((!d->d_rtb_softlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_softlimit))) &&
		    (!d->d_rtb_hardlimit ||
		     (be64_to_cpu(d->d_rtbcount) <=
		      be64_to_cpu(d->d_rtb_hardlimit)))) {
			d->d_rtbtimer = 0;
		}
	}
}
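/*
 * For example: the first time a user's d_bcount is seen above
 * d_blk_softlimit, d_btimer is armed to "now + qi_btimelimit" (the grace
 * period).  Once the count drops back to or below the soft limit the
 * timer is cleared again; the warning counters are only ever reset here,
 * never incremented.
 */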
/*
 * initialize a buffer full of dquots and log the whole thing
 */
STATIC void
xfs_qm_init_dquot_blk(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_dqid_t	id,
	uint		type,
	xfs_buf_t	*bp)
{
	struct xfs_quotainfo	*q = mp->m_quotainfo;
	xfs_dqblk_t	*d;
	int		curid, i;

	ASSERT(tp);
	ASSERT(xfs_buf_islocked(bp));

	d = bp->b_addr;

	/*
	 * ID of the first dquot in the block - id's are zero based.
	 */
	curid = id - (id % q->qi_dqperchunk);
	memset(d, 0, BBTOB(q->qi_dqchunklen));
	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
		d->dd_diskdq.d_version = XFS_DQUOT_VERSION;
		d->dd_diskdq.d_id = cpu_to_be32(curid);
		d->dd_diskdq.d_flags = type;
		if (xfs_sb_version_hascrc(&mp->m_sb)) {
			uuid_copy(&d->dd_uuid, &mp->m_sb.sb_uuid);
			xfs_update_cksum((char *)d, sizeof(struct xfs_dqblk),
					 XFS_DQUOT_CRC_OFF);
		}
	}

	xfs_trans_dquot_buf(tp, bp,
			    (type & XFS_DQ_USER ? XFS_BLF_UDQUOT_BUF :
			    ((type & XFS_DQ_PROJ) ? XFS_BLF_PDQUOT_BUF :
			     XFS_BLF_GDQUOT_BUF)));
	xfs_trans_log_buf(tp, bp, 0, BBTOB(q->qi_dqchunklen) - 1);
}
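/*
 * For example, with qi_dqperchunk = 30 and id = 65, curid starts at 60
 * (65 - 65 % 30), so the chunk is initialised with dquot ids 60
 * through 89.
 */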
/*
 * Initialize the dynamic speculative preallocation thresholds. The lo/hi
 * watermarks correspond to the soft and hard limits by default. If a soft
 * limit is not specified, we use 95% of the hard limit.
 */
void
xfs_dquot_set_prealloc_limits(struct xfs_dquot *dqp)
{
	__uint64_t space;

	dqp->q_prealloc_hi_wmark = be64_to_cpu(dqp->q_core.d_blk_hardlimit);
	dqp->q_prealloc_lo_wmark = be64_to_cpu(dqp->q_core.d_blk_softlimit);
	if (!dqp->q_prealloc_lo_wmark) {
		dqp->q_prealloc_lo_wmark = dqp->q_prealloc_hi_wmark;
		do_div(dqp->q_prealloc_lo_wmark, 100);
		dqp->q_prealloc_lo_wmark *= 95;
	}

	space = dqp->q_prealloc_hi_wmark;

	do_div(space, 100);
	dqp->q_low_space[XFS_QLOWSP_1_PCNT] = space;
	dqp->q_low_space[XFS_QLOWSP_3_PCNT] = space * 3;
	dqp->q_low_space[XFS_QLOWSP_5_PCNT] = space * 5;
}
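/*
 * Illustrative numbers: with a block hard limit of 1000 and no soft
 * limit, q_prealloc_lo_wmark works out to 950 (95%) and the q_low_space
 * thresholds to 10, 30 and 50 blocks (1%, 3% and 5% of the hard limit),
 * which the speculative preallocation throttling compares against the
 * remaining quota headroom.
 */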
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF))
			return false;
		if (!uuid_equal(&d->dd_uuid, &mp->m_sb.sb_uuid))
			return false;
	}

	return true;
}
STATIC bool
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_qm_calc_dquots_per_chunk(mp, bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;
		int			error;

		ddq = &d[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		error = xfs_qm_dqcheck(mp, ddq, id + i, 0, XFS_QMOPT_DOWARN,
				       "xfs_dquot_buf_verify");
		if (error)
			return false;
	}

	return true;
}
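/*
 * For example, if the first dquot in a chunk carries id 120 and
 * qi_dqperchunk is 30, the remaining slots are expected to carry ids
 * 121..149; slot i is checked against "id + i" above.
 */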
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp) || !xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
	}
}
/*
 * We don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf	*bp)
{
	struct xfs_mount	*mp = bp->b_target->bt_mount;

	if (!xfs_dquot_buf_verify(mp, bp)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp, bp->b_addr);
		xfs_buf_ioerror(bp, EFSCORRUPTED);
		return;
	}
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
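/*
 * These verifiers run via bp->b_ops, which this file attaches either at
 * allocation time (xfs_qm_dqalloc) or by passing &xfs_dquot_buf_ops to
 * xfs_trans_read_buf(): ->verify_read runs on read completion and
 * ->verify_write just before the buffer is written back, so a corrupt
 * dquot buffer is caught in both directions.
 */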
/*
 * Allocate a block and fill it with dquots.
 * This is called when the bmapi finds a hole.
 */
STATIC int
xfs_qm_dqalloc(
	xfs_trans_t	**tpp,
	xfs_mount_t	*mp,
	xfs_dquot_t	*dqp,
	xfs_inode_t	*quotip,
	xfs_fileoff_t	offset_fsb,
	xfs_buf_t	**O_bpp)
{
	xfs_fsblock_t	firstblock;
	xfs_bmap_free_t	flist;
	xfs_bmbt_irec_t	map;
	int		nmaps, error, committed;
	xfs_buf_t	*bp;
	xfs_trans_t	*tp = *tpp;

	ASSERT(tp != NULL);

	trace_xfs_dqalloc(dqp);

	/*
	 * Initialize the bmap freelist prior to calling bmapi code.
	 */
	xfs_bmap_init(&flist, &firstblock);
	xfs_ilock(quotip, XFS_ILOCK_EXCL);
	/*
	 * Return if this type of quotas is turned off while we didn't
	 * have an inode lock.
	 */
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		xfs_iunlock(quotip, XFS_ILOCK_EXCL);
		return (ESRCH);
	}

	xfs_trans_ijoin(tp, quotip, XFS_ILOCK_EXCL);
	nmaps = 1;
	error = xfs_bmapi_write(tp, quotip, offset_fsb,
				XFS_DQUOT_CLUSTER_SIZE_FSB, XFS_BMAPI_METADATA,
				&firstblock, XFS_QM_DQALLOC_SPACE_RES(mp),
				&map, &nmaps, &flist);
	if (error)
		goto error0;
	ASSERT(map.br_blockcount == XFS_DQUOT_CLUSTER_SIZE_FSB);
	ASSERT(nmaps == 1);
	ASSERT((map.br_startblock != DELAYSTARTBLOCK) &&
	       (map.br_startblock != HOLESTARTBLOCK));

	/*
	 * Keep track of the blkno to save a lookup later
	 */
	dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

	/* now we can just get the buffer (there's nothing to read yet) */
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
			       dqp->q_blkno,
			       mp->m_quotainfo->qi_dqchunklen,
			       0);

	error = xfs_buf_geterror(bp);
	if (error)
		goto error1;
	bp->b_ops = &xfs_dquot_buf_ops;

	/*
	 * Make a chunk of dquots out of this buffer and log
	 * the entire thing.
	 */
	xfs_qm_init_dquot_blk(tp, mp, be32_to_cpu(dqp->q_core.d_id),
			      dqp->dq_flags & XFS_DQ_ALLTYPES, bp);

	/*
	 * xfs_bmap_finish() may commit the current transaction and
	 * start a second transaction if the freelist is not empty.
	 *
	 * Since we still want to modify this buffer, we need to
	 * ensure that the buffer is not released on commit of
	 * the first transaction and ensure the buffer is added to the
	 * second transaction.
	 *
	 * If there is only one transaction then don't stop the buffer
	 * from being released when it commits later on.
	 */
	xfs_trans_bhold(tp, bp);

	if ((error = xfs_bmap_finish(tpp, &flist, &committed))) {
		goto error1;
	}

	if (committed) {
		tp = *tpp;
		xfs_trans_bjoin(tp, bp);
	} else {
		xfs_trans_bhold_release(tp, bp);
	}

	*O_bpp = bp;
	return 0;

error1:
	xfs_bmap_cancel(&flist);
error0:
	xfs_iunlock(quotip, XFS_ILOCK_EXCL);

	return (error);
}
STATIC int
xfs_qm_dqrepair(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	struct xfs_dquot	*dqp,
	xfs_dqid_t		firstid,
	struct xfs_buf		**bpp)
{
	int			error;
	struct xfs_disk_dquot	*ddq;
	struct xfs_dqblk	*d;
	int			i;

	/*
	 * Read the buffer without verification so we get the corrupted
	 * buffer returned to us. Make sure we verify it on write, though.
	 */
	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen,
				   0, bpp, NULL);
	if (error) {
		ASSERT(*bpp == NULL);
		return XFS_ERROR(error);
	}
	(*bpp)->b_ops = &xfs_dquot_buf_ops;

	ASSERT(xfs_buf_islocked(*bpp));
	d = (struct xfs_dqblk *)(*bpp)->b_addr;

	/* Do the actual repair of dquots in this buffer */
	for (i = 0; i < mp->m_quotainfo->qi_dqperchunk; i++) {
		ddq = &d[i].dd_diskdq;
		error = xfs_qm_dqcheck(mp, ddq, firstid + i,
				       dqp->dq_flags & XFS_DQ_ALLTYPES,
				       XFS_QMOPT_DQREPAIR, "xfs_qm_dqrepair");
		if (error) {
			/* repair failed, we're screwed */
			xfs_trans_brelse(tp, *bpp);
			return XFS_ERROR(EIO);
		}
	}

	return 0;
}
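/*
 * Note: when called with XFS_QMOPT_DQREPAIR, xfs_qm_dqcheck() does not
 * just flag a bad dquot, it reinitialises the on-disk dquot in place with
 * the expected id and type, which is why the buffer above is read without
 * a verifier and the repaired result is verified on write instead.
 */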
/*
 * Maps a dquot to the buffer containing its on-disk version.
 * This returns a ptr to the buffer containing the on-disk dquot
 * in the bpp param, and a ptr to the on-disk dquot within that buffer.
 */
STATIC int
xfs_qm_dqtobp(
	xfs_trans_t		**tpp,
	xfs_dquot_t		*dqp,
	xfs_disk_dquot_t	**O_ddpp,
	xfs_buf_t		**O_bpp,
	uint			flags)
{
	struct xfs_bmbt_irec	map;
	int			nmaps = 1, error;
	struct xfs_buf		*bp;
	xfs_inode_t		*quotip = XFS_DQ_TO_QIP(dqp);
	xfs_mount_t		*mp = dqp->q_mount;
	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
	xfs_trans_t		*tp = (tpp ? *tpp : NULL);

	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;

	xfs_ilock(quotip, XFS_ILOCK_SHARED);
	if (!xfs_this_quota_on(dqp->q_mount, dqp->dq_flags)) {
		/*
		 * Return if this type of quotas is turned off while we
		 * didn't have the quota inode lock.
		 */
		xfs_iunlock(quotip, XFS_ILOCK_SHARED);
		return ESRCH;
	}

	/*
	 * Find the block map; no allocations yet.
	 */
	error = xfs_bmapi_read(quotip, dqp->q_fileoffset,
			       XFS_DQUOT_CLUSTER_SIZE_FSB, &map, &nmaps, 0);

	xfs_iunlock(quotip, XFS_ILOCK_SHARED);
	if (error)
		return error;

	ASSERT(nmaps == 1);
	ASSERT(map.br_blockcount == 1);

	/*
	 * Offset of dquot in the (fixed sized) dquot chunk.
	 */
	dqp->q_bufoffset = (id % mp->m_quotainfo->qi_dqperchunk) *
		sizeof(xfs_dqblk_t);

	ASSERT(map.br_startblock != DELAYSTARTBLOCK);
	if (map.br_startblock == HOLESTARTBLOCK) {
		/*
		 * We don't allocate unless we're asked to.
		 */
		if (!(flags & XFS_QMOPT_DQALLOC))
			return ENOENT;

		ASSERT(tp);
		error = xfs_qm_dqalloc(tpp, mp, dqp, quotip,
				       dqp->q_fileoffset, &bp);
		if (error)
			return error;
		tp = *tpp;
	} else {
		trace_xfs_dqtobp_read(dqp);

		/*
		 * store the blkno etc so that we don't have to do the
		 * mapping all the time
		 */
		dqp->q_blkno = XFS_FSB_TO_DADDR(mp, map.br_startblock);

		error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
					   dqp->q_blkno,
					   mp->m_quotainfo->qi_dqchunklen,
					   0, &bp, &xfs_dquot_buf_ops);

		if (error == EFSCORRUPTED && (flags & XFS_QMOPT_DQREPAIR)) {
			xfs_dqid_t firstid = (xfs_dqid_t)map.br_startoff *
						mp->m_quotainfo->qi_dqperchunk;
			error = xfs_qm_dqrepair(mp, tp, dqp, firstid, &bp);
		}

		if (error) {
			ASSERT(bp == NULL);
			return XFS_ERROR(error);
		}
	}

	ASSERT(xfs_buf_islocked(bp));
	*O_bpp = bp;
	*O_ddpp = bp->b_addr + dqp->q_bufoffset;

	return 0;
}
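/*
 * Worked example: with qi_dqperchunk = 30, dquot id 65 maps to quota-file
 * block 2 (65 / 30) and to byte offset (65 % 30) * sizeof(xfs_dqblk_t)
 * within that chunk's buffer.
 */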
/*
 * Read in the ondisk dquot using dqtobp() then copy it to an incore version,
 * and release the buffer immediately.
 *
 * If XFS_QMOPT_DQALLOC is set, allocate a dquot on disk if needed.
 */
int
xfs_qm_dqread(
	struct xfs_mount	*mp,
	xfs_dqid_t		id,
	uint			type,
	uint			flags,
	struct xfs_dquot	**O_dqpp)
{
	struct xfs_dquot	*dqp;
	struct xfs_disk_dquot	*ddqp;
	struct xfs_buf		*bp;
	struct xfs_trans	*tp = NULL;
	int			error;
	int			cancelflags = 0;

	dqp = kmem_zone_zalloc(xfs_qm_dqzone, KM_SLEEP);

	dqp->dq_flags = type;
	dqp->q_core.d_id = cpu_to_be32(id);
	dqp->q_mount = mp;
	INIT_LIST_HEAD(&dqp->q_lru);
	mutex_init(&dqp->q_qlock);
	init_waitqueue_head(&dqp->q_pinwait);

	/*
	 * Because we want to use a counting completion, complete
	 * the flush completion once to allow a single access to
	 * the flush completion without blocking.
	 */
	init_completion(&dqp->q_flush);
	complete(&dqp->q_flush);
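	/*
	 * In other words, xfs_dqflock()/xfs_dqfunlock() (see xfs_dquot.h)
	 * implement the flush "lock" on top of this completion: taking the
	 * lock waits on the completion, dropping it completes it again, and
	 * the single complete() above leaves a new dquot in the unlocked
	 * state for its first taker.
	 */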
	/*
	 * Make sure group quotas have a different lock class than user
	 * quotas, so that lockdep can tell the two apart when both are
	 * held at the same time.
	 */
	if (!(type & XFS_DQ_USER))
		lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);

	XFS_STATS_INC(xs_qm_dquot);

	trace_xfs_dqread(dqp);

	if (flags & XFS_QMOPT_DQALLOC) {
		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
					  XFS_QM_DQALLOC_LOG_RES(mp), 0,
					  XFS_TRANS_PERM_LOG_RES,
					  XFS_WRITE_LOG_COUNT);
		if (error)
			goto error1;
		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
	}

	/*
	 * get a pointer to the on-disk dquot and the buffer containing it
	 * dqp already knows its own type (GROUP/USER).
	 */
	error = xfs_qm_dqtobp(&tp, dqp, &ddqp, &bp, flags);
	if (error) {
		/*
		 * This can happen if quotas got turned off (ESRCH),
		 * or if the dquot didn't exist on disk and we ask to
		 * allocate (ENOENT).
		 */
		trace_xfs_dqread_fail(dqp);
		cancelflags |= XFS_TRANS_ABORT;
		goto error1;
	}

	/* copy everything from disk dquot to the incore dquot */
	memcpy(&dqp->q_core, ddqp, sizeof(xfs_disk_dquot_t));
	xfs_qm_dquot_logitem_init(dqp);

	/*
	 * Reservation counters are defined as reservation plus current usage
	 * to avoid having to add every time.
	 */
	dqp->q_res_bcount = be64_to_cpu(ddqp->d_bcount);
	dqp->q_res_icount = be64_to_cpu(ddqp->d_icount);
	dqp->q_res_rtbcount = be64_to_cpu(ddqp->d_rtbcount);
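	/*
	 * For example, if d_bcount is 100 blocks and a transaction later
	 * reserves 20 more, q_res_bcount becomes 120 while d_bcount stays at
	 * 100 until the delta is actually applied; limit enforcement works
	 * against the reservation totals.
	 */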
	/* initialize the dquot speculative prealloc thresholds */
	xfs_dquot_set_prealloc_limits(dqp);

	/* Mark the buf so that this will stay incore a little longer */
	xfs_buf_set_ref(bp, XFS_DQUOT_REF);

	/*
	 * We got the buffer with a xfs_trans_read_buf() (in dqtobp()),
	 * so we need to release with xfs_trans_brelse().
	 * The strategy here is identical to that of inodes; we lock
	 * the dquot in xfs_qm_dqget() before making it accessible to
	 * others. This is because dquots, like inodes, need a good level of
	 * concurrency, and we don't want to take locks on the entire buffers
	 * for dquot accesses.
	 * Note also that the dquot buffer may even be dirty at this point, if
	 * this particular dquot was repaired. We still aren't afraid to
	 * brelse it because we have the changes incore.
	 */
	ASSERT(xfs_buf_islocked(bp));
	xfs_trans_brelse(tp, bp);

	if (tp) {
		error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
		if (error)
			goto error0;
	}

	*O_dqpp = dqp;
	return error;

error1:
	if (tp)
		xfs_trans_cancel(tp, cancelflags);
error0:
	xfs_qm_dqdestroy(dqp);
	*O_dqpp = NULL;
	return error;
}
/*
 * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return a
 * locked dquot, doing an allocation (if requested) as needed.
 * When both an inode and an id are given, the inode's id takes precedence.
 * That is, if the id changes while we don't hold the ilock inside this
 * function, the new dquot is returned, not necessarily the one requested
 * in the id argument.
 */
int
xfs_qm_dqget(
	xfs_mount_t	*mp,
	xfs_inode_t	*ip,	  /* locked inode (optional) */
	xfs_dqid_t	id,	  /* uid/projid/gid depending on type */
	uint		type,	  /* XFS_DQ_USER/XFS_DQ_PROJ/XFS_DQ_GROUP */
	uint		flags,	  /* DQALLOC, DQSUSER, DQREPAIR, DOWARN */
	xfs_dquot_t	**O_dqpp) /* OUT : locked incore dquot */
{
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	struct radix_tree_root *tree = XFS_DQUOT_TREE(qi, type);
	struct xfs_dquot	*dqp;
	int			error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
	if ((!XFS_IS_UQUOTA_ON(mp) && type == XFS_DQ_USER) ||
	    (!XFS_IS_PQUOTA_ON(mp) && type == XFS_DQ_PROJ) ||
	    (!XFS_IS_GQUOTA_ON(mp) && type == XFS_DQ_GROUP)) {
		return ESRCH;
	}

#ifdef DEBUG
	if (xfs_do_dqerror) {
		if ((xfs_dqerror_target == mp->m_ddev_targp) &&
		    (xfs_dqreq_num++ % xfs_dqerror_mod) == 0) {
			xfs_debug(mp, "Returning error in dqget");
			return EIO;
		}
	}

	ASSERT(type == XFS_DQ_USER ||
	       type == XFS_DQ_PROJ ||
	       type == XFS_DQ_GROUP);
	if (ip) {
		ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
		ASSERT(xfs_inode_dquot(ip, type) == NULL);
	}
#endif

restart:
	mutex_lock(&qi->qi_tree_lock);
	dqp = radix_tree_lookup(tree, id);
	if (dqp) {
		xfs_dqlock(dqp);
		if (dqp->dq_flags & XFS_DQ_FREEING) {
			xfs_dqunlock(dqp);
			mutex_unlock(&qi->qi_tree_lock);
			trace_xfs_dqget_freeing(dqp);
			delay(1);
			goto restart;
		}

		dqp->q_nrefs++;
		mutex_unlock(&qi->qi_tree_lock);

		trace_xfs_dqget_hit(dqp);
		XFS_STATS_INC(xs_qm_dqcachehits);
		*O_dqpp = dqp;
		return 0;
	}
	mutex_unlock(&qi->qi_tree_lock);
	XFS_STATS_INC(xs_qm_dqcachemisses);

	/*
	 * Dquot cache miss. We don't want to keep the inode lock across
	 * a (potential) disk read. Also we don't want to deal with the lock
	 * ordering between quotainode and this inode. OTOH, dropping the inode
	 * lock here means dealing with a chown that can happen before
	 * we re-acquire the lock.
	 */
	if (ip)
		xfs_iunlock(ip, XFS_ILOCK_EXCL);

	error = xfs_qm_dqread(mp, id, type, flags, &dqp);

	if (ip)
		xfs_ilock(ip, XFS_ILOCK_EXCL);

	if (error)
		return error;

	if (ip) {
		/*
		 * A dquot could be attached to this inode by now, since
		 * we had dropped the ilock.
		 */
		if (xfs_this_quota_on(mp, type)) {
			struct xfs_dquot	*dqp1;

			dqp1 = xfs_inode_dquot(ip, type);
			if (dqp1) {
				xfs_qm_dqdestroy(dqp);
				dqp = dqp1;
				xfs_dqlock(dqp);
				goto dqret;
			}
		} else {
			/* inode stays locked on return */
			xfs_qm_dqdestroy(dqp);
			return XFS_ERROR(ESRCH);
		}
	}

	mutex_lock(&qi->qi_tree_lock);
	error = -radix_tree_insert(tree, id, dqp);
	if (unlikely(error)) {
		WARN_ON(error != EEXIST);

		/*
		 * Duplicate found. Just throw away the new dquot and start
		 * over.
		 */
		mutex_unlock(&qi->qi_tree_lock);
		trace_xfs_dqget_dup(dqp);
		xfs_qm_dqdestroy(dqp);
		XFS_STATS_INC(xs_qm_dquot_dups);
		goto restart;
	}

	/*
	 * We return a locked dquot to the caller, with a reference taken.
	 */
	xfs_dqlock(dqp);
	dqp->q_nrefs = 1;

	qi->qi_dquots++;
	mutex_unlock(&qi->qi_tree_lock);

dqret:
	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
	trace_xfs_dqget_miss(dqp);
	*O_dqpp = dqp;
	return 0;
}
STATIC void
xfs_qm_dqput_final(
	struct xfs_dquot	*dqp)
{
	struct xfs_quotainfo	*qi = dqp->q_mount->m_quotainfo;
	struct xfs_dquot	*gdqp;

	trace_xfs_dqput_free(dqp);

	mutex_lock(&qi->qi_lru_lock);
	if (list_empty(&dqp->q_lru)) {
		list_add_tail(&dqp->q_lru, &qi->qi_lru_list);
		qi->qi_lru_count++;
		XFS_STATS_INC(xs_qm_dquot_unused);
	}
	mutex_unlock(&qi->qi_lru_lock);

	/*
	 * If we just added a udquot to the freelist, then we want to release
	 * the gdquot reference that it (probably) has. Otherwise it'll keep
	 * the gdquot from getting reclaimed.
	 */
	gdqp = dqp->q_gdquot;
	if (gdqp) {
		xfs_dqlock(gdqp);
		dqp->q_gdquot = NULL;
	}
	xfs_dqunlock(dqp);

	/*
	 * If we had a group quota hint, release it now.
	 */
	if (gdqp)
		xfs_qm_dqput(gdqp);
}
/*
 * Release a reference to the dquot (decrement ref-count) and unlock it.
 *
 * If there is a group quota attached to this dquot, carefully release that
 * too without tripping over deadlocks'n'stuff.
 */
void
xfs_qm_dqput(
	struct xfs_dquot	*dqp)
{
	ASSERT(dqp->q_nrefs > 0);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	trace_xfs_dqput(dqp);

	if (--dqp->q_nrefs > 0)
		xfs_dqunlock(dqp);
	else
		xfs_qm_dqput_final(dqp);
}

/*
 * Release a dquot. Flush it if dirty, then dqput() it.
 * dquot must not be locked.
 */
void
xfs_qm_dqrele(
	xfs_dquot_t	*dqp)
{
	if (!dqp)
		return;

	trace_xfs_dqrele(dqp);

	xfs_dqlock(dqp);
	/*
	 * We don't care to flush it if the dquot is dirty here.
	 * That will create stutters that we want to avoid.
	 * Instead we do a delayed write when we try to reclaim
	 * a dirty dquot. Also xfs_sync will take part of the burden...
	 */
	xfs_qm_dqput(dqp);
}
/*
 * This is the dquot flushing I/O completion routine. It is called
 * from interrupt level when the buffer containing the dquot is
 * flushed to disk. It is responsible for removing the dquot logitem
 * from the AIL if it has not been re-logged, and unlocking the dquot's
 * flush lock. This behavior is very similar to that of inodes.
 */
STATIC void
xfs_qm_dqflush_done(
	struct xfs_buf		*bp,
	struct xfs_log_item	*lip)
{
	xfs_dq_logitem_t	*qip = (struct xfs_dq_logitem *)lip;
	xfs_dquot_t		*dqp = qip->qli_dquot;
	struct xfs_ail		*ailp = lip->li_ailp;

	/*
	 * We only want to pull the item from the AIL if its
	 * location in the log has not changed since we started the flush.
	 * Thus, we only bother if the dquot's lsn has
	 * not changed. First we check the lsn outside the lock
	 * since it's cheaper, and then we recheck while
	 * holding the lock before removing the dquot from the AIL.
	 */
	if ((lip->li_flags & XFS_LI_IN_AIL) &&
	    lip->li_lsn == qip->qli_flush_lsn) {

		/* xfs_trans_ail_delete() drops the AIL lock. */
		spin_lock(&ailp->xa_lock);
		if (lip->li_lsn == qip->qli_flush_lsn)
			xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&ailp->xa_lock);
	}

	/*
	 * Release the dq's flush lock since we're done with it.
	 */
	xfs_dqfunlock(dqp);
}
/*
 * Write a modified dquot to disk.
 * The dquot must be locked and the flush lock too taken by caller.
 * The flush lock will not be unlocked until the dquot reaches the disk,
 * but the dquot is free to be unlocked and modified by the caller
 * in the interim. Dquot is still locked on return. This behavior is
 * identical to that of inodes.
 */
int
xfs_qm_dqflush(
	struct xfs_dquot	*dqp,
	struct xfs_buf		**bpp)
{
	struct xfs_mount	*mp = dqp->q_mount;
	struct xfs_buf		*bp;
	struct xfs_disk_dquot	*ddqp;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	trace_xfs_dqflush(dqp);

	*bpp = NULL;

	xfs_qm_dqunpin_wait(dqp);

	/*
	 * This may have been unpinned because the filesystem is shutting
	 * down forcibly. If that's the case we must not write this dquot
	 * to disk, because the log record didn't make it to disk.
	 *
	 * We also have to remove the log item from the AIL in this case,
	 * as we wait for an empty AIL as part of the unmount process.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		struct xfs_log_item	*lip = &dqp->q_logitem.qli_item;
		dqp->dq_flags &= ~XFS_DQ_DIRTY;

		spin_lock(&mp->m_ail->xa_lock);
		if (lip->li_flags & XFS_LI_IN_AIL)
			xfs_trans_ail_delete(mp->m_ail, lip,
					     SHUTDOWN_CORRUPT_INCORE);
		else
			spin_unlock(&mp->m_ail->xa_lock);
		error = XFS_ERROR(EIO);
		goto out_unlock;
	}

	/*
	 * Get the buffer containing the on-disk dquot.
	 */
	error = xfs_trans_read_buf(mp, NULL, mp->m_ddev_targp, dqp->q_blkno,
				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
				   &xfs_dquot_buf_ops);
	if (error)
		goto out_unlock;

	/*
	 * Calculate the location of the dquot inside the buffer.
	 */
	ddqp = bp->b_addr + dqp->q_bufoffset;

	/*
	 * A simple sanity check in case we got a corrupted dquot.
	 */
	error = xfs_qm_dqcheck(mp, &dqp->q_core, be32_to_cpu(ddqp->d_id), 0,
			       XFS_QMOPT_DOWARN, "dqflush (incore copy)");
	if (error) {
		xfs_buf_relse(bp);
		xfs_dqfunlock(dqp);
		xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
		return XFS_ERROR(EIO);
	}

	/* This is the only portion of data that needs to persist */
	memcpy(ddqp, &dqp->q_core, sizeof(xfs_disk_dquot_t));

	/*
	 * Clear the dirty field and remember the flush lsn for later use.
	 */
	dqp->dq_flags &= ~XFS_DQ_DIRTY;

	xfs_trans_ail_copy_lsn(mp->m_ail, &dqp->q_logitem.qli_flush_lsn,
			       &dqp->q_logitem.qli_item.li_lsn);

	/*
	 * copy the lsn into the on-disk dquot now while we have the in memory
	 * dquot here. This can't be done later in the write verifier as we
	 * can't get access to the log item at that point in time.
	 *
	 * We also calculate the CRC here so that the on-disk dquot in the
	 * buffer always has a valid CRC. This ensures there is no possibility
	 * of a dquot without an up-to-date CRC getting to disk.
	 */
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		struct xfs_dqblk *dqb = (struct xfs_dqblk *)ddqp;

		dqb->dd_lsn = cpu_to_be64(dqp->q_logitem.qli_item.li_lsn);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}

	/*
	 * Attach an iodone routine so that we can remove this dquot from the
	 * AIL and release the flush lock once the dquot is synced to disk.
	 */
	xfs_buf_attach_iodone(bp, xfs_qm_dqflush_done,
			      &dqp->q_logitem.qli_item);

	/*
	 * If the buffer is pinned then push on the log so we won't
	 * get stuck waiting in the write for too long.
	 */
	if (xfs_buf_ispinned(bp)) {
		trace_xfs_dqflush_force(dqp);
		xfs_log_force(mp, 0);
	}

	trace_xfs_dqflush_done(dqp);
	*bpp = bp;
	return 0;

out_unlock:
	xfs_dqfunlock(dqp);
	return XFS_ERROR(EIO);
}
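/*
 * A typical caller of xfs_qm_dqflush() (sketch only; details vary by call
 * site) holds the dquot lock, takes the flush lock, and then queues the
 * returned buffer on a caller-owned delwri list, roughly:
 *
 *	xfs_dqlock(dqp);
 *	xfs_dqflock(dqp);
 *	error = xfs_qm_dqflush(dqp, &bp);
 *	xfs_dqunlock(dqp);
 *	if (!error) {
 *		xfs_buf_delwri_queue(bp, &buffer_list);
 *		xfs_buf_relse(bp);
 *	}
 */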
/*
 * Lock two xfs_dquot structures.
 *
 * To avoid deadlocks we always lock the quota structure with
 * the lower id first.
 */
void
xfs_dqlock2(
	xfs_dquot_t	*d1,
	xfs_dquot_t	*d2)
{
	if (d1 && d2) {
		ASSERT(d1 != d2);
		if (be32_to_cpu(d1->q_core.d_id) >
		    be32_to_cpu(d2->q_core.d_id)) {
			mutex_lock(&d2->q_qlock);
			mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
		} else {
			mutex_lock(&d1->q_qlock);
			mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
		}
	} else if (d1) {
		mutex_lock(&d1->q_qlock);
	} else if (d2) {
		mutex_lock(&d2->q_qlock);
	}
}
int __init
xfs_qm_init(void)
{
	xfs_qm_dqzone =
		kmem_zone_init(sizeof(struct xfs_dquot), "xfs_dquot");
	if (!xfs_qm_dqzone)
		goto out;

	xfs_qm_dqtrxzone =
		kmem_zone_init(sizeof(struct xfs_dquot_acct), "xfs_dqtrx");
	if (!xfs_qm_dqtrxzone)
		goto out_free_dqzone;

	return 0;

out_free_dqzone:
	kmem_zone_destroy(xfs_qm_dqzone);
out:
	return -ENOMEM;
}

void __exit
xfs_qm_exit(void)
{
	kmem_zone_destroy(xfs_qm_dqtrxzone);
	kmem_zone_destroy(xfs_qm_dqzone);
}