{
struct xfs_quotainfo *qi = dqp->q_mount->m_quotainfo;
struct xfs_dquot *gdqp;
+ struct xfs_dquot *pdqp;
trace_xfs_dqput_free(dqp);
/*
* If we just added a udquot to the freelist, then we want to release
- * the gdquot reference that it (probably) has. Otherwise it'll keep
- * the gdquot from getting reclaimed.
+ * the gdquot/pdquot reference that it (probably) has. Otherwise it'll
+ * keep the gdquot/pdquot from getting reclaimed.
*/
gdqp = dqp->q_gdquot;
if (gdqp) {
xfs_dqlock(gdqp);
dqp->q_gdquot = NULL;
}
+
+ pdqp = dqp->q_pdquot;
+ if (pdqp) {
+ xfs_dqlock(pdqp);
+ dqp->q_pdquot = NULL;
+ }
xfs_dqunlock(dqp);
/*
- * If we had a group quota hint, release it now.
+ * If we had a group/project quota hint, release it now.
*/
if (gdqp)
xfs_qm_dqput(gdqp);
+ if (pdqp)
+ xfs_qm_dqput(pdqp);
}
/*
xfs_fileoff_t q_fileoffset; /* offset in quotas file */
struct xfs_dquot*q_gdquot; /* group dquot, hint only */
+ struct xfs_dquot*q_pdquot; /* project dquot, hint only */
xfs_disk_dquot_t q_core; /* actual usage & quotas */
xfs_dq_logitem_t q_logitem; /* dquot log item */
xfs_qcnt_t q_res_bcount; /* total regular nblks used+reserved */
case XFS_DQ_USER:
return XFS_IS_UQUOTA_ON(mp);
case XFS_DQ_GROUP:
+ return XFS_IS_GQUOTA_ON(mp);
case XFS_DQ_PROJ:
- return XFS_IS_OQUOTA_ON(mp);
+ return XFS_IS_PQUOTA_ON(mp);
default:
return 0;
}
case XFS_DQ_USER:
return ip->i_udquot;
case XFS_DQ_GROUP:
- case XFS_DQ_PROJ:
return ip->i_gdquot;
+ case XFS_DQ_PROJ:
+ return ip->i_pdquot;
default:
return NULL;
}
iflags |= XFS_IDONTCACHE;
ip->i_udquot = NULL;
ip->i_gdquot = NULL;
+ ip->i_pdquot = NULL;
xfs_iflags_set(ip, iflags);
/* insert the new inode */
struct xfs_mount *i_mount; /* fs mount struct ptr */
struct xfs_dquot *i_udquot; /* user dquot */
struct xfs_dquot *i_gdquot; /* group dquot */
+ struct xfs_dquot *i_pdquot; /* project dquot */
/* Inode location stuff */
xfs_ino_t i_ino; /* inode number (agno/agino)*/
struct xfs_trans *tp;
unsigned int lock_flags = 0;
struct xfs_dquot *udqp = NULL;
- struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
struct xfs_dquot *olddquot = NULL;
int code;
if (XFS_IS_QUOTA_ON(mp) && (mask & FSX_PROJID)) {
code = xfs_qm_vop_dqalloc(ip, ip->i_d.di_uid,
ip->i_d.di_gid, fa->fsx_projid,
- XFS_QMOPT_PQUOTA, &udqp, &gdqp);
+ XFS_QMOPT_PQUOTA, &udqp, NULL, &pdqp);
if (code)
return code;
}
XFS_IS_PQUOTA_ON(mp) &&
xfs_get_projid(ip) != fa->fsx_projid) {
ASSERT(tp);
- code = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
- capable(CAP_FOWNER) ?
+ code = xfs_qm_vop_chown_reserve(tp, ip, udqp, NULL,
+ pdqp, capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (code) /* out of quota */
goto error_return;
if (xfs_get_projid(ip) != fa->fsx_projid) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_PQUOTA_ON(mp)) {
olddquot = xfs_qm_vop_chown(tp, ip,
- &ip->i_gdquot, gdqp);
+ &ip->i_pdquot, pdqp);
}
xfs_set_projid(ip, fa->fsx_projid);
*/
xfs_qm_dqrele(olddquot);
xfs_qm_dqrele(udqp);
- xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
return code;
error_return:
xfs_qm_dqrele(udqp);
- xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
xfs_trans_cancel(tp, 0);
if (lock_flags)
xfs_iunlock(ip, lock_flags);
ASSERT(udqp == NULL);
ASSERT(gdqp == NULL);
error = xfs_qm_vop_dqalloc(ip, uid, gid, xfs_get_projid(ip),
- qflags, &udqp, &gdqp);
+ qflags, &udqp, &gdqp, NULL);
if (error)
return error;
}
(XFS_IS_GQUOTA_ON(mp) && igid != gid))) {
ASSERT(tp);
error = xfs_qm_vop_chown_reserve(tp, ip, udqp, gdqp,
- capable(CAP_FOWNER) ?
+ NULL, capable(CAP_FOWNER) ?
XFS_QMOPT_FORCE_RES : 0);
if (error) /* out of quota */
goto out_trans_cancel;
struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
xfs_dqlock(dqp);
if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
}
/*
- * If this quota has a group hint attached, prepare for releasing it
- * now.
+ * If this quota has a hint attached, prepare for releasing it now.
*/
gdqp = dqp->q_gdquot;
if (gdqp) {
	xfs_dqlock(gdqp);
	dqp->q_gdquot = NULL;
}
+ pdqp = dqp->q_pdquot;
+ if (pdqp) {
+ xfs_dqlock(pdqp);
+ dqp->q_pdquot = NULL;
+ }
+
dqp->dq_flags |= XFS_DQ_FREEING;
xfs_dqflock(dqp);
if (gdqp)
xfs_qm_dqput(gdqp);
+ if (pdqp)
+ xfs_qm_dqput(pdqp);
return 0;
}
IRELE(mp->m_quotainfo->qi_gquotaip);
mp->m_quotainfo->qi_gquotaip = NULL;
}
+ if (mp->m_quotainfo->qi_pquotaip) {
+ IRELE(mp->m_quotainfo->qi_pquotaip);
+ mp->m_quotainfo->qi_pquotaip = NULL;
+ }
}
}
* be reclaimed as long as we have a ref from inode and we
* hold the ilock.
*/
- dqp = udqhint->q_gdquot;
+ if (type == XFS_DQ_GROUP)
+ dqp = udqhint->q_gdquot;
+ else
+ dqp = udqhint->q_pdquot;
if (dqp && be32_to_cpu(dqp->q_core.d_id) == id) {
ASSERT(*IO_idqpp == NULL);
/*
- * Given a udquot and gdquot, attach a ptr to the group dquot in the
- * udquot as a hint for future lookups.
+ * Given a udquot and group/project type, attach the group/project
+ * dquot pointer to the udquot as a hint for future lookups.
*/
STATIC void
-xfs_qm_dqattach_grouphint(
- xfs_dquot_t *udq,
- xfs_dquot_t *gdq)
+xfs_qm_dqattach_hint(
+ struct xfs_inode *ip,
+ int type)
{
- xfs_dquot_t *tmp;
+ struct xfs_dquot **dqhintp;
+ struct xfs_dquot *dqp;
+ struct xfs_dquot *udq = ip->i_udquot;
+
+ ASSERT(type == XFS_DQ_GROUP || type == XFS_DQ_PROJ);
xfs_dqlock(udq);
- tmp = udq->q_gdquot;
- if (tmp) {
- if (tmp == gdq)
+ if (type == XFS_DQ_GROUP) {
+ dqp = ip->i_gdquot;
+ dqhintp = &udq->q_gdquot;
+ } else {
+ dqp = ip->i_pdquot;
+ dqhintp = &udq->q_pdquot;
+ }
+
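+	/*
+	 * If a different dquot is already hinted, drop the old reference
+	 * before installing the new hint.
+	 */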
+ if (*dqhintp) {
+ struct xfs_dquot *tmp;
+
+ if (*dqhintp == dqp)
goto done;
- udq->q_gdquot = NULL;
+ tmp = *dqhintp;
+ *dqhintp = NULL;
xfs_qm_dqrele(tmp);
}
- udq->q_gdquot = xfs_qm_dqhold(gdq);
+ *dqhintp = xfs_qm_dqhold(dqp);
done:
xfs_dqunlock(udq);
}
}
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- if (XFS_IS_OQUOTA_ON(mp)) {
- error = XFS_IS_GQUOTA_ON(mp) ?
- xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
- flags & XFS_QMOPT_DQALLOC,
- ip->i_udquot, &ip->i_gdquot) :
- xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ if (XFS_IS_GQUOTA_ON(mp)) {
+ error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
flags & XFS_QMOPT_DQALLOC,
ip->i_udquot, &ip->i_gdquot);
		/*
		 * Don't worry about the udquot that we may have
		 * attached above. It'll get detached, if not already.
		 */
		if (error)
			goto done;
		nquotas++;
	}
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
+ if (XFS_IS_PQUOTA_ON(mp)) {
+ error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
+ flags & XFS_QMOPT_DQALLOC,
+ ip->i_udquot, &ip->i_pdquot);
+ /*
+ * Don't worry about the udquot that we may have
+ * attached above. It'll get detached, if not already.
+ */
+ if (error)
+ goto done;
+ nquotas++;
+ }
+
/*
- * Attach this group quota to the user quota as a hint.
+ * Attach this group/project quota to the user quota as a hint.
* This WON'T, in general, result in a thrash.
*/
- if (nquotas == 2) {
+ if (nquotas > 1 && ip->i_udquot) {
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- ASSERT(ip->i_udquot);
- ASSERT(ip->i_gdquot);
+ ASSERT(ip->i_gdquot || !XFS_IS_GQUOTA_ON(mp));
+ ASSERT(ip->i_pdquot || !XFS_IS_PQUOTA_ON(mp));
/*
* We do not have i_udquot locked at this point, but this check
		 * succeeds in general.
*/
if (ip->i_udquot->q_gdquot != ip->i_gdquot)
- xfs_qm_dqattach_grouphint(ip->i_udquot, ip->i_gdquot);
+ xfs_qm_dqattach_hint(ip, XFS_DQ_GROUP);
+
+ if (ip->i_udquot->q_pdquot != ip->i_pdquot)
+ xfs_qm_dqattach_hint(ip, XFS_DQ_PROJ);
}
done:
if (!error) {
if (XFS_IS_UQUOTA_ON(mp))
ASSERT(ip->i_udquot);
- if (XFS_IS_OQUOTA_ON(mp))
+ if (XFS_IS_GQUOTA_ON(mp))
ASSERT(ip->i_gdquot);
+ if (XFS_IS_PQUOTA_ON(mp))
+ ASSERT(ip->i_pdquot);
}
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
#endif
xfs_qm_dqdetach(
xfs_inode_t *ip)
{
- if (!(ip->i_udquot || ip->i_gdquot))
+ if (!(ip->i_udquot || ip->i_gdquot || ip->i_pdquot))
return;
trace_xfs_dquot_dqdetach(ip);
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
+ if (ip->i_pdquot) {
+ xfs_qm_dqrele(ip->i_pdquot);
+ ip->i_pdquot = NULL;
+ }
}
int
INIT_RADIX_TREE(&qinf->qi_uquota_tree, GFP_NOFS);
INIT_RADIX_TREE(&qinf->qi_gquota_tree, GFP_NOFS);
+ INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
mutex_init(&qinf->qi_tree_lock);
INIT_LIST_HEAD(&qinf->qi_lru_list);
IRELE(qi->qi_gquotaip);
qi->qi_gquotaip = NULL;
}
+ if (qi->qi_pquotaip) {
+ IRELE(qi->qi_pquotaip);
+ qi->qi_pquotaip = NULL;
+ }
mutex_destroy(&qi->qi_quotaofflock);
kmem_free(qi);
mp->m_quotainfo = NULL;
LIST_HEAD (buffer_list);
struct xfs_inode *uip = mp->m_quotainfo->qi_uquotaip;
struct xfs_inode *gip = mp->m_quotainfo->qi_gquotaip;
+ struct xfs_inode *pip = mp->m_quotainfo->qi_pquotaip;
count = INT_MAX;
structsz = 1;
lastino = 0;
flags = 0;
- ASSERT(uip || gip);
+ ASSERT(uip || gip || pip);
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
xfs_notice(mp, "Quotacheck needed: Please wait.");
}
if (gip) {
- error = xfs_qm_dqiterate(mp, gip, XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA,
+ error = xfs_qm_dqiterate(mp, gip, XFS_QMOPT_GQUOTA,
&buffer_list);
if (error)
goto error_return;
- flags |= XFS_IS_GQUOTA_ON(mp) ?
- XFS_GQUOTA_CHKD : XFS_PQUOTA_CHKD;
+ flags |= XFS_GQUOTA_CHKD;
+ }
+
+ if (pip) {
+ error = xfs_qm_dqiterate(mp, pip, XFS_QMOPT_PQUOTA,
+ &buffer_list);
+ if (error)
+ goto error_return;
+ flags |= XFS_PQUOTA_CHKD;
}
do {
{
struct xfs_inode *uip = NULL;
struct xfs_inode *gip = NULL;
+ struct xfs_inode *pip = NULL;
int error;
__int64_t sbflags = 0;
uint flags = 0;
if (error)
return XFS_ERROR(error);
}
- if (XFS_IS_OQUOTA_ON(mp) &&
+ if (XFS_IS_GQUOTA_ON(mp) &&
mp->m_sb.sb_gquotino != NULLFSINO) {
ASSERT(mp->m_sb.sb_gquotino > 0);
error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
if (error)
goto error_rele;
}
+ /* XXX: Use gquotino for now */
+ if (XFS_IS_PQUOTA_ON(mp) &&
+ mp->m_sb.sb_gquotino != NULLFSINO) {
+ ASSERT(mp->m_sb.sb_gquotino > 0);
+ error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
+ 0, 0, &pip);
+ if (error)
+ goto error_rele;
+ }
} else {
flags |= XFS_QMOPT_SBVERSION;
sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
}
/*
- * Create the two inodes, if they don't exist already. The changes
+ * Create the three inodes, if they don't exist already. The changes
* made above will get added to a transaction and logged in one of
* the qino_alloc calls below. If the device is readonly,
* temporarily switch to read-write to do this.
flags &= ~XFS_QMOPT_SBVERSION;
}
- if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
- flags |= (XFS_IS_GQUOTA_ON(mp) ?
- XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
+ if (XFS_IS_GQUOTA_ON(mp) && gip == NULL) {
error = xfs_qm_qino_alloc(mp, &gip,
- sbflags | XFS_SB_GQUOTINO, flags);
+ sbflags | XFS_SB_GQUOTINO,
+ flags | XFS_QMOPT_GQUOTA);
+ if (error)
+ goto error_rele;
+
+ flags &= ~XFS_QMOPT_SBVERSION;
+ }
+ if (XFS_IS_PQUOTA_ON(mp) && pip == NULL) {
+ /* XXX: Use XFS_SB_GQUOTINO for now */
+ error = xfs_qm_qino_alloc(mp, &pip,
+ sbflags | XFS_SB_GQUOTINO,
+ flags | XFS_QMOPT_PQUOTA);
if (error)
goto error_rele;
}
mp->m_quotainfo->qi_uquotaip = uip;
mp->m_quotainfo->qi_gquotaip = gip;
+ mp->m_quotainfo->qi_pquotaip = pip;
return 0;
IRELE(uip);
if (gip)
IRELE(gip);
+ if (pip)
+ IRELE(pip);
return XFS_ERROR(error);
}
prid_t prid,
uint flags,
struct xfs_dquot **O_udqpp,
- struct xfs_dquot **O_gdqpp)
+ struct xfs_dquot **O_gdqpp,
+ struct xfs_dquot **O_pdqpp)
{
struct xfs_mount *mp = ip->i_mount;
struct xfs_dquot *uq = NULL;
struct xfs_dquot *gq = NULL;
+ struct xfs_dquot *pq = NULL;
int error;
uint lockflags;
ASSERT(ip->i_gdquot);
gq = xfs_qm_dqhold(ip->i_gdquot);
}
- } else if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
+ }
+ if ((flags & XFS_QMOPT_PQUOTA) && XFS_IS_PQUOTA_ON(mp)) {
if (xfs_get_projid(ip) != prid) {
xfs_iunlock(ip, lockflags);
error = xfs_qm_dqget(mp, NULL, (xfs_dqid_t)prid,
XFS_DQ_PROJ,
XFS_QMOPT_DQALLOC |
XFS_QMOPT_DOWARN,
- &gq);
+ &pq);
if (error) {
ASSERT(error != ENOENT);
goto error_rele;
}
- xfs_dqunlock(gq);
+ xfs_dqunlock(pq);
lockflags = XFS_ILOCK_SHARED;
xfs_ilock(ip, lockflags);
} else {
- ASSERT(ip->i_gdquot);
- gq = xfs_qm_dqhold(ip->i_gdquot);
+ ASSERT(ip->i_pdquot);
+ pq = xfs_qm_dqhold(ip->i_pdquot);
}
}
	if (O_gdqpp)
*O_gdqpp = gq;
else if (gq)
xfs_qm_dqrele(gq);
+ if (O_pdqpp)
+ *O_pdqpp = pq;
+ else if (pq)
+ xfs_qm_dqrele(pq);
return 0;
error_rele:
+ if (gq)
+ xfs_qm_dqrele(gq);
if (uq)
xfs_qm_dqrele(uq);
return error;
struct xfs_inode *ip,
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
uint flags)
{
struct xfs_mount *mp = ip->i_mount;
uint delblks, blkflags, prjflags = 0;
struct xfs_dquot *udq_unres = NULL;
struct xfs_dquot *gdq_unres = NULL;
+ struct xfs_dquot *pdq_unres = NULL;
struct xfs_dquot *udq_delblks = NULL;
struct xfs_dquot *gdq_delblks = NULL;
+ struct xfs_dquot *pdq_delblks = NULL;
int error;
udq_unres = ip->i_udquot;
}
}
- if (XFS_IS_OQUOTA_ON(ip->i_mount) && gdqp) {
- if (XFS_IS_PQUOTA_ON(ip->i_mount) &&
- xfs_get_projid(ip) != be32_to_cpu(gdqp->q_core.d_id))
- prjflags = XFS_QMOPT_ENOSPC;
-
- if (prjflags ||
- (XFS_IS_GQUOTA_ON(ip->i_mount) &&
- ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id))) {
- gdq_delblks = gdqp;
- if (delblks) {
- ASSERT(ip->i_gdquot);
- gdq_unres = ip->i_gdquot;
- }
+ if (XFS_IS_GQUOTA_ON(ip->i_mount) && gdqp &&
+ ip->i_d.di_gid != be32_to_cpu(gdqp->q_core.d_id)) {
+ gdq_delblks = gdqp;
+ if (delblks) {
+ ASSERT(ip->i_gdquot);
+ gdq_unres = ip->i_gdquot;
+ }
+ }
+
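+	/*
+	 * A project quota failure is reported as ENOSPC rather than EDQUOT,
+	 * hence the XFS_QMOPT_ENOSPC flag below.
+	 */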
+ if (XFS_IS_PQUOTA_ON(ip->i_mount) && pdqp &&
+ xfs_get_projid(ip) != be32_to_cpu(pdqp->q_core.d_id)) {
+ prjflags = XFS_QMOPT_ENOSPC;
+ pdq_delblks = pdqp;
+ if (delblks) {
+ ASSERT(ip->i_pdquot);
+ pdq_unres = ip->i_pdquot;
}
}
error = xfs_trans_reserve_quota_bydquots(tp, ip->i_mount,
- udq_delblks, gdq_delblks, ip->i_d.di_nblocks, 1,
+ udq_delblks, gdq_delblks, pdq_delblks,
+ ip->i_d.di_nblocks, 1,
flags | blkflags | prjflags);
if (error)
return error;
/*
* Do the reservations first. Unreservation can't fail.
*/
- ASSERT(udq_delblks || gdq_delblks);
- ASSERT(udq_unres || gdq_unres);
+ ASSERT(udq_delblks || gdq_delblks || pdq_delblks);
+ ASSERT(udq_unres || gdq_unres || pdq_unres);
error = xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_delblks, gdq_delblks, (xfs_qcnt_t)delblks, 0,
+ udq_delblks, gdq_delblks, pdq_delblks,
+ (xfs_qcnt_t)delblks, 0,
flags | blkflags | prjflags);
if (error)
return error;
xfs_trans_reserve_quota_bydquots(NULL, ip->i_mount,
- udq_unres, gdq_unres, -((xfs_qcnt_t)delblks), 0,
- blkflags);
+ udq_unres, gdq_unres, pdq_unres,
+ -((xfs_qcnt_t)delblks), 0, blkflags);
}
return (0);
struct xfs_trans *tp,
struct xfs_inode *ip,
struct xfs_dquot *udqp,
- struct xfs_dquot *gdqp)
+ struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp)
{
struct xfs_mount *mp = tp->t_mountp;
}
if (gdqp) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(XFS_IS_OQUOTA_ON(mp));
- ASSERT((XFS_IS_GQUOTA_ON(mp) ?
- ip->i_d.di_gid : xfs_get_projid(ip)) ==
- be32_to_cpu(gdqp->q_core.d_id));
-
+ ASSERT(XFS_IS_GQUOTA_ON(mp));
+ ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
+ if (pdqp) {
+ ASSERT(ip->i_pdquot == NULL);
+ ASSERT(XFS_IS_PQUOTA_ON(mp));
+ ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
+
+ ip->i_pdquot = xfs_qm_dqhold(pdqp);
+ xfs_trans_mod_dquot(tp, pdqp, XFS_TRANS_DQ_ICOUNT, 1);
+ }
}
typedef struct xfs_quotainfo {
struct radix_tree_root qi_uquota_tree;
struct radix_tree_root qi_gquota_tree;
+ struct radix_tree_root qi_pquota_tree;
struct mutex qi_tree_lock;
- xfs_inode_t *qi_uquotaip; /* user quota inode */
- xfs_inode_t *qi_gquotaip; /* group quota inode */
+ struct xfs_inode *qi_uquotaip; /* user quota inode */
+ struct xfs_inode *qi_gquotaip; /* group quota inode */
+ struct xfs_inode *qi_pquotaip; /* project quota inode */
struct list_head qi_lru_list;
struct mutex qi_lru_lock;
int qi_lru_count;
case XFS_DQ_USER:
return &qi->qi_uquota_tree;
case XFS_DQ_GROUP:
- case XFS_DQ_PROJ:
return &qi->qi_gquota_tree;
+ case XFS_DQ_PROJ:
+ return &qi->qi_pquota_tree;
default:
ASSERT(0);
}
case XFS_DQ_USER:
return dqp->q_mount->m_quotainfo->qi_uquotaip;
case XFS_DQ_GROUP:
- case XFS_DQ_PROJ:
return dqp->q_mount->m_quotainfo->qi_gquotaip;
+ case XFS_DQ_PROJ:
+ return dqp->q_mount->m_quotainfo->qi_pquotaip;
default:
ASSERT(0);
}
struct xfs_dquot *, uint, long);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
- struct xfs_dquot *, long, long, uint);
+ struct xfs_dquot *, struct xfs_dquot *,
+ long, long, uint);
extern void xfs_trans_dqjoin(struct xfs_trans *, struct xfs_dquot *);
extern void xfs_trans_log_dquot(struct xfs_trans *, struct xfs_dquot *);
/*
- * We keep the usr and grp dquots separately so that locking will be easier
- * to do at commit time. All transactions that we know of at this point
+ * We keep the usr, grp, and prj dquots separately so that locking will be
+ * easier to do at commit time. All transactions that we know of at this point
* affect no more than two dquots of one type. Hence, the TRANS_MAXDQS value.
*/
enum {
XFS_QM_TRANS_USR = 0,
XFS_QM_TRANS_GRP,
+ XFS_QM_TRANS_PRJ,
XFS_QM_TRANS_DQTYPES
};
#define XFS_QM_TRANS_MAXDQS 2
if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) ||
(!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) ||
- (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
- (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) ||
(gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) ||
- (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) &&
+ (!gquotaondisk && XFS_IS_GQUOTA_ON(mp)) ||
+ (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) ||
+ (!pquotaondisk && XFS_IS_PQUOTA_ON(mp))) &&
xfs_dev_is_read_only(mp, "changing quota state")) {
xfs_warn(mp, "please mount with%s%s%s%s.",
(!quotaondisk ? "out quota" : ""),
(uquotaondisk ? " usrquota" : ""),
- (pquotaondisk ? " prjquota" : ""),
- (gquotaondisk ? " grpquota" : ""));
+ (gquotaondisk ? " grpquota" : ""),
+ (pquotaondisk ? " prjquota" : ""));
return XFS_ERROR(EPERM);
}
dqtype |= XFS_QMOPT_GQUOTA;
flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
inactivate_flags |= XFS_GQUOTA_ACTIVE;
- } else if (flags & XFS_PQUOTA_ACCT) {
+ }
+ if (flags & XFS_PQUOTA_ACCT) {
dqtype |= XFS_QMOPT_PQUOTA;
flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
inactivate_flags |= XFS_PQUOTA_ACTIVE;
IRELE(q->qi_uquotaip);
q->qi_uquotaip = NULL;
}
- if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
+ if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
IRELE(q->qi_gquotaip);
q->qi_gquotaip = NULL;
}
+ if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
+ IRELE(q->qi_pquotaip);
+ q->qi_pquotaip = NULL;
+ }
out_unlock:
mutex_unlock(&q->qi_quotaofflock);
{
/* skip quota inodes */
if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
- ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
+ ip == ip->i_mount->m_quotainfo->qi_gquotaip ||
+ ip == ip->i_mount->m_quotainfo->qi_pquotaip) {
ASSERT(ip->i_udquot == NULL);
ASSERT(ip->i_gdquot == NULL);
+ ASSERT(ip->i_pdquot == NULL);
return 0;
}
xfs_qm_dqrele(ip->i_udquot);
ip->i_udquot = NULL;
}
- if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
+ if ((flags & XFS_GQUOTA_ACCT) && ip->i_gdquot) {
xfs_qm_dqrele(ip->i_gdquot);
ip->i_gdquot = NULL;
}
+ if ((flags & XFS_PQUOTA_ACCT) && ip->i_pdquot) {
+ xfs_qm_dqrele(ip->i_pdquot);
+ ip->i_pdquot = NULL;
+ }
xfs_iunlock(ip, XFS_ILOCK_EXCL);
return 0;
}
* we didn't have the inode locked, the appropriate dquot(s) will be
* attached atomically.
*/
-#define XFS_NOT_DQATTACHED(mp, ip) ((XFS_IS_UQUOTA_ON(mp) &&\
- (ip)->i_udquot == NULL) || \
- (XFS_IS_OQUOTA_ON(mp) && \
- (ip)->i_gdquot == NULL))
+#define XFS_NOT_DQATTACHED(mp, ip) \
+ ((XFS_IS_UQUOTA_ON(mp) && (ip)->i_udquot == NULL) || \
+ (XFS_IS_GQUOTA_ON(mp) && (ip)->i_gdquot == NULL) || \
+ (XFS_IS_PQUOTA_ON(mp) && (ip)->i_pdquot == NULL))
#define XFS_QM_NEED_QUOTACHECK(mp) \
((XFS_IS_UQUOTA_ON(mp) && \
struct xfs_inode *, long, long, uint);
extern int xfs_trans_reserve_quota_bydquots(struct xfs_trans *,
struct xfs_mount *, struct xfs_dquot *,
- struct xfs_dquot *, long, long, uint);
+ struct xfs_dquot *, struct xfs_dquot *, long, long, uint);
extern int xfs_qm_vop_dqalloc(struct xfs_inode *, uid_t, gid_t, prid_t, uint,
- struct xfs_dquot **, struct xfs_dquot **);
+ struct xfs_dquot **, struct xfs_dquot **, struct xfs_dquot **);
extern void xfs_qm_vop_create_dqattach(struct xfs_trans *, struct xfs_inode *,
- struct xfs_dquot *, struct xfs_dquot *);
+ struct xfs_dquot *, struct xfs_dquot *, struct xfs_dquot *);
extern int xfs_qm_vop_rename_dqattach(struct xfs_inode **);
extern struct xfs_dquot *xfs_qm_vop_chown(struct xfs_trans *,
struct xfs_inode *, struct xfs_dquot **, struct xfs_dquot *);
extern int xfs_qm_vop_chown_reserve(struct xfs_trans *, struct xfs_inode *,
- struct xfs_dquot *, struct xfs_dquot *, uint);
+ struct xfs_dquot *, struct xfs_dquot *,
+ struct xfs_dquot *, uint);
extern int xfs_qm_dqattach(struct xfs_inode *, uint);
extern int xfs_qm_dqattach_locked(struct xfs_inode *, uint);
extern void xfs_qm_dqdetach(struct xfs_inode *);
#else
static inline int
xfs_qm_vop_dqalloc(struct xfs_inode *ip, uid_t uid, gid_t gid, prid_t prid,
- uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp)
+ uint flags, struct xfs_dquot **udqp, struct xfs_dquot **gdqp,
+ struct xfs_dquot **pdqp)
{
*udqp = NULL;
*gdqp = NULL;
+ *pdqp = NULL;
return 0;
}
#define xfs_trans_dup_dqinfo(tp, tp2)
}
static inline int xfs_trans_reserve_quota_bydquots(struct xfs_trans *tp,
struct xfs_mount *mp, struct xfs_dquot *udqp,
- struct xfs_dquot *gdqp, long nblks, long nions, uint flags)
+ struct xfs_dquot *gdqp, struct xfs_dquot *pdqp,
+ long nblks, long nions, uint flags)
{
return 0;
}
-#define xfs_qm_vop_create_dqattach(tp, ip, u, g)
+#define xfs_qm_vop_create_dqattach(tp, ip, u, g, p)
#define xfs_qm_vop_rename_dqattach(it) (0)
#define xfs_qm_vop_chown(tp, ip, old, new) (NULL)
-#define xfs_qm_vop_chown_reserve(tp, ip, u, g, fl) (0)
+#define xfs_qm_vop_chown_reserve(tp, ip, u, g, p, fl) (0)
#define xfs_qm_dqattach(ip, fl) (0)
#define xfs_qm_dqattach_locked(ip, fl) (0)
#define xfs_qm_dqdetach(ip)
#define xfs_trans_unreserve_quota_nblks(tp, ip, nblks, ninos, flags) \
xfs_trans_reserve_quota_nblks(tp, ip, -(nblks), -(ninos), flags)
-#define xfs_trans_reserve_quota(tp, mp, ud, gd, nb, ni, f) \
- xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, nb, ni, \
+#define xfs_trans_reserve_quota(tp, mp, ud, gd, pd, nb, ni, f) \
+ xfs_trans_reserve_quota_bydquots(tp, mp, ud, gd, pd, nb, ni, \
f | XFS_QMOPT_RES_REGBLKS)
extern int xfs_qm_dqcheck(struct xfs_mount *, xfs_disk_dquot_t *,
prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
uint resblks;
*ipp = NULL;
* Make sure that we have allocated dquot(s) on disk.
*/
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp, &pdqp);
if (error)
goto std_return;
/*
* Reserve disk quota : blocks and inode.
*/
- error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
+ error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
+ pdqp, resblks, 1, 0);
if (error)
goto error_return;
/*
* Also attach the dquot(s) to it, if applicable.
*/
- xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
+ xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
if (resblks)
resblks -= XFS_IALLOC_SPACE_RES(mp);
error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
*ipp = ip;
return 0;
xfs_trans_cancel(tp, cancel_flags);
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot)
(void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta);
- if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot)
+ if (XFS_IS_GQUOTA_ON(mp) && ip->i_gdquot)
(void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta);
+ if (XFS_IS_PQUOTA_ON(mp) && ip->i_pdquot)
+ (void) xfs_trans_mod_dquot(tp, ip->i_pdquot, field, delta);
}
STATIC struct xfs_dqtrx *
if (XFS_QM_ISUDQ(dqp))
qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_USR];
- else
+ else if (XFS_QM_ISGDQ(dqp))
qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_GRP];
+ else if (XFS_QM_ISPDQ(dqp))
+ qa = tp->t_dqinfo->dqs[XFS_QM_TRANS_PRJ];
+ else
+ return NULL;
for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) {
if (qa[i].qt_dquot == NULL ||
/*
* Given dquot(s), make disk block and/or inode reservations against them.
- * The fact that this does the reservation against both the usr and
- * grp/prj quotas is important, because this follows a both-or-nothing
+ * The fact that this does the reservation against user, group and
+ * project quotas is important, because this follows an all-or-nothing
* approach.
*
* flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown.
struct xfs_mount *mp,
struct xfs_dquot *udqp,
struct xfs_dquot *gdqp,
+ struct xfs_dquot *pdqp,
long nblks,
long ninos,
uint flags)
goto unwind_usr;
}
+ if (pdqp) {
+ error = xfs_trans_dqresv(tp, mp, pdqp, nblks, ninos, flags);
+ if (error)
+ goto unwind_grp;
+ }
+
/*
* Didn't change anything critical, so, no need to log
*/
return 0;
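+/*
+ * Back out the group reservation when the project reservation fails;
+ * XFS_QMOPT_FORCE_RES evades limit enforcement, so the rollback itself
+ * cannot be rejected.
+ */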
+unwind_grp:
+ flags |= XFS_QMOPT_FORCE_RES;
+ if (gdqp)
+ xfs_trans_dqresv(tp, mp, gdqp, -nblks, -ninos, flags);
unwind_usr:
flags |= XFS_QMOPT_FORCE_RES;
if (udqp)
*/
return xfs_trans_reserve_quota_bydquots(tp, mp,
ip->i_udquot, ip->i_gdquot,
+ ip->i_pdquot,
nblks, ninos, flags);
}
prid_t prid;
struct xfs_dquot *udqp = NULL;
struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
uint resblks;
uint log_res;
uint log_count;
* Make sure that we have allocated dquot(s) on disk.
*/
error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
- XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
+ XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
+ &udqp, &gdqp, &pdqp);
if (error)
return error;
/*
* Reserve disk quota and the inode.
*/
- error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
+ error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
+ pdqp, resblks, 1, 0);
if (error)
goto out_trans_cancel;
* These ids of the inode couldn't have changed since the new
* inode has been locked ever since it was created.
*/
- xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
+ xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);
error = xfs_bmap_finish(&tp, &free_list, &committed);
if (error)
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
*ipp = ip;
return 0;
xfs_qm_dqrele(udqp);
xfs_qm_dqrele(gdqp);
+ xfs_qm_dqrele(pdqp);
if (unlock_dp_on_error)
xfs_iunlock(dp, XFS_ILOCK_EXCL);
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
error = xfs_trans_reserve_quota(tp, mp,
- ip->i_udquot, ip->i_gdquot,
+ ip->i_udquot, ip->i_gdquot, ip->i_pdquot,
resblks, 0, XFS_QMOPT_RES_REGBLKS);
if (error)
goto error1;