fs/xfs/xfs_qm_syscalls.c
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/capability.h>

#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_alloc.h"
#include "xfs_quota.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_itable.h"
#include "xfs_bmap.h"
#include "xfs_rtalloc.h"
#include "xfs_error.h"
#include "xfs_attr.h"
#include "xfs_buf_item.h"
#include "xfs_utils.h"
#include "xfs_qm.h"
#include "xfs_trace.h"
#include "xfs_icache.h"

STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
                                   uint);
STATIC uint xfs_qm_export_flags(uint);
STATIC uint xfs_qm_export_qtype_flags(uint);

/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
        xfs_mount_t *mp,
        uint flags)
{
        struct xfs_quotainfo *q = mp->m_quotainfo;
        uint dqtype;
        int error;
        uint inactivate_flags;
        xfs_qoff_logitem_t *qoffstart;

        /*
         * No file system can have quotas enabled on disk but not in core.
         * Note that quota utilities (like quotaoff) _expect_
         * errno == EEXIST here.
         */
        if ((mp->m_qflags & flags) == 0)
                return XFS_ERROR(EEXIST);
        error = 0;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

        /*
         * We don't want to deal with two quotaoffs messing up each other,
         * so we're going to serialize it. quotaoff isn't exactly a performance
         * critical thing.
         * If quotaoff, then we must be dealing with the root filesystem.
         */
        ASSERT(q);
        mutex_lock(&q->qi_quotaofflock);

        /*
         * If we're just turning off quota enforcement, change mp and go.
         */
        if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
                mp->m_qflags &= ~(flags);

                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = mp->m_qflags;
                spin_unlock(&mp->m_sb_lock);
                mutex_unlock(&q->qi_quotaofflock);

                /* XXX what to do if error ? Revert back to old vals incore ? */
                error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
                return (error);
        }

        dqtype = 0;
        inactivate_flags = 0;
        /*
         * If accounting is off, we must turn enforcement off, clear the
         * quota 'CHKD' certificate to make it known that we have to
         * do a quotacheck the next time this quota is turned on.
         */
        if (flags & XFS_UQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_UQUOTA;
                flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
                inactivate_flags |= XFS_UQUOTA_ACTIVE;
        }
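        /*
         * Group and project quota share the XFS_OQUOTA_* check and
         * enforcement flags here, which is why both branches below fold
         * the same OQUOTA bits into the set being turned off.
         */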
        if (flags & XFS_GQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_GQUOTA;
                flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
                inactivate_flags |= XFS_GQUOTA_ACTIVE;
        } else if (flags & XFS_PQUOTA_ACCT) {
                dqtype |= XFS_QMOPT_PQUOTA;
                flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD);
                inactivate_flags |= XFS_PQUOTA_ACTIVE;
        }

        /*
         * Nothing to do? Don't complain. This happens when we're just
         * turning off quota enforcement.
         */
        if ((mp->m_qflags & flags) == 0)
                goto out_unlock;

        /*
         * Write the LI_QUOTAOFF log record, and do SB changes atomically,
         * and synchronously. If we fail to write, we should abort the
         * operation as it cannot be recovered safely if we crash.
         */
        error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
        if (error)
                goto out_unlock;

        /*
         * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
         * to take care of the race between dqget and quotaoff. We don't take
         * any special locks to reset these bits. All processes need to check
         * these bits *after* taking inode lock(s) to see if the particular
         * quota type is in the process of being turned off. If *ACTIVE, it is
         * guaranteed that all dquot structures and all quotainode ptrs will all
         * stay valid as long as that inode is kept locked.
         *
         * There is no turning back after this.
         */
        mp->m_qflags &= ~inactivate_flags;

        /*
         * Give back all the dquot reference(s) held by inodes.
         * Here we go thru every single incore inode in this file system, and
         * do a dqrele on the i_udquot/i_gdquot that it may have.
         * Essentially, as long as somebody has an inode locked, this guarantees
         * that quotas will not be turned off. This is handy because in a
         * transaction once we lock the inode(s) and check for quotaon, we can
         * depend on the quota inodes (and other things) being valid as long as
         * we keep the lock(s).
         */
        xfs_qm_dqrele_all_inodes(mp, flags);

        /*
         * Next we make the changes in the quota flag in the mount struct.
         * This isn't protected by a particular lock directly, because we
         * don't want to take a mrlock every time we depend on quotas being on.
         */
        mp->m_qflags &= ~flags;

        /*
         * Go through all the dquots of this file system and purge them,
         * according to what was turned off.
         */
        xfs_qm_dqpurge_all(mp, dqtype);

        /*
         * Transactions that had started before ACTIVE state bit was cleared
         * could have logged many dquots, so they'd have higher LSNs than
         * the first QUOTAOFF log record does. If we happen to crash when
         * the tail of the log has gone past the QUOTAOFF record, but
         * before the last dquot modification, those dquots __will__
         * recover, and that's not good.
         *
         * So, we have QUOTAOFF start and end logitems; the start
         * logitem won't get overwritten until the end logitem appears...
         */
        error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
        if (error) {
                /* We're screwed now. Shutdown is the only option. */
                xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
                goto out_unlock;
        }

        /*
         * If quotas are completely disabled, close shop.
         */
        if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) ||
            ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) {
                mutex_unlock(&q->qi_quotaofflock);
                xfs_qm_destroy_quotainfo(mp);
                return (0);
        }

        /*
         * Release our quotainode references if we don't need them anymore.
         */
        if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
                IRELE(q->qi_uquotaip);
                q->qi_uquotaip = NULL;
        }
        if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) {
                IRELE(q->qi_gquotaip);
                q->qi_gquotaip = NULL;
        }

out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

STATIC int
xfs_qm_scall_trunc_qfile(
        struct xfs_mount *mp,
        xfs_ino_t ino)
{
        struct xfs_inode *ip;
        struct xfs_trans *tp;
        int error;

        if (ino == NULLFSINO)
                return 0;

        error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
        if (error)
                return error;

        xfs_ilock(ip, XFS_IOLOCK_EXCL);

        tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
        error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
                                  XFS_TRANS_PERM_LOG_RES,
                                  XFS_ITRUNCATE_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                xfs_iunlock(ip, XFS_IOLOCK_EXCL);
                goto out_put;
        }

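        /*
         * The ILOCK must not be held across the transaction reservation
         * above (the log reservation may need to flush items that take the
         * ILOCK), so it is only acquired now that the reservation is done.
         */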
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);

        ip->i_d.di_size = 0;
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

        error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                     XFS_TRANS_ABORT);
                goto out_unlock;
        }

        ASSERT(ip->i_d.di_nextents == 0);

        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_unlock:
        xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
        IRELE(ip);
        return error;
}

int
xfs_qm_scall_trunc_qfiles(
        xfs_mount_t *mp,
        uint flags)
{
        int error = 0, error2 = 0;

        if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
                xfs_debug(mp, "%s: flags=%x m_qflags=%x\n",
                        __func__, flags, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }

        if (flags & XFS_DQ_USER)
                error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino);
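        /*
         * Group and project quota live in the same on-disk inode
         * (sb_gquotino) in this version, so either flag truncates that
         * one quota file.
         */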
        if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ))
                error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino);

        return error ? error : error2;
}

/*
 * Switch on (a given) quota enforcement for a filesystem. This takes
 * effect immediately.
 * (Switching on quota accounting must be done at mount time.)
 */
int
xfs_qm_scall_quotaon(
        xfs_mount_t *mp,
        uint flags)
{
        int error;
        uint qf;
        __int64_t sbflags;

        flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
        /*
         * Switching on quota accounting must be done at mount time.
         */
        flags &= ~(XFS_ALL_QUOTA_ACCT);

        sbflags = 0;

        if (flags == 0) {
                xfs_debug(mp, "%s: zero flags, m_qflags=%x\n",
                        __func__, mp->m_qflags);
                return XFS_ERROR(EINVAL);
        }

        /* No fs can turn on quotas with a delayed effect */
        ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0);

        /*
         * Can't enforce without accounting. We check the superblock
         * qflags here instead of m_qflags because rootfs can have
         * quota acct on ondisk without m_qflags' knowing.
         */
        if (((flags & XFS_UQUOTA_ACCT) == 0 &&
             (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 &&
             (flags & XFS_UQUOTA_ENFD))
            ||
            ((flags & XFS_PQUOTA_ACCT) == 0 &&
             (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 &&
             (flags & XFS_GQUOTA_ACCT) == 0 &&
             (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 &&
             (flags & XFS_OQUOTA_ENFD))) {
                xfs_debug(mp,
                        "%s: Can't enforce without acct, flags=%x sbflags=%x\n",
                        __func__, flags, mp->m_sb.sb_qflags);
                return XFS_ERROR(EINVAL);
        }
        /*
         * If everything's up to date incore, then don't waste time.
         */
        if ((mp->m_qflags & flags) == flags)
                return XFS_ERROR(EEXIST);

        /*
         * Change sb_qflags on disk but not incore mp->qflags
         * if this is the root filesystem.
         */
        spin_lock(&mp->m_sb_lock);
        qf = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = qf | flags;
        spin_unlock(&mp->m_sb_lock);

        /*
         * There's nothing to change if it's the same.
         */
        if ((qf & flags) == flags && sbflags == 0)
                return XFS_ERROR(EEXIST);
        sbflags |= XFS_SB_QFLAGS;

        if ((error = xfs_qm_write_sb_changes(mp, sbflags)))
                return (error);
        /*
         * If we aren't trying to switch on quota enforcement, we are done.
         */
        if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) !=
             (mp->m_qflags & XFS_UQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) !=
             (mp->m_qflags & XFS_PQUOTA_ACCT)) ||
            ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) !=
             (mp->m_qflags & XFS_GQUOTA_ACCT)) ||
            (flags & XFS_ALL_QUOTA_ENFD) == 0)
                return (0);

        if (! XFS_IS_QUOTA_RUNNING(mp))
                return XFS_ERROR(ESRCH);

        /*
         * Switch on quota enforcement in core.
         */
        mutex_lock(&mp->m_quotainfo->qi_quotaofflock);
        mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD);
        mutex_unlock(&mp->m_quotainfo->qi_quotaofflock);

        return (0);
}


/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 */
int
xfs_qm_scall_getqstat(
        struct xfs_mount *mp,
        struct fs_quota_stat *out)
{
        struct xfs_quotainfo *q = mp->m_quotainfo;
        struct xfs_inode *uip, *gip;
        bool tempuqip, tempgqip;

        uip = gip = NULL;
        tempuqip = tempgqip = false;
        memset(out, 0, sizeof(fs_quota_stat_t));

        out->qs_version = FS_QSTAT_VERSION;
        if (!xfs_sb_version_hasquota(&mp->m_sb)) {
                out->qs_uquota.qfs_ino = NULLFSINO;
                out->qs_gquota.qfs_ino = NULLFSINO;
                return (0);
        }
        out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                                                        (XFS_ALL_QUOTA_ACCT|
                                                         XFS_ALL_QUOTA_ENFD));
        out->qs_pad = 0;
        out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
        out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;

        if (q) {
                uip = q->qi_uquotaip;
                gip = q->qi_gquotaip;
        }
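        /*
         * If the quota inodes are not held in the quotainfo (e.g. quota
         * accounting is currently off but the superblock still records
         * them), grab temporary references just long enough to report
         * their space usage below.
         */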
        if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                                        0, 0, &uip) == 0)
                        tempuqip = true;
        }
        if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
                if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                                        0, 0, &gip) == 0)
                        tempgqip = true;
        }
        if (uip) {
                out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
                out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
                if (tempuqip)
                        IRELE(uip);
        }
        if (gip) {
                out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
                out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
                if (tempgqip)
                        IRELE(gip);
        }
        if (q) {
                out->qs_incoredqs = q->qi_dquots;
                out->qs_btimelimit = q->qi_btimelimit;
                out->qs_itimelimit = q->qi_itimelimit;
                out->qs_rtbtimelimit = q->qi_rtbtimelimit;
                out->qs_bwarnlimit = q->qi_bwarnlimit;
                out->qs_iwarnlimit = q->qi_iwarnlimit;
        }
        return 0;
}

#define XFS_DQ_MASK \
        (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)

/*
 * Adjust quota limits, and start/stop timers accordingly.
 */
int
xfs_qm_scall_setqlim(
        struct xfs_mount *mp,
        xfs_dqid_t id,
        uint type,
        fs_disk_quota_t *newlim)
{
        struct xfs_quotainfo *q = mp->m_quotainfo;
        struct xfs_disk_dquot *ddq;
        struct xfs_dquot *dqp;
        struct xfs_trans *tp;
        int error;
        xfs_qcnt_t hard, soft;

        if (newlim->d_fieldmask & ~XFS_DQ_MASK)
                return EINVAL;
        if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
                return 0;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
        error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
                                  0, 0, XFS_DEFAULT_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return (error);
        }

        /*
         * We don't want to race with a quotaoff so take the quotaoff lock.
         * (We don't hold an inode lock, so there's nothing else to stop
         * a quotaoff from happening). (XXXThis doesn't currently happen
         * because we take the vfslock before calling xfs_qm_sysent).
         */
        mutex_lock(&q->qi_quotaofflock);

        /*
         * Get the dquot (locked), and join it to the transaction.
         * Allocate the dquot if this doesn't exist.
         */
        if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) {
                xfs_trans_cancel(tp, XFS_TRANS_ABORT);
                ASSERT(error != ENOENT);
                goto out_unlock;
        }
        xfs_trans_dqjoin(tp, dqp);
        ddq = &dqp->q_core;

        /*
         * Make sure that hardlimits are >= soft limits before changing.
         */
        hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
                        be64_to_cpu(ddq->d_blk_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
                        be64_to_cpu(ddq->d_blk_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_blk_hardlimit = cpu_to_be64(hard);
                ddq->d_blk_softlimit = cpu_to_be64(soft);
                xfs_dquot_set_prealloc_limits(dqp);
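                /*
                 * id 0 is the default-limits dquot; mirror its limits into
                 * the quotainfo so they act as the defaults for dquots that
                 * carry no explicit limits of their own.
                 */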
                if (id == 0) {
                        q->qi_bhardlimit = hard;
                        q->qi_bsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft);
        }
        hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
                        be64_to_cpu(ddq->d_rtb_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
                (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
                        be64_to_cpu(ddq->d_rtb_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_rtb_hardlimit = cpu_to_be64(hard);
                ddq->d_rtb_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        q->qi_rtbhardlimit = hard;
                        q->qi_rtbsoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft);
        }

        hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
                (xfs_qcnt_t) newlim->d_ino_hardlimit :
                        be64_to_cpu(ddq->d_ino_hardlimit);
        soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
                (xfs_qcnt_t) newlim->d_ino_softlimit :
                        be64_to_cpu(ddq->d_ino_softlimit);
        if (hard == 0 || hard >= soft) {
                ddq->d_ino_hardlimit = cpu_to_be64(hard);
                ddq->d_ino_softlimit = cpu_to_be64(soft);
                if (id == 0) {
                        q->qi_ihardlimit = hard;
                        q->qi_isoftlimit = soft;
                }
        } else {
                xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft);
        }

        /*
         * Update warnings counter(s) if requested
         */
        if (newlim->d_fieldmask & FS_DQ_BWARNS)
                ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
        if (newlim->d_fieldmask & FS_DQ_IWARNS)
                ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
        if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
                ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);

        if (id == 0) {
                /*
                 * Timelimits for the super user set the relative time
                 * the other users can be over quota for this file system.
                 * If it is zero a default is used. Ditto for the default
                 * soft and hard limit values (already done, above), and
                 * for warnings.
                 */
                if (newlim->d_fieldmask & FS_DQ_BTIMER) {
                        q->qi_btimelimit = newlim->d_btimer;
                        ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
                }
                if (newlim->d_fieldmask & FS_DQ_ITIMER) {
                        q->qi_itimelimit = newlim->d_itimer;
                        ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
                }
                if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
                        q->qi_rtbtimelimit = newlim->d_rtbtimer;
                        ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
                }
                if (newlim->d_fieldmask & FS_DQ_BWARNS)
                        q->qi_bwarnlimit = newlim->d_bwarns;
                if (newlim->d_fieldmask & FS_DQ_IWARNS)
                        q->qi_iwarnlimit = newlim->d_iwarns;
                if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
                        q->qi_rtbwarnlimit = newlim->d_rtbwarns;
        } else {
                /*
                 * If the user is now over quota, start the timelimit.
                 * The user will not be 'warned'.
                 * Note that we keep the timers ticking, whether enforcement
                 * is on or off. We don't really want to bother with iterating
                 * over all ondisk dquots and turning the timers on/off.
                 */
                xfs_qm_adjust_dqtimers(mp, ddq);
        }
        dqp->dq_flags |= XFS_DQ_DIRTY;
        xfs_trans_log_dquot(tp, dqp);

        error = xfs_trans_commit(tp, 0);
        xfs_qm_dqrele(dqp);

out_unlock:
        mutex_unlock(&q->qi_quotaofflock);
        return error;
}

STATIC int
xfs_qm_log_quotaoff_end(
        xfs_mount_t *mp,
        xfs_qoff_logitem_t *startqoff,
        uint flags)
{
        xfs_trans_t *tp;
        int error;
        xfs_qoff_logitem_t *qoffi;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

        error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp),
                                  0, 0, XFS_DEFAULT_LOG_COUNT);
        if (error) {
                xfs_trans_cancel(tp, 0);
                return (error);
        }

        qoffi = xfs_trans_get_qoff_item(tp, startqoff,
                                        flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);
        return (error);
}


STATIC int
xfs_qm_log_quotaoff(
        xfs_mount_t *mp,
        xfs_qoff_logitem_t **qoffstartp,
        uint flags)
{
        xfs_trans_t *tp;
        int error;
        xfs_qoff_logitem_t *qoffi = NULL;
        uint oldsbqflag = 0;

        tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
        error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp),
                                  0, 0, XFS_DEFAULT_LOG_COUNT);
        if (error)
                goto error0;

        qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
        xfs_trans_log_quotaoff_item(tp, qoffi);

        spin_lock(&mp->m_sb_lock);
        oldsbqflag = mp->m_sb.sb_qflags;
        mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
        spin_unlock(&mp->m_sb_lock);

        xfs_mod_sb(tp, XFS_SB_QFLAGS);

        /*
         * We have to make sure that the transaction is secure on disk before we
         * return and actually stop quota accounting. So, make it synchronous.
         * We don't care about quotaoff's performance.
         */
        xfs_trans_set_sync(tp);
        error = xfs_trans_commit(tp, 0);

error0:
        if (error) {
                xfs_trans_cancel(tp, 0);
                /*
                 * No one else is modifying sb_qflags, so this is OK.
                 * We still hold the quotaofflock.
                 */
                spin_lock(&mp->m_sb_lock);
                mp->m_sb.sb_qflags = oldsbqflag;
                spin_unlock(&mp->m_sb_lock);
        }
        *qoffstartp = qoffi;
        return (error);
}


int
xfs_qm_scall_getquota(
        struct xfs_mount *mp,
        xfs_dqid_t id,
        uint type,
        struct fs_disk_quota *dst)
{
        struct xfs_dquot *dqp;
        int error;

        /*
         * Try to get the dquot. We don't want it allocated on disk, so
         * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't
         * exist, we'll get ENOENT back.
         */
        error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp);
        if (error)
                return error;

        /*
         * If everything's NULL, this dquot doesn't quite exist as far as
         * our utility programs are concerned.
         */
        if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
                error = XFS_ERROR(ENOENT);
                goto out_put;
        }

        memset(dst, 0, sizeof(*dst));
        dst->d_version = FS_DQUOT_VERSION;
        dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
        dst->d_id = be32_to_cpu(dqp->q_core.d_id);
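        /*
         * The fs_disk_quota interface reports space values in 512-byte
         * basic blocks, hence the FSB-to-BB conversions below.
         */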
        dst->d_blk_hardlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
        dst->d_blk_softlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
        dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
        dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
        dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
        dst->d_icount = dqp->q_res_icount;
        dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
        dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
        dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
        dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
        dst->d_rtb_hardlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
        dst->d_rtb_softlimit =
                XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
        dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
        dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
        dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);

        /*
         * Internally, we don't reset all the timers when quota enforcement
         * gets turned off. No need to confuse the user level code,
         * so return zeroes in that case.
         */
        if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) ||
            (!XFS_IS_OQUOTA_ENFORCED(mp) &&
             (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) {
                dst->d_btimer = 0;
                dst->d_itimer = 0;
                dst->d_rtbtimer = 0;
        }

#ifdef DEBUG
        if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
             (XFS_IS_OQUOTA_ENFORCED(mp) &&
              (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) &&
            dst->d_id != 0) {
                if ((dst->d_bcount > dst->d_blk_softlimit) &&
                    (dst->d_blk_softlimit > 0)) {
                        ASSERT(dst->d_btimer != 0);
                }
                if ((dst->d_icount > dst->d_ino_softlimit) &&
                    (dst->d_ino_softlimit > 0)) {
                        ASSERT(dst->d_itimer != 0);
                }
        }
#endif
out_put:
        xfs_qm_dqput(dqp);
        return error;
}

STATIC uint
xfs_qm_export_qtype_flags(
        uint flags)
{
        /*
         * Can't be more than one, or none.
         */
        ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
                (FS_PROJ_QUOTA | FS_USER_QUOTA));
        ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
                (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
        ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
                (FS_USER_QUOTA | FS_GROUP_QUOTA));
        ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);

        return (flags & XFS_DQ_USER) ?
                FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
                        FS_PROJ_QUOTA : FS_GROUP_QUOTA;
}

STATIC uint
xfs_qm_export_flags(
        uint flags)
{
        uint uflags;

        uflags = 0;
        if (flags & XFS_UQUOTA_ACCT)
                uflags |= FS_QUOTA_UDQ_ACCT;
        if (flags & XFS_PQUOTA_ACCT)
                uflags |= FS_QUOTA_PDQ_ACCT;
        if (flags & XFS_GQUOTA_ACCT)
                uflags |= FS_QUOTA_GDQ_ACCT;
        if (flags & XFS_UQUOTA_ENFD)
                uflags |= FS_QUOTA_UDQ_ENFD;
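        /*
         * XFS_OQUOTA_ENFD covers both group and project enforcement;
         * report it as whichever of the two is actually being accounted.
         */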
        if (flags & (XFS_OQUOTA_ENFD)) {
                uflags |= (flags & XFS_GQUOTA_ACCT) ?
                        FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD;
        }
        return (uflags);
}


STATIC int
xfs_dqrele_inode(
        struct xfs_inode *ip,
        struct xfs_perag *pag,
        int flags,
        void *args)
{
        /* skip quota inodes */
        if (ip == ip->i_mount->m_quotainfo->qi_uquotaip ||
            ip == ip->i_mount->m_quotainfo->qi_gquotaip) {
                ASSERT(ip->i_udquot == NULL);
                ASSERT(ip->i_gdquot == NULL);
                return 0;
        }

        xfs_ilock(ip, XFS_ILOCK_EXCL);
        if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
                xfs_qm_dqrele(ip->i_udquot);
                ip->i_udquot = NULL;
        }
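        /*
         * i_gdquot holds either the group or the project dquot here (the
         * two types are mutually exclusive in this version), so a single
         * release covers both flags.
         */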
        if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
                xfs_qm_dqrele(ip->i_gdquot);
                ip->i_gdquot = NULL;
        }
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
        return 0;
}


/*
 * Go thru all the inodes in the file system, releasing their dquots.
 *
 * Note that the mount structure gets modified to indicate that quotas are off
 * AFTER this, in the case of quotaoff.
 */
void
xfs_qm_dqrele_all_inodes(
        struct xfs_mount *mp,
        uint flags)
{
        ASSERT(mp->m_quotainfo);
        xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, NULL);
}