Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / fs / xfs / xfs_dquot_item.c
1 /*
2 * Copyright (c) 2000-2003 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_bit.h"
21 #include "xfs_log.h"
22 #include "xfs_inum.h"
23 #include "xfs_trans.h"
24 #include "xfs_sb.h"
25 #include "xfs_ag.h"
26 #include "xfs_alloc.h"
27 #include "xfs_quota.h"
28 #include "xfs_mount.h"
29 #include "xfs_bmap_btree.h"
30 #include "xfs_inode.h"
31 #include "xfs_bmap.h"
32 #include "xfs_rtalloc.h"
33 #include "xfs_error.h"
34 #include "xfs_itable.h"
35 #include "xfs_attr.h"
36 #include "xfs_buf_item.h"
37 #include "xfs_trans_priv.h"
38 #include "xfs_qm.h"
39
/* Convert a generic log item back to its containing dquot log item. */
static inline struct xfs_dq_logitem *DQUOT_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_dq_logitem, qli_item);
}
44
/*
 * Returns the number of iovecs needed to log the given dquot item.
 */
STATIC uint
xfs_qm_dquot_logitem_size(
	struct xfs_log_item	*lip)
{
	/*
	 * We need only two iovecs, one for the format, one for the real
	 * thing.  This must stay in sync with the two regions written by
	 * xfs_qm_dquot_logitem_format() and the qlf_size it stamps.
	 */
	return 2;
}
57
58 /*
59 * fills in the vector of log iovecs for the given dquot log item.
60 */
61 STATIC void
62 xfs_qm_dquot_logitem_format(
63 struct xfs_log_item *lip,
64 struct xfs_log_iovec *logvec)
65 {
66 struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip);
67
68 logvec->i_addr = &qlip->qli_format;
69 logvec->i_len = sizeof(xfs_dq_logformat_t);
70 logvec->i_type = XLOG_REG_TYPE_QFORMAT;
71 logvec++;
72 logvec->i_addr = &qlip->qli_dquot->q_core;
73 logvec->i_len = sizeof(xfs_disk_dquot_t);
74 logvec->i_type = XLOG_REG_TYPE_DQUOT;
75
76 qlip->qli_format.qlf_size = 2;
77
78 }
79
/*
 * Increment the pin count of the given dquot.  A pinned dquot cannot be
 * written back until its pin count drops to zero again.
 */
STATIC void
xfs_qm_dquot_logitem_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	/* Caller must hold the dquot lock while pinning. */
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	atomic_inc(&dqp->q_pincount);
}
92
/*
 * Decrement the pin count of the given dquot, and wake up anyone in
 * xfs_qm_dqunpin_wait() if the count goes to 0.  The dquot must have
 * been previously pinned with a call to xfs_qm_dquot_logitem_pin().
 *
 * @remove is unused here; this item type has no abort/removal state to
 * tear down on unpin.
 */
STATIC void
xfs_qm_dquot_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(atomic_read(&dqp->q_pincount) > 0);
	if (atomic_dec_and_test(&dqp->q_pincount))
		wake_up(&dqp->q_pinwait);
}
110
/*
 * Given the logitem, this writes the corresponding dquot entry to disk
 * asynchronously.  This is called with the dquot entry securely locked;
 * we simply get xfs_qm_dqflush() to do the work, and unlock the dquot
 * at the end.
 *
 * Entered from AIL pushing after the trylock routine returned
 * XFS_ITEM_SUCCESS, so both the dquot lock and the flush lock are held
 * here (both are asserted below).
 */
STATIC void
xfs_qm_dquot_logitem_push(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;
	int			error;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(!completion_done(&dqp->q_flush));

	/*
	 * Since we were able to lock the dquot's flush lock and
	 * we found it on the AIL, the dquot must be dirty.  This
	 * is because the dquot is removed from the AIL while still
	 * holding the flush lock in xfs_dqflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the dquot.
	 */
	error = xfs_qm_dqflush(dqp, SYNC_TRYLOCK);
	if (error)
		xfs_warn(dqp->q_mount, "%s: push error %d on dqp %p",
			__func__, error, dqp);
	/* A flush error is only logged, not propagated; the AIL retries. */
	xfs_dqunlock(dqp);
}
142
/*
 * Return the lsn the dquot item should be tracked at in the AIL once
 * the commit at @lsn is complete.
 */
STATIC xfs_lsn_t
xfs_qm_dquot_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/*
	 * We always re-log the entire dquot when it becomes dirty,
	 * so, the latest copy _is_ the only one that matters.
	 */
	return lsn;
}
154
/*
 * This is called to wait for the given dquot to be unpinned.
 * Most of these pin/unpin routines are plagiarized from inode code.
 *
 * Caller must hold the dquot lock; the wakeup is issued by
 * xfs_qm_dquot_logitem_unpin() when the pin count reaches zero.
 */
void
xfs_qm_dqunpin_wait(
	struct xfs_dquot	*dqp)
{
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	if (atomic_read(&dqp->q_pincount) == 0)
		return;

	/*
	 * Give the log a push so we don't wait here too long.
	 */
	xfs_log_force(dqp->q_mount, 0);
	wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
}
173
/*
 * This is called when IOP_TRYLOCK returns XFS_ITEM_PUSHBUF to indicate that
 * the dquot is locked by us, but the flush lock isn't. So, here we are
 * going to see if the relevant dquot buffer is incore, waiting on DELWRI.
 * If so, we want to push it out to help us take this item off the AIL as soon
 * as possible.
 *
 * We must not be holding the AIL lock at this point. Calling incore() to
 * search the buffer cache can be a time consuming thing, and AIL lock is a
 * spinlock.
 *
 * Returns false only when the backing buffer was found pinned; true in
 * every other case.  The dquot lock is always dropped before returning.
 */
STATIC bool
xfs_qm_dquot_logitem_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_dq_logitem	*qlip = DQUOT_ITEM(lip);
	struct xfs_dquot	*dqp = qlip->qli_dquot;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * If flushlock isn't locked anymore, chances are that the
	 * dquot flush completed and the dquot was taken off the AIL.
	 * So, just get out.
	 */
	if (completion_done(&dqp->q_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_dqunlock(dqp);
		return true;
	}

	/* Trylock the backing buffer; do not block while the dquot is locked. */
	bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno,
			dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK);
	xfs_dqunlock(dqp);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
219
/*
 * This is called to attempt to lock the dquot associated with this
 * dquot log item.  Don't sleep on the dquot lock or the flush lock.
 * If the flush lock is already held, indicating that the dquot has
 * been or is in the process of being flushed, then see if we can
 * find the dquot's buffer in the buffer cache without sleeping.  If
 * we can and it is marked delayed write, then we want to send it out.
 * We delay doing so until the push routine, though, to avoid sleeping
 * in any device strategy routines.
 *
 * Returns one of XFS_ITEM_PINNED / XFS_ITEM_LOCKED / XFS_ITEM_PUSHBUF /
 * XFS_ITEM_SUCCESS; on PUSHBUF the dquot lock is held, on SUCCESS both
 * the dquot lock and the flush lock are held.
 */
STATIC uint
xfs_qm_dquot_logitem_trylock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	/* A pinned dquot cannot be flushed until it is unpinned. */
	if (atomic_read(&dqp->q_pincount) > 0)
		return XFS_ITEM_PINNED;

	if (!xfs_dqlock_nowait(dqp))
		return XFS_ITEM_LOCKED;

	if (!xfs_dqflock_nowait(dqp)) {
		/*
		 * dquot has already been flushed to the backing buffer,
		 * leave it locked, pushbuf routine will unlock it.
		 */
		return XFS_ITEM_PUSHBUF;
	}

	ASSERT(lip->li_flags & XFS_LI_IN_AIL);
	return XFS_ITEM_SUCCESS;
}
253
/*
 * Unlock the dquot associated with the log item, and clear the
 * transaction pointer that is specific to the current transaction.
 * Unlike inodes, dquots are never 'held' across commit, so there is
 * no XFS_LI_HOLD case to consider here (see the note below).
 */
STATIC void
xfs_qm_dquot_logitem_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_dquot	*dqp = DQUOT_ITEM(lip)->qli_dquot;

	ASSERT(XFS_DQ_IS_LOCKED(dqp));

	/*
	 * Clear the transaction pointer in the dquot
	 */
	dqp->q_transp = NULL;

	/*
	 * dquots are never 'held' from getting unlocked at the end of
	 * a transaction.  Their locking and unlocking is hidden inside the
	 * transaction layer, within trans_commit.  Hence, no LI_HOLD flag
	 * for the logitem.
	 */
	xfs_dqunlock(dqp);
}
281
/*
 * this needs to stamp an lsn into the dquot, I think.
 * rpc's that look at user dquot's would then have to
 * push on the dependency recorded in the dquot
 */
STATIC void
xfs_qm_dquot_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	/* Intentionally empty: nothing is done at commit time for dquots. */
}
293
/*
 * This is the ops vector for dquots, wired up by
 * xfs_qm_dquot_logitem_init() below.
 */
static const struct xfs_item_ops xfs_dquot_item_ops = {
	.iop_size	= xfs_qm_dquot_logitem_size,
	.iop_format	= xfs_qm_dquot_logitem_format,
	.iop_pin	= xfs_qm_dquot_logitem_pin,
	.iop_unpin	= xfs_qm_dquot_logitem_unpin,
	.iop_trylock	= xfs_qm_dquot_logitem_trylock,
	.iop_unlock	= xfs_qm_dquot_logitem_unlock,
	.iop_committed	= xfs_qm_dquot_logitem_committed,
	.iop_push	= xfs_qm_dquot_logitem_push,
	.iop_pushbuf	= xfs_qm_dquot_logitem_pushbuf,
	.iop_committing = xfs_qm_dquot_logitem_committing
};
309
310 /*
311 * Initialize the dquot log item for a newly allocated dquot.
312 * The dquot isn't locked at this point, but it isn't on any of the lists
313 * either, so we don't care.
314 */
315 void
316 xfs_qm_dquot_logitem_init(
317 struct xfs_dquot *dqp)
318 {
319 struct xfs_dq_logitem *lp = &dqp->q_logitem;
320
321 xfs_log_item_init(dqp->q_mount, &lp->qli_item, XFS_LI_DQUOT,
322 &xfs_dquot_item_ops);
323 lp->qli_dquot = dqp;
324 lp->qli_format.qlf_type = XFS_LI_DQUOT;
325 lp->qli_format.qlf_id = be32_to_cpu(dqp->q_core.d_id);
326 lp->qli_format.qlf_blkno = dqp->q_blkno;
327 lp->qli_format.qlf_len = 1;
328 /*
329 * This is just the offset of this dquot within its buffer
330 * (which is currently 1 FSB and probably won't change).
331 * Hence 32 bits for this offset should be just fine.
332 * Alternatively, we can store (bufoffset / sizeof(xfs_dqblk_t))
333 * here, and recompute it at recovery time.
334 */
335 lp->qli_format.qlf_boffset = (__uint32_t)dqp->q_bufoffset;
336 }
337
338 /*------------------ QUOTAOFF LOG ITEMS -------------------*/
339
/* Convert a generic log item back to its containing quotaoff log item. */
static inline struct xfs_qoff_logitem *QOFF_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_qoff_logitem, qql_item);
}
344
345
/*
 * This returns the number of iovecs needed to log the given quotaoff item.
 * We only need 1 iovec for a quotaoff item.  It just logs the
 * quotaoff_log_format structure.
 */
STATIC uint
xfs_qm_qoff_logitem_size(
	struct xfs_log_item	*lip)
{
	return 1;
}
357
358 /*
359 * This is called to fill in the vector of log iovecs for the
360 * given quotaoff log item. We use only 1 iovec, and we point that
361 * at the quotaoff_log_format structure embedded in the quotaoff item.
362 * It is at this point that we assert that all of the extent
363 * slots in the quotaoff item have been filled.
364 */
365 STATIC void
366 xfs_qm_qoff_logitem_format(
367 struct xfs_log_item *lip,
368 struct xfs_log_iovec *log_vector)
369 {
370 struct xfs_qoff_logitem *qflip = QOFF_ITEM(lip);
371
372 ASSERT(qflip->qql_format.qf_type == XFS_LI_QUOTAOFF);
373
374 log_vector->i_addr = &qflip->qql_format;
375 log_vector->i_len = sizeof(xfs_qoff_logitem_t);
376 log_vector->i_type = XLOG_REG_TYPE_QUOTAOFF;
377 qflip->qql_format.qf_size = 1;
378 }
379
/*
 * Pinning has no meaning for a quotaoff item, so just return.
 */
STATIC void
xfs_qm_qoff_logitem_pin(
	struct xfs_log_item	*lip)
{
}
388
/*
 * Since pinning has no meaning for a quotaoff item, unpinning does
 * not either.
 */
STATIC void
xfs_qm_qoff_logitem_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
}
399
/*
 * Quotaoff items have no locking and nothing to push, so always report
 * them as locked; the AIL pushing code then leaves them alone.
 */
STATIC uint
xfs_qm_qoff_logitem_trylock(
	struct xfs_log_item	*lip)
{
	return XFS_ITEM_LOCKED;
}
409
/*
 * Quotaoff items have no locking, so there is nothing to release here.
 */
STATIC void
xfs_qm_qoff_logitem_unlock(
	struct xfs_log_item	*lip)
{
}
419
/*
 * The quotaoff-start-item is logged only once and cannot be moved in the log,
 * so simply return the lsn at which it's been logged.
 */
STATIC xfs_lsn_t
xfs_qm_qoff_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	return lsn;
}
431
/*
 * There isn't much you can do to push on a quotaoff item.  It is simply
 * stuck waiting for the log to be flushed to disk.
 */
STATIC void
xfs_qm_qoff_logitem_push(
	struct xfs_log_item	*lip)
{
}
441
442
/*
 * Committed callback for the quotaoff-end log item: once the end item
 * is committed, the matching quotaoff-start item can be retired.  Pull
 * the start item off the AIL and free both items.
 */
STATIC xfs_lsn_t
xfs_qm_qoffend_logitem_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_qoff_logitem	*qfe = QOFF_ITEM(lip);
	struct xfs_qoff_logitem	*qfs = qfe->qql_start_lip;
	struct xfs_ail		*ailp = qfs->qql_item.li_ailp;

	/*
	 * Delete the qoff-start logitem from the AIL.
	 * xfs_trans_ail_delete() drops the AIL lock.
	 */
	spin_lock(&ailp->xa_lock);
	xfs_trans_ail_delete(ailp, (xfs_log_item_t *)qfs);

	kmem_free(qfs);
	kmem_free(qfe);
	/*
	 * NOTE(review): (xfs_lsn_t)-1 appears to tell the caller the item
	 * has been freed and must not be re-inserted into the AIL --
	 * confirm against xfs_trans_committed()/CIL processing.
	 */
	return (xfs_lsn_t)-1;
}
463
/*
 * XXX rcc - don't know quite what to do with this.  I think we can
 * just ignore it.  The only time that isn't the case is if we allow
 * the client to somehow see that quotas have been turned off in which
 * we can't allow that to get back until the quotaoff hits the disk.
 * So how would that happen?  Also, do we need different routines for
 * quotaoff start and quotaoff end?  I suspect the answer is yes but
 * to be sure, I need to look at the recovery code and see how quota off
 * recovery is handled (do we roll forward or back or do something else).
 * If we roll forwards or backwards, then we need two separate routines,
 * one that does nothing and one that stamps in the lsn that matters
 * (truly makes the quotaoff irrevocable).  If we do something else,
 * then maybe we don't need two.
 */
STATIC void
xfs_qm_qoff_logitem_committing(
	struct xfs_log_item	*lip,
	xfs_lsn_t		commit_lsn)
{
	/* Intentionally empty; see the open question above. */
}
484
/*
 * Ops vector for quotaoff-end log items.  Identical to the start-item
 * vector below except for .iop_committed, which also retires the
 * matching quotaoff-start item.
 */
static const struct xfs_item_ops xfs_qm_qoffend_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoffend_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};
496
/*
 * This is the ops vector shared by all quotaoff-start log items.
 */
static const struct xfs_item_ops xfs_qm_qoff_logitem_ops = {
	.iop_size	= xfs_qm_qoff_logitem_size,
	.iop_format	= xfs_qm_qoff_logitem_format,
	.iop_pin	= xfs_qm_qoff_logitem_pin,
	.iop_unpin	= xfs_qm_qoff_logitem_unpin,
	.iop_trylock	= xfs_qm_qoff_logitem_trylock,
	.iop_unlock	= xfs_qm_qoff_logitem_unlock,
	.iop_committed	= xfs_qm_qoff_logitem_committed,
	.iop_push	= xfs_qm_qoff_logitem_push,
	.iop_committing = xfs_qm_qoff_logitem_committing
};
511
/*
 * Allocate and initialize a quotaoff item of the correct quota type(s).
 *
 * @mp:    mount the item belongs to.
 * @start: the corresponding quotaoff-start item when building the end
 *         item; NULL when building the start item itself.  This also
 *         selects which ops vector is installed.
 * @flags: quota type flags stored in the log format structure.
 *
 * Returns a zeroed, initialized item; the allocation sleeps (KM_SLEEP)
 * and therefore cannot fail.  Ownership passes to the caller; the end
 * item's committed handler eventually frees both items.
 */
struct xfs_qoff_logitem *
xfs_qm_qoff_logitem_init(
	struct xfs_mount	*mp,
	struct xfs_qoff_logitem	*start,
	uint			flags)
{
	struct xfs_qoff_logitem	*qf;

	qf = kmem_zalloc(sizeof(struct xfs_qoff_logitem), KM_SLEEP);

	xfs_log_item_init(mp, &qf->qql_item, XFS_LI_QUOTAOFF, start ?
			&xfs_qm_qoffend_logitem_ops : &xfs_qm_qoff_logitem_ops);
	/*
	 * NOTE(review): xfs_log_item_init() presumably sets li_mountp
	 * already, which would make this assignment redundant -- confirm
	 * before removing.
	 */
	qf->qql_item.li_mountp = mp;
	qf->qql_format.qf_type = XFS_LI_QUOTAOFF;
	qf->qql_format.qf_flags = flags;
	qf->qql_start_lip = start;
	return qf;
}