#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
+#include "xfs_trans_priv.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
else
xfs_log_force(mp, 0);
error = xfs_qm_sync(mp, SYNC_TRYLOCK);
+
+ /* start pushing all the metadata that is currently dirty */
+ xfs_ail_push_all(mp->m_ail);
}
/* queue us up again */
mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
if (nr_to_scan) {
- /* kick background reclaimer */
+ /* kick background reclaimer and push the AIL */
xfs_syncd_queue_reclaim(mp);
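+ /*
+ * Pushing the AIL expedites the writeback of dirty metadata, such
+ * as dirty inodes, so that a later reclaim pass can free it.
+ */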
+ xfs_ail_push_all(mp->m_ail);
if (!(gfp_mask & __GFP_FS))
return -1;
break;
case XLOG_STATE_COVER_NEED:
case XLOG_STATE_COVER_NEED2:
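+ /*
+ * The log can only be covered when nothing is left dirty: the
+ * AIL must be empty and the iclogs must hold no active data.
+ */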
- if (!xfs_trans_ail_tail(log->l_ailp) &&
+ if (!xfs_ail_min_lsn(log->l_ailp) &&
xlog_iclogs_empty(log)) {
if (log->l_covered_state == XLOG_STATE_COVER_NEED)
log->l_covered_state = XLOG_STATE_COVER_DONE;
xfs_lsn_t tail_lsn;
struct log *log = mp->m_log;
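+ /*
+ * The log tail is the lsn of the oldest item in the AIL; if the
+ * AIL is empty, fall back to the last lsn synced to disk.
+ */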
- tail_lsn = xfs_trans_ail_tail(mp->m_ail);
+ tail_lsn = xfs_ail_min_lsn(mp->m_ail);
if (!tail_lsn)
tail_lsn = atomic64_read(&log->l_last_sync_lsn);
* the filesystem is shutting down.
*/
if (!XLOG_FORCED_SHUTDOWN(log))
- xfs_trans_ail_push(log->l_ailp, threshold_lsn);
+ xfs_ail_push(log->l_ailp, threshold_lsn);
}
/*
return list_first_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
}

+ /*
+ * Return a pointer to the last item in the AIL. If the AIL is empty, then
+ * return NULL.
+ */
+static xfs_log_item_t *
+xfs_ail_max(
+ struct xfs_ail *ailp)
+{
+ if (list_empty(&ailp->xa_ail))
+ return NULL;
+
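+ /* the AIL is kept in ascending lsn order, so the tail holds the maximum */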
+ return list_entry(ailp->xa_ail.prev, xfs_log_item_t, li_ail);
+}
+
/*
* Return a pointer to the item which follows the given item in the AIL. If
* the given item is the last item in the list, then return NULL.
* item in the AIL.
*/
xfs_lsn_t
-xfs_trans_ail_tail(
+xfs_ail_min_lsn(
struct xfs_ail *ailp)
{
xfs_lsn_t lsn = 0;
return lsn;
}

+/*
+ * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
+ */
+static xfs_lsn_t
+xfs_ail_max_lsn(
+ struct xfs_ail *ailp)
+{
+ xfs_lsn_t lsn = 0;
+ xfs_log_item_t *lip;
+
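+ /* hold the AIL lock so the list tail cannot change under us */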
+ spin_lock(&ailp->xa_lock);
+ lip = xfs_ail_max(ailp);
+ if (lip)
+ lsn = lip->li_lsn;
+ spin_unlock(&ailp->xa_lock);
+
+ return lsn;
+}
+
/*
* AIL traversal cursor initialisation.
*
* any of the objects, so the lock is not needed.
*/
void
-xfs_trans_ail_push(
+xfs_ail_push(
struct xfs_ail *ailp,
xfs_lsn_t threshold_lsn)
{
queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0);
}

+/*
+ * Push out all items in the AIL immediately.
+ */
+void
+xfs_ail_push_all(
+ struct xfs_ail *ailp)
+{
+ xfs_lsn_t threshold_lsn = xfs_ail_max_lsn(ailp);
+
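+ /* a zero lsn means the AIL is empty, so there is nothing to push */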
+ if (threshold_lsn)
+ xfs_ail_push(ailp, threshold_lsn);
+}
+
/*
* This is to be called when an item is unlocked that may have
* been in the AIL. It will wake up the first member of the AIL
xfs_trans_ail_delete_bulk(ailp, &lip, 1);
}

-void xfs_trans_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push(struct xfs_ail *, xfs_lsn_t);
+void xfs_ail_push_all(struct xfs_ail *);
+xfs_lsn_t xfs_ail_min_lsn(struct xfs_ail *ailp);
+
void xfs_trans_unlocked_item(struct xfs_ail *,
xfs_log_item_t *);
-xfs_lsn_t xfs_trans_ail_tail(struct xfs_ail *ailp);
-
struct xfs_log_item *xfs_trans_ail_cursor_first(struct xfs_ail *ailp,
struct xfs_ail_cursor *cur,
xfs_lsn_t lsn);