spin_lock(&wb->list_lock);
wb_put(wb);		/* @wb is not dereferenced past this point */
- if (likely(wb == inode_to_wb(inode)))
+	/* i_wb may have changed in between; can't use inode_to_wb() */
+ if (likely(wb == inode->i_wb))
return wb; /* @inode already has ref */
spin_unlock(&wb->list_lock);
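/*
 * For context, a sketch of the retry loop the hunk above sits in, modeled
 * on locked_inode_to_wb_and_lock_list() in fs/fs-writeback.c (the
 * surrounding lines are reconstruction, not part of this diff):
 */
static struct bdi_writeback *
locked_inode_to_wb_and_lock_list(struct inode *inode)
	__releases(&inode->i_lock)
	__acquires(&wb->list_lock)
{
	while (true) {
		struct bdi_writeback *wb = inode_to_wb(inode);

		/*
		 * A transitory ref pins @wb across the i_lock -> list_lock
		 * handover; list_lock nests outside i_lock, so i_lock must
		 * be dropped before list_lock is taken.
		 */
		wb_get(wb);
		spin_unlock(&inode->i_lock);
		spin_lock(&wb->list_lock);
		wb_put(wb);	/* @wb is not dereferenced past this point */

		/* i_wb may have changed in between; can't use inode_to_wb() */
		if (likely(wb == inode->i_wb))
			return wb;	/* @inode already holds a ref */

		/* raced with an i_wb switch; retry under i_lock */
		spin_unlock(&wb->list_lock);
		cpu_relax();
		spin_lock(&inode->i_lock);
	}
}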
* Once set, ->i_wb never becomes NULL while the inode is alive.
* Start transaction iff ->i_wb is visible.
*/
- if (inode && inode_to_wb(inode)) {
+ if (inode && inode_to_wb_is_valid(inode)) {
		struct bdi_writeback *wb;
		bool locked, congested;

		wb = unlocked_inode_to_wb_begin(inode, &locked);
		congested = wb_congested(wb->congested, cong_bits);
		unlocked_inode_to_wb_end(inode, locked);
		return congested;
	}
+/**
+ * inode_to_wb_is_valid - test whether an inode has a wb associated
+ * @inode: inode of interest
+ *
+ * Returns %true if @inode has a wb associated with it.  May be called
+ * without any locking.
+ */
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return inode->i_wb;
+}
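/*
 * Example of the intended lockless gate (a sketch, not part of this patch):
 * because ->i_wb, once set, stays valid for the inode's lifetime, the bare
 * pointer test above is safe without locks; actually dereferencing the wb
 * still requires one of the locks checked by inode_to_wb() below, or an
 * unlocked_inode_to_wb_begin/end() transaction.
 */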
+
/**
* inode_to_wb - determine the wb of an inode
* @inode: inode of interest
*
- * Returns the wb @inode is currently associated with.
+ * Returns the wb @inode is currently associated with.  The caller must hold
+ * one of @inode->i_lock, @inode->i_mapping->tree_lock, or the associated
+ * wb's list_lock.
*/
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
+#ifdef CONFIG_LOCKDEP
+ WARN_ON_ONCE(debug_locks &&
+ (!lockdep_is_held(&inode->i_lock) &&
+ !lockdep_is_held(&inode->i_mapping->tree_lock) &&
+ !lockdep_is_held(&inode->i_wb->list_lock)));
+#endif
return inode->i_wb;
}
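/*
 * Illustrative caller (a sketch, not part of this patch): holding any one
 * of the three locks named above satisfies the lockdep annotation.
 */
static inline void example_inode_wb_user(struct inode *inode)
{
	struct bdi_writeback *wb;

	spin_lock(&inode->i_lock);
	wb = inode_to_wb(inode);	/* ok: i_lock pins the association */
	/* ... use @wb while i_lock is held ... */
	spin_unlock(&inode->i_lock);
}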
if (unlikely(*lockedp))
spin_lock_irq(&inode->i_mapping->tree_lock);
- return inode_to_wb(inode);
+
+ /*
+ * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
+	 * The lockdep check in inode_to_wb() would trigger, so deref directly.
+ */
+ return inode->i_wb;
}
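/*
 * The matching end of the transaction, sketched from the kernel's
 * unlocked_inode_to_wb_end() (reconstruction, not one of the hunks above):
 * drop tree_lock if _begin took it, then leave the RCU read section.
 */
static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
{
	if (unlikely(locked))
		spin_unlock_irq(&inode->i_mapping->tree_lock);

	rcu_read_unlock();
}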
return &bdi->wb;
}
+static inline bool inode_to_wb_is_valid(struct inode *inode)
+{
+ return true;
+}
+
static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
{
return &inode_to_bdi(inode)->wb;
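/*
 * With these stubs, callers compile unchanged when cgroup writeback is
 * disabled (sketch, not part of the diff): the validity check is
 * constant-true and the wb is always the bdi-embedded root wb.
 */
static inline struct bdi_writeback *example_get_wb(struct inode *inode)
{
	if (inode_to_wb_is_valid(inode))	/* always true here */
		return inode_to_wb(inode);	/* &inode_to_bdi(inode)->wb */
	return NULL;
}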