{
struct wb_writeback_work *work;
+ if (!wb_has_dirty_io(wb))
+ return;
+
/*
 * This is WB_SYNC_NONE writeback, so if allocation fails just
 * wakeup the thread for old dirty data writeback
 */
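The hunk above is in __wb_start_writeback(): returning early when the bdi_writeback has no dirty IO avoids allocating and queueing a work item that would find nothing to do. As a rough sketch of the question wb_has_dirty_io() answers (the in-tree helper in this series reduces to a cached state-bit test, so take this as the semantics, not the actual implementation):

	/* Hypothetical illustration only: does any of the wb's inode
	 * lists hold work? The real helper may test a cached bit that
	 * the list-manipulation paths keep in sync. */
	static bool wb_has_dirty_io_sketch(struct bdi_writeback *wb)
	{
		return !list_empty(&wb->b_dirty) ||
		       !list_empty(&wb->b_io) ||
		       !list_empty(&wb->b_more_io);
	}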
if (!nr_pages)
	nr_pages = get_nr_dirty_pages();
rcu_read_lock();
- list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
- if (!bdi_has_dirty_io(bdi))
- continue;
+ list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
- }
rcu_read_unlock();
}
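This second hunk is in wakeup_flusher_threads(). The per-bdi bdi_has_dirty_io() test becomes redundant once __wb_start_writeback() checks its own wb, so the loop collapses to a single statement. Reconstructed from the surrounding kernel code of this era (a sketch, not the exact tree contents), the function ends up as:

	void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
	{
		struct backing_dev_info *bdi;

		if (!nr_pages)
			nr_pages = get_nr_dirty_pages();

		rcu_read_lock();
		/* every bdi now gets the call; clean ones return
		 * immediately inside __wb_start_writeback() */
		list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
			__wb_start_writeback(&bdi->wb, nr_pages, false, reason);
		rcu_read_unlock();
	}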
.nr_pages = nr,
.reason = reason,
};
+ struct backing_dev_info *bdi = sb->s_bdi;
- if (sb->s_bdi == &noop_backing_dev_info)
+ if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- wb_queue_work(&sb->s_bdi->wb, &work);
+ wb_queue_work(&bdi->wb, &work);
wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);
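The third hunk is in writeback_inodes_sb_nr(). The stray .nr_pages/.reason initializers at its top are the tail of an on-stack work item; reconstructed from the kernel of this era it reads approximately:

	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

The new bdi local caches sb->s_bdi for the two uses below it, and the added !bdi_has_dirty_io(bdi) test lets a clean filesystem return before queueing work it would only block in wait_for_completion() for.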
.reason = WB_REASON_SYNC,
.for_sync = 1,
};
+ struct backing_dev_info *bdi = sb->s_bdi;
/* Nothing to do? */
- if (sb->s_bdi == &noop_backing_dev_info)
+ if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
return;
WARN_ON(!rwsem_is_locked(&sb->s_umount));
- wb_queue_work(&sb->s_bdi->wb, &work);
+ wb_queue_work(&bdi->wb, &work);
wait_for_completion(&done);
wait_sb_inodes(sb);
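The final hunk applies the same treatment to sync_inodes_sb(); its truncated .reason/.for_sync initializers close an on-stack WB_SYNC_ALL work item along these lines (again a reconstruction, not the exact tree contents):

	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
		.for_sync	= 1,
	};

With the early return in place, a clean filesystem skips both the queue-and-wait and the wait_sb_inodes() walk that follows it.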