writeback: remove unused bdi_pending_list

/*
 * fs/fs-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  ie: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/tracepoint.h>
#include "internal.h"

/*
 * 4MB minimal write chunk size
 */
#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

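/*
 * Worked example (an added note, not from the original source): the
 * constant above is 4096KB expressed in pages.  With 4KB pages,
 * PAGE_CACHE_SHIFT is 12, so MIN_WRITEBACK_PAGES = 4096 >> (12 - 10)
 * = 1024 pages, i.e. 4MB.  With 64KB pages (PAGE_CACHE_SHIFT == 16)
 * it is 4096 >> 6 = 64 pages -- still 4MB of data.
 */
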
/*
 * Passed into wb_writeback(), essentially a subset of writeback_control
 */
struct wb_writeback_work {
	long nr_pages;
	struct super_block *sb;
	unsigned long *older_than_this;
	enum writeback_sync_modes sync_mode;
	unsigned int tagged_writepages:1;
	unsigned int for_kupdate:1;
	unsigned int range_cyclic:1;
	unsigned int for_background:1;
	enum wb_reason reason;		/* why was writeback initiated? */

	struct list_head list;		/* pending work list */
	struct completion *done;	/* set if the caller waits */
};

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback waiting to be handled against a
 * backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_writeback_running, &bdi->state);
}
EXPORT_SYMBOL(writeback_in_progress);

static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (strcmp(sb->s_type->name, "bdev") == 0)
		return inode->i_mapping->backing_dev_info;

	return sb->s_bdi;
}

static inline struct inode *wb_inode(struct list_head *head)
{
	return list_entry(head, struct inode, i_wb_list);
}

/*
 * Include the creation of the trace points after defining the
 * wb_writeback_work structure and inline functions so that the definition
 * remains local to this file.
 */
#define CREATE_TRACE_POINTS
#include <trace/events/writeback.h>

/* Wakeup flusher thread or forker thread to fork it. Requires bdi->wb_lock. */
static void bdi_wakeup_flusher(struct backing_dev_info *bdi)
{
	if (bdi->wb.task) {
		wake_up_process(bdi->wb.task);
	} else {
		/*
		 * The bdi thread isn't there, wake up the forker thread which
		 * will create and run it.
		 */
		wake_up_process(default_backing_dev_info.wb.task);
	}
}

static void bdi_queue_work(struct backing_dev_info *bdi,
			   struct wb_writeback_work *work)
{
	trace_writeback_queue(bdi, work);

	spin_lock_bh(&bdi->wb_lock);
	list_add_tail(&work->list, &bdi->work_list);
	if (!bdi->wb.task)
		trace_writeback_nothread(bdi, work);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

static void
__bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
		      bool range_cyclic, enum wb_reason reason)
{
	struct wb_writeback_work *work;

	/*
	 * This is WB_SYNC_NONE writeback, so if allocation fails just
	 * wakeup the thread for old dirty data writeback
	 */
	work = kzalloc(sizeof(*work), GFP_ATOMIC);
	if (!work) {
		if (bdi->wb.task) {
			trace_writeback_nowork(bdi);
			wake_up_process(bdi->wb.task);
		}
		return;
	}

	work->sync_mode	= WB_SYNC_NONE;
	work->nr_pages	= nr_pages;
	work->range_cyclic = range_cyclic;
	work->reason	= reason;

	bdi_queue_work(bdi, work);
}

/**
 * bdi_start_writeback - start writeback
 * @bdi: the backing device to write from
 * @nr_pages: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Description:
 *   This does WB_SYNC_NONE opportunistic writeback. The IO is only
 *   started when this function returns, we make no guarantees on
 *   completion. Caller need not hold sb s_umount semaphore.
 *
 */
void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
			 enum wb_reason reason)
{
	__bdi_start_writeback(bdi, nr_pages, true, reason);
}

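/*
 * Hypothetical usage sketch (added for illustration; the call below is
 * not part of this file).  A caller that wants roughly 16MB of dirty
 * data pushed out for a device, without waiting for completion, could
 * do (assuming 4KB pages):
 *
 *	bdi_start_writeback(bdi, 4096, WB_REASON_TRY_TO_FREE_PAGES);
 *
 * This queues a WB_SYNC_NONE work item and wakes the flusher thread;
 * the pages are written back asynchronously some time later.
 */
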
/**
 * bdi_start_background_writeback - start background writeback
 * @bdi: the backing device to write from
 *
 * Description:
 *   This makes sure WB_SYNC_NONE background writeback happens. When
 *   this function returns, it is only guaranteed that for given BDI
 *   some IO is happening if we are over background dirty threshold.
 *   Caller need not hold sb s_umount semaphore.
 */
void bdi_start_background_writeback(struct backing_dev_info *bdi)
{
	/*
	 * We just wake up the flusher thread. It will perform background
	 * writeback as soon as there is no other work to do.
	 */
	trace_writeback_wake_background(bdi);
	spin_lock_bh(&bdi->wb_lock);
	bdi_wakeup_flusher(bdi);
	spin_unlock_bh(&bdi->wb_lock);
}

/*
 * Remove the inode from the writeback list it is on.
 */
void inode_wb_list_del(struct inode *inode)
{
	struct backing_dev_info *bdi = inode_to_bdi(inode);

	spin_lock(&bdi->wb.list_lock);
	list_del_init(&inode->i_wb_list);
	spin_unlock(&bdi->wb.list_lock);
}

/*
 * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the b_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	if (!list_empty(&wb->b_dirty)) {
		struct inode *tail;

		tail = wb_inode(wb->b_dirty.next);
		if (time_before(inode->dirtied_when, tail->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_wb_list, &wb->b_dirty);
}

/*
 * requeue inode for re-scanning after bdi->b_io list is exhausted.
 */
static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
{
	assert_spin_locked(&wb->list_lock);
	list_move(&inode->i_wb_list, &wb->b_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	inode->i_state &= ~I_SYNC;
	/* If inode is clean and unused, put it into LRU now... */
	inode_add_lru(inode);
	/* Waiters must see I_SYNC cleared before being woken up */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

static bool inode_dirtied_after(struct inode *inode, unsigned long t)
{
	bool ret = time_after(inode->dirtied_when, t);
#ifndef CONFIG_64BIT
	/*
	 * For inodes being constantly redirtied, dirtied_when can get stuck.
	 * It _appears_ to be in the future, but is actually in distant past.
	 * This test is necessary to prevent such wrapped-around relative times
	 * from permanently stopping the whole bdi writeback.
	 */
	ret = ret && time_before_eq(inode->dirtied_when, jiffies);
#endif
	return ret;
}

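/*
 * Worked example (an added note, not from the original source): jiffies
 * comparisons use wrapping arithmetic, so on a 32-bit kernel
 * time_after() only gives the intuitive answer when the two values are
 * less than 2^31 ticks apart -- about 24.8 days at HZ=1000.  A
 * dirtied_when stamp older than that can make time_after(dirtied_when, t)
 * report "in the future" for a timestamp that is really in the distant
 * past; the extra time_before_eq(dirtied_when, jiffies) check above
 * filters those wrapped values out.
 */
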
/*
 * Move expired (dirtied before work->older_than_this) dirty inodes from
 * @delaying_queue to @dispatch_queue.
 */
static int move_expired_inodes(struct list_head *delaying_queue,
			       struct list_head *dispatch_queue,
			       struct wb_writeback_work *work)
{
	LIST_HEAD(tmp);
	struct list_head *pos, *node;
	struct super_block *sb = NULL;
	struct inode *inode;
	int do_sb_sort = 0;
	int moved = 0;

	while (!list_empty(delaying_queue)) {
		inode = wb_inode(delaying_queue->prev);
		if (work->older_than_this &&
		    inode_dirtied_after(inode, *work->older_than_this))
			break;
		if (sb && sb != inode->i_sb)
			do_sb_sort = 1;
		sb = inode->i_sb;
		list_move(&inode->i_wb_list, &tmp);
		moved++;
	}

	/* just one sb in list, splice to dispatch_queue and we're done */
	if (!do_sb_sort) {
		list_splice(&tmp, dispatch_queue);
		goto out;
	}

	/* Move inodes from one superblock together */
	while (!list_empty(&tmp)) {
		sb = wb_inode(tmp.prev)->i_sb;
		list_for_each_prev_safe(pos, node, &tmp) {
			inode = wb_inode(pos);
			if (inode->i_sb == sb)
				list_move(&inode->i_wb_list, dispatch_queue);
		}
	}
out:
	return moved;
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 * Before
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    gf         edc     BA
 * After
 *         newly dirtied     b_dirty    b_io    b_more_io
 *         =============>    g          fBAedc
 *                                      |
 *                                      +--> dequeue for IO
 */
static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
{
	int moved;
	assert_spin_locked(&wb->list_lock);
	list_splice_init(&wb->b_more_io, &wb->b_io);
	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
	trace_writeback_queue_io(wb, work, moved);
}

static int write_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
		trace_writeback_write_inode_start(inode, wbc);
		ret = inode->i_sb->s_op->write_inode(inode, wbc);
		trace_writeback_write_inode(inode, wbc);
		return ret;
	}
	return 0;
}

/*
 * Wait for writeback on an inode to complete. Called with i_lock held.
 * Caller must make sure inode cannot go away when we drop i_lock.
 */
static void __inode_wait_for_writeback(struct inode *inode)
	__releases(inode->i_lock)
	__acquires(inode->i_lock)
{
	DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
	wait_queue_head_t *wqh;

	wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	while (inode->i_state & I_SYNC) {
		spin_unlock(&inode->i_lock);
		__wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
		spin_lock(&inode->i_lock);
	}
}

/*
 * Wait for writeback on an inode to complete. Caller must have inode pinned.
 */
void inode_wait_for_writeback(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	__inode_wait_for_writeback(inode);
	spin_unlock(&inode->i_lock);
}

/*
 * Sleep until I_SYNC is cleared. This function must be called with i_lock
 * held and drops it. It is aimed for callers not holding any inode reference
 * so once i_lock is dropped, inode can go away.
 */
static void inode_sleep_on_writeback(struct inode *inode)
	__releases(inode->i_lock)
{
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
	int sleep;

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	sleep = inode->i_state & I_SYNC;
	spin_unlock(&inode->i_lock);
	if (sleep)
		schedule();
	finish_wait(wqh, &wait);
}

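/*
 * Locking sketch (an added note, not from the original source): the
 * difference between the two wait helpers above is who may hold a
 * reference.  A caller without a reference would use, roughly:
 *
 *	spin_lock(&inode->i_lock);
 *	if (inode->i_state & I_SYNC)
 *		inode_sleep_on_writeback(inode);	(drops i_lock)
 *
 * and must not touch the inode afterwards, since it may already have
 * been freed.  Callers holding a reference (or I_WILL_FREE) can use
 * inode_wait_for_writeback() and re-inspect the inode when it returns.
 */
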
/*
 * Find proper writeback list for the inode depending on its current state and
 * possibly also change of its state while we were doing writeback.  Here we
 * handle things such as livelock prevention or fairness of writeback among
 * inodes. This function can be called only by flusher thread - no one else
 * processes all inodes in writeback lists and requeueing inodes behind flusher
 * thread's back can have unexpected consequences.
 */
static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
			  struct writeback_control *wbc)
{
	if (inode->i_state & I_FREEING)
		return;

	/*
	 * Sync livelock prevention. Each inode is tagged and synced in one
	 * shot. If still dirty, it will be redirty_tail()'ed below.  Update
	 * the dirty time to prevent enqueue and sync it again.
	 */
	if ((inode->i_state & I_DIRTY) &&
	    (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
		inode->dirtied_when = jiffies;

	if (wbc->pages_skipped) {
		/*
		 * writeback is not making progress due to locked
		 * buffers. Skip this inode for now.
		 */
		redirty_tail(inode, wb);
		return;
	}

	if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
		/*
		 * We didn't write back all the pages.  nfs_writepages()
		 * sometimes bails out without doing anything.
		 */
		if (wbc->nr_to_write <= 0) {
			/* Slice used up. Queue for next turn. */
			requeue_io(inode, wb);
		} else {
			/*
			 * Writeback blocked by something other than
			 * congestion. Delay the inode for some time to
			 * avoid spinning on the CPU (100% iowait)
			 * retrying writeback of the dirty page/inode
			 * that cannot be performed immediately.
			 */
			redirty_tail(inode, wb);
		}
	} else if (inode->i_state & I_DIRTY) {
		/*
		 * Filesystems can dirty the inode during writeback operations,
		 * such as delayed allocation during submission or metadata
		 * updates after data IO completion.
		 */
		redirty_tail(inode, wb);
	} else {
		/* The inode is clean. Remove from writeback lists. */
		list_del_init(&inode->i_wb_list);
	}
}

/*
 * Write out an inode and its dirty pages. Do not update the writeback list
 * linkage. That is left to the caller. The caller is also responsible for
 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct address_space *mapping = inode->i_mapping;
	long nr_to_write = wbc->nr_to_write;
	unsigned dirty;
	int ret;

	WARN_ON(!(inode->i_state & I_SYNC));

	trace_writeback_single_inode_start(inode, wbc, nr_to_write);

	ret = do_writepages(mapping, wbc);

	/*
	 * Make sure to wait on the data before writing out the metadata.
	 * This is important for filesystems that modify metadata on data
	 * I/O completion.
	 */
	if (wbc->sync_mode == WB_SYNC_ALL) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	/*
	 * Some filesystems may redirty the inode during the writeback
	 * due to delalloc, clear dirty metadata flags right before
	 * write_inode()
	 */
	spin_lock(&inode->i_lock);
	/* Clear I_DIRTY_PAGES if we've written out all dirty pages */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		inode->i_state &= ~I_DIRTY_PAGES;
	dirty = inode->i_state & I_DIRTY;
	inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
	spin_unlock(&inode->i_lock);
	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wbc);
		if (ret == 0)
			ret = err;
	}
	trace_writeback_single_inode(inode, wbc, nr_to_write);
	return ret;
}

/*
 * Write out an inode's dirty pages. Either the caller has an active reference
 * on the inode or the inode has I_WILL_FREE set.
 *
 * This function is designed for writing back one inode at a time, e.g. when
 * called from within a filesystem. The flusher thread uses
 * __writeback_single_inode() instead and does more profound writeback list
 * handling in writeback_sb_inodes().
 */
static int
writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
		       struct writeback_control *wbc)
{
	int ret = 0;

	spin_lock(&inode->i_lock);
	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if (inode->i_state & I_SYNC) {
		if (wbc->sync_mode != WB_SYNC_ALL)
			goto out;
		/*
		 * It's a data-integrity sync. We must wait. Since callers hold
		 * inode reference or inode has I_WILL_FREE set, it cannot go
		 * away under us.
		 */
		__inode_wait_for_writeback(inode);
	}
	WARN_ON(inode->i_state & I_SYNC);
	/*
	 * Skip inode if it is clean. We don't want to mess with writeback
	 * lists in this function since flusher thread may be doing for example
	 * sync in parallel and if we move the inode, it could get skipped. So
	 * here we make sure inode is on some writeback list and leave it there
	 * unless we have completely cleaned the inode.
	 */
	if (!(inode->i_state & I_DIRTY))
		goto out;
	inode->i_state |= I_SYNC;
	spin_unlock(&inode->i_lock);

	ret = __writeback_single_inode(inode, wbc);

	spin_lock(&wb->list_lock);
	spin_lock(&inode->i_lock);
	/*
	 * If inode is clean, remove it from writeback lists. Otherwise don't
	 * touch it. See comment above for explanation.
	 */
	if (!(inode->i_state & I_DIRTY))
		list_del_init(&inode->i_wb_list);
	spin_unlock(&wb->list_lock);
	inode_sync_complete(inode);
out:
	spin_unlock(&inode->i_lock);
	return ret;
}

static long writeback_chunk_size(struct backing_dev_info *bdi,
				 struct wb_writeback_work *work)
{
	long pages;

	/*
	 * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
	 * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
	 * here avoids calling into writeback_inodes_wb() more than once.
	 *
	 * The intended call sequence for WB_SYNC_ALL writeback is:
	 *
	 *      wb_writeback()
	 *          writeback_sb_inodes()       <== called only once
	 *              write_cache_pages()     <== called once for each inode
	 *                  (quickly) tag currently dirty pages
	 *                  (maybe slowly) sync all tagged pages
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		pages = LONG_MAX;
	else {
		pages = min(bdi->avg_write_bandwidth / 2,
			    global_dirty_limit / DIRTY_SCOPE);
		pages = min(pages, work->nr_pages);
		pages = round_down(pages + MIN_WRITEBACK_PAGES,
				   MIN_WRITEBACK_PAGES);
	}

	return pages;
}

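/*
 * Worked example (an added note, not from the original source): suppose
 * a device with avg_write_bandwidth of 25600 pages/s (~100MB/s with 4KB
 * pages), global_dirty_limit of 51200 pages and DIRTY_SCOPE of 8.  Then
 * pages = min(12800, 6400) = 6400, further capped by work->nr_pages,
 * and round_down(6400 + 1024, 1024) = 7168 pages.  The "add a chunk,
 * then round down" step rounds to a multiple of the 4MB chunk while
 * never producing less than one chunk.
 */
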
/*
 * Write a portion of b_io inodes which belong to @sb.
 *
 * Return the number of pages and/or inodes written.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	struct writeback_control wbc = {
		.sync_mode		= work->sync_mode,
		.tagged_writepages	= work->tagged_writepages,
		.for_kupdate		= work->for_kupdate,
		.for_background		= work->for_background,
		.range_cyclic		= work->range_cyclic,
		.range_start		= 0,
		.range_end		= LLONG_MAX,
	};
	unsigned long start_time = jiffies;
	long write_chunk;
	long wrote = 0;  /* count both pages and inodes */

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);

		if (inode->i_sb != sb) {
			if (work->sb) {
				/*
				 * We only want to write back data for this
				 * superblock, move all inodes not belonging
				 * to it back onto the dirty list.
				 */
				redirty_tail(inode, wb);
				continue;
			}

			/*
			 * The inode belongs to a different superblock.
			 * Bounce back to the caller to unpin this and
			 * pin the next superblock.
			 */
			break;
		}

		/*
		 * Don't bother with new inodes or inodes being freed, first
		 * kind does not need periodic writeout yet, and for the latter
		 * kind writeout is handled by the freer.
		 */
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			redirty_tail(inode, wb);
			continue;
		}
		if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
			/*
			 * If this inode is locked for writeback and we are not
			 * doing writeback-for-data-integrity, move it to
			 * b_more_io so that writeback can proceed with the
			 * other inodes on s_io.
			 *
			 * We'll have another go at writing back this inode
			 * when we completed a full scan of b_io.
			 */
			spin_unlock(&inode->i_lock);
			requeue_io(inode, wb);
			trace_writeback_sb_inodes_requeue(inode);
			continue;
		}
		spin_unlock(&wb->list_lock);

		/*
		 * We already requeued the inode if it had I_SYNC set and we
		 * are doing WB_SYNC_NONE writeback. So this catches only the
		 * WB_SYNC_ALL case.
		 */
		if (inode->i_state & I_SYNC) {
			/* Wait for I_SYNC. This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			/* Inode may be gone, start again */
			spin_lock(&wb->list_lock);
			continue;
		}
		inode->i_state |= I_SYNC;
		spin_unlock(&inode->i_lock);

		write_chunk = writeback_chunk_size(wb->bdi, work);
		wbc.nr_to_write = write_chunk;
		wbc.pages_skipped = 0;

		/*
		 * We use I_SYNC to pin the inode in memory. While it is set
		 * evict_inode() will wait so the inode cannot be freed.
		 */
		__writeback_single_inode(inode, &wbc);

		work->nr_pages -= write_chunk - wbc.nr_to_write;
		wrote += write_chunk - wbc.nr_to_write;
		spin_lock(&wb->list_lock);
		spin_lock(&inode->i_lock);
		if (!(inode->i_state & I_DIRTY))
			wrote++;
		requeue_inode(inode, wb, &wbc);
		inode_sync_complete(inode);
		spin_unlock(&inode->i_lock);
		cond_resched_lock(&wb->list_lock);
		/*
		 * bail out to wb_writeback() often enough to check
		 * background threshold and other termination conditions.
		 */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	return wrote;
}

static long __writeback_inodes_wb(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
{
	unsigned long start_time = jiffies;
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		struct super_block *sb = inode->i_sb;

		if (!grab_super_passive(sb)) {
			/*
			 * grab_super_passive() may fail consistently due to
			 * s_umount being grabbed by someone else. Don't use
			 * requeue_io() to avoid busy retrying the inode/sb.
			 */
			redirty_tail(inode, wb);
			continue;
		}
		wrote += writeback_sb_inodes(sb, wb, work);
		drop_super(sb);

		/* refer to the same tests at the end of writeback_sb_inodes */
		if (wrote) {
			if (time_is_before_jiffies(start_time + HZ / 10UL))
				break;
			if (work->nr_pages <= 0)
				break;
		}
	}
	/* Leave any unwritten inodes on b_io */
	return wrote;
}

long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
			 enum wb_reason reason)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
		.reason		= reason,
	};

	spin_lock(&wb->list_lock);
	if (list_empty(&wb->b_io))
		queue_io(wb, &work);
	__writeback_inodes_wb(wb, &work);
	spin_unlock(&wb->list_lock);

	return nr_pages - work.nr_pages;
}

static bool over_bground_thresh(struct backing_dev_info *bdi)
{
	unsigned long background_thresh, dirty_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	if (global_page_state(NR_FILE_DIRTY) +
	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
		return true;

	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
	    bdi_dirty_limit(bdi, background_thresh))
		return true;

	return false;
}

/*
 * Called under wb->list_lock. If there are multiple wb per bdi,
 * only the flusher working on the first wb should do it.
 */
static void wb_update_bandwidth(struct bdi_writeback *wb,
				unsigned long start_time)
{
	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
}

/*
 * Explicit flushing or periodic writeback of "old" data.
 *
 * Define "old": the first time one of an inode's pages is dirtied, we mark the
 * dirtying-time in the inode's address_space. So this periodic writeback code
 * just walks the superblock inode list, writing back any inodes which are
 * older than a specific point in time.
 *
 * Try to run once per dirty_writeback_interval. But if a writeback event
 * takes longer than a dirty_writeback_interval interval, then leave a
 * one-second gap.
 *
 * older_than_this takes precedence over nr_to_write. So we'll only write back
 * all dirty pages if they are all attached to "old" mappings.
 */
static long wb_writeback(struct bdi_writeback *wb,
			 struct wb_writeback_work *work)
{
	unsigned long wb_start = jiffies;
	long nr_pages = work->nr_pages;
	unsigned long oldest_jif;
	struct inode *inode;
	long progress;

	oldest_jif = jiffies;
	work->older_than_this = &oldest_jif;

	spin_lock(&wb->list_lock);
	for (;;) {
		/*
		 * Stop writeback when nr_pages has been consumed
		 */
		if (work->nr_pages <= 0)
			break;

		/*
		 * Background writeout and kupdate-style writeback may
		 * run forever. Stop them if there is other work to do
		 * so that e.g. sync can proceed. They'll be restarted
		 * after the other works are all done.
		 */
		if ((work->for_background || work->for_kupdate) &&
		    !list_empty(&wb->bdi->work_list))
			break;

		/*
		 * For background writeout, stop when we are below the
		 * background dirty threshold
		 */
		if (work->for_background && !over_bground_thresh(wb->bdi))
			break;

		/*
		 * Kupdate and background works are special and we want to
		 * include all inodes that need writing. Livelock avoidance is
		 * handled by these works yielding to any other work so we are
		 * safe.
		 */
		if (work->for_kupdate) {
			oldest_jif = jiffies -
				msecs_to_jiffies(dirty_expire_interval * 10);
		} else if (work->for_background)
			oldest_jif = jiffies;

		trace_writeback_start(wb->bdi, work);
		if (list_empty(&wb->b_io))
			queue_io(wb, work);
		if (work->sb)
			progress = writeback_sb_inodes(work->sb, wb, work);
		else
			progress = __writeback_inodes_wb(wb, work);
		trace_writeback_written(wb->bdi, work);

		wb_update_bandwidth(wb, wb_start);

		/*
		 * Did we write something? Try for more
		 *
		 * Dirty inodes are moved to b_io for writeback in batches.
		 * The completion of the current batch does not necessarily
		 * mean the overall work is done. So we keep looping as long
		 * as made some progress on cleaning pages or inodes.
		 */
		if (progress)
			continue;
		/*
		 * No more inodes for IO, bail
		 */
		if (list_empty(&wb->b_more_io))
			break;
		/*
		 * Nothing written. Wait for some inode to
		 * become available for writeback. Otherwise
		 * we'll just busyloop.
		 */
		if (!list_empty(&wb->b_more_io)) {
			trace_writeback_wait(wb->bdi, work);
			inode = wb_inode(wb->b_more_io.prev);
			spin_lock(&inode->i_lock);
			spin_unlock(&wb->list_lock);
			/* This function drops i_lock... */
			inode_sleep_on_writeback(inode);
			spin_lock(&wb->list_lock);
		}
	}
	spin_unlock(&wb->list_lock);

	return nr_pages - work->nr_pages;
}

/*
 * Return the next wb_writeback_work struct that hasn't been processed yet.
 */
static struct wb_writeback_work *
get_next_work_item(struct backing_dev_info *bdi)
{
	struct wb_writeback_work *work = NULL;

	spin_lock_bh(&bdi->wb_lock);
	if (!list_empty(&bdi->work_list)) {
		work = list_entry(bdi->work_list.next,
				  struct wb_writeback_work, list);
		list_del_init(&work->list);
	}
	spin_unlock_bh(&bdi->wb_lock);
	return work;
}

/*
 * Add in the number of potentially dirty inodes, because each inode
 * write can dirty pagecache in the underlying blockdev.
 */
static unsigned long get_nr_dirty_pages(void)
{
	return global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS) +
		get_nr_dirty_inodes();
}

static long wb_check_background_flush(struct bdi_writeback *wb)
{
	if (over_bground_thresh(wb->bdi)) {

		struct wb_writeback_work work = {
			.nr_pages	= LONG_MAX,
			.sync_mode	= WB_SYNC_NONE,
			.for_background	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_BACKGROUND,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

static long wb_check_old_data_flush(struct bdi_writeback *wb)
{
	unsigned long expired;
	long nr_pages;

	/*
	 * When set to zero, disable periodic writeback
	 */
	if (!dirty_writeback_interval)
		return 0;

	expired = wb->last_old_flush +
			msecs_to_jiffies(dirty_writeback_interval * 10);
	if (time_before(jiffies, expired))
		return 0;

	wb->last_old_flush = jiffies;
	nr_pages = get_nr_dirty_pages();

	if (nr_pages) {
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.for_kupdate	= 1,
			.range_cyclic	= 1,
			.reason		= WB_REASON_PERIODIC,
		};

		return wb_writeback(wb, &work);
	}

	return 0;
}

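/*
 * Unit note (an added note, not from the original source):
 * dirty_writeback_interval is expressed in centiseconds (it is exposed
 * as /proc/sys/vm/dirty_writeback_centisecs), hence the "* 10" above to
 * convert it to milliseconds for msecs_to_jiffies().  With the default
 * value of 500, the periodic flush runs every 500 * 10 = 5000ms, i.e.
 * every 5 seconds.
 */
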
/*
 * Retrieve work items and do the writeback they describe
 */
long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
{
	struct backing_dev_info *bdi = wb->bdi;
	struct wb_writeback_work *work;
	long wrote = 0;

	set_bit(BDI_writeback_running, &wb->bdi->state);
	while ((work = get_next_work_item(bdi)) != NULL) {
		/*
		 * Override sync mode, in case we must wait for completion
		 * because this thread is exiting now.
		 */
		if (force_wait)
			work->sync_mode = WB_SYNC_ALL;

		trace_writeback_exec(bdi, work);

		wrote += wb_writeback(wb, work);

		/*
		 * Notify the caller of completion if this is a synchronous
		 * work item, otherwise just free it.
		 */
		if (work->done)
			complete(work->done);
		else
			kfree(work);
	}

	/*
	 * Check for periodic writeback, kupdated() style
	 */
	wrote += wb_check_old_data_flush(wb);
	wrote += wb_check_background_flush(wb);
	clear_bit(BDI_writeback_running, &wb->bdi->state);

	return wrote;
}

/*
 * Handle writeback of dirty data for the device backed by this bdi. Also
 * wakes up periodically and does kupdated style flushing.
 */
int bdi_writeback_thread(void *data)
{
	struct bdi_writeback *wb = data;
	struct backing_dev_info *bdi = wb->bdi;
	long pages_written;

	current->flags |= PF_SWAPWRITE;
	set_freezable();
	wb->last_active = jiffies;

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(current, 0);

	trace_writeback_thread_start(bdi);

	while (!kthread_freezable_should_stop(NULL)) {
		/*
		 * Remove own delayed wake-up timer, since we are already awake
		 * and we'll take care of the periodic write-back.
		 */
		del_timer(&wb->wakeup_timer);

		pages_written = wb_do_writeback(wb, 0);

		trace_writeback_pages_written(pages_written);

		if (pages_written)
			wb->last_active = jiffies;

		set_current_state(TASK_INTERRUPTIBLE);
		if (!list_empty(&bdi->work_list) || kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			continue;
		}

		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
		else {
			/*
			 * We have nothing to do, so can go sleep without any
			 * timeout and save power. When a work is queued or
			 * something is made dirty - we will be woken up.
			 */
			schedule();
		}
	}

	/* Flush any work that raced with us exiting */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	trace_writeback_thread_stop(bdi);
	return 0;
}

/*
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
{
	struct backing_dev_info *bdi;

	if (!nr_pages) {
		nr_pages = global_page_state(NR_FILE_DIRTY) +
				global_page_state(NR_UNSTABLE_NFS);
	}

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		__bdi_start_writeback(bdi, nr_pages, false, reason);
	}
	rcu_read_unlock();
}

static noinline void block_dump___mark_inode_dirty(struct inode *inode)
{
	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
		struct dentry *dentry;
		const char *name = "?";

		dentry = d_find_alias(inode);
		if (dentry) {
			spin_lock(&dentry->d_lock);
			name = (const char *) dentry->d_name.name;
		}
		printk(KERN_DEBUG
		       "%s(%d): dirtied inode %lu (%s) on %s\n",
		       current->comm, task_pid_nr(current), inode->i_ino,
		       name, inode->i_sb->s_id);
		if (dentry) {
			spin_unlock(&dentry->d_lock);
			dput(dentry);
		}
	}
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 * Mark an inode as dirty. Callers should use mark_inode_dirty or
 * mark_inode_dirty_sync.
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL! We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;
	struct backing_dev_info *bdi = NULL;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		trace_writeback_dirty_inode_start(inode, flags);

		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode, flags);

		trace_writeback_dirty_inode(inode, flags);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 * -- mikulas
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump))
		block_dump___mark_inode_dirty(inode);

	spin_lock(&inode->i_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out_unlock_inode;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (inode_unhashed(inode))
				goto out_unlock_inode;
		}
		if (inode->i_state & I_FREEING)
			goto out_unlock_inode;

		/*
		 * If the inode was already on b_dirty/b_io/b_more_io, don't
		 * reposition it (that would break b_dirty time-ordering).
		 */
		if (!was_dirty) {
			bool wakeup_bdi = false;
			bdi = inode_to_bdi(inode);

			if (bdi_cap_writeback_dirty(bdi)) {
				WARN(!test_bit(BDI_registered, &bdi->state),
				     "bdi-%s not registered\n", bdi->name);

				/*
				 * If this is the first dirty inode for this
				 * bdi, we have to wake-up the corresponding
				 * bdi thread to make sure background
				 * write-back happens later.
				 */
				if (!wb_has_dirty_io(&bdi->wb))
					wakeup_bdi = true;
			}

			spin_unlock(&inode->i_lock);
			spin_lock(&bdi->wb.list_lock);
			inode->dirtied_when = jiffies;
			list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
			spin_unlock(&bdi->wb.list_lock);

			if (wakeup_bdi)
				bdi_wakeup_thread_delayed(bdi);
			return;
		}
	}
out_unlock_inode:
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

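/*
 * Usage sketch (an added note, not from the original source): filesystems
 * normally reach __mark_inode_dirty() through the wrappers in
 * include/linux/fs.h, e.g. after updating timestamps:
 *
 *	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
 *	mark_inode_dirty(inode);	(expands to __mark_inode_dirty(inode, I_DIRTY))
 *
 * or mark_inode_dirty_sync(inode) when only I_DIRTY_SYNC semantics are
 * wanted.  The inode then lands on its bdi's b_dirty list and is picked
 * up by the flusher thread via queue_io()/writeback_sb_inodes().
 */
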
static void wait_sb_inodes(struct super_block *sb)
{
	struct inode *inode, *old_inode = NULL;

	/*
	 * We need to be protected against the filesystem going from
	 * r/o to r/w or vice versa.
	 */
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	spin_lock(&inode_sb_list_lock);

	/*
	 * Data integrity sync. Must wait for all pages under writeback,
	 * because there may have been pages dirtied before our sync
	 * call, but which had writeout started before we write it out.
	 * In which case, the inode may not be on the dirty list, but
	 * we still have to wait for that writeout.
	 */
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		struct address_space *mapping = inode->i_mapping;

		spin_lock(&inode->i_lock);
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (mapping->nrpages == 0)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&inode_sb_list_lock);

		/*
		 * We hold a reference to 'inode' so it couldn't have been
		 * removed from s_inodes list while we dropped the
		 * inode_sb_list_lock.  We cannot iput the inode now as we can
		 * be holding the last reference and we cannot iput it under
		 * inode_sb_list_lock. So we keep the reference and iput it
		 * later.
		 */
		iput(old_inode);
		old_inode = inode;

		filemap_fdatawait(mapping);

		cond_resched();

		spin_lock(&inode_sb_list_lock);
	}
	spin_unlock(&inode_sb_list_lock);
	iput(old_inode);
}

/**
 * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb_nr(struct super_block *sb,
			    unsigned long nr,
			    enum wb_reason reason)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb			= sb,
		.sync_mode		= WB_SYNC_NONE,
		.tagged_writepages	= 1,
		.done			= &done,
		.nr_pages		= nr,
		.reason			= reason,
	};

	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));
	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);
}
EXPORT_SYMBOL(writeback_inodes_sb_nr);

/**
 * writeback_inodes_sb - writeback dirty inodes from given super_block
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Start writeback on some inodes on this super_block. No guarantees are made
 * on how many (if any) will be written, and this function does not wait
 * for IO completion of submitted IO.
 */
void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(writeback_inodes_sb);

/**
 * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
 * @sb: the superblock
 * @nr: the number of pages to write
 * @reason: the reason of writeback
 *
 * Invoke writeback_inodes_sb_nr if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb_nr(struct super_block *sb,
				  unsigned long nr,
				  enum wb_reason reason)
{
	if (writeback_in_progress(sb->s_bdi))
		return 1;

	if (!down_read_trylock(&sb->s_umount))
		return 0;

	writeback_inodes_sb_nr(sb, nr, reason);
	up_read(&sb->s_umount);
	return 1;
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);

/**
 * try_to_writeback_inodes_sb - try to start writeback if none underway
 * @sb: the superblock
 * @reason: reason why some writeback work was initiated
 *
 * Implemented by try_to_writeback_inodes_sb_nr().
 * Returns 1 if writeback was started, 0 if not.
 */
int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
{
	return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
}
EXPORT_SYMBOL(try_to_writeback_inodes_sb);

/**
 * sync_inodes_sb - sync sb inode pages
 * @sb: the superblock
 *
 * This function writes and waits on any dirty inode belonging to this
 * super_block.
 */
void sync_inodes_sb(struct super_block *sb)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct wb_writeback_work work = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
		.range_cyclic	= 0,
		.done		= &done,
		.reason		= WB_REASON_SYNC,
	};

	/* Nothing to do? */
	if (sb->s_bdi == &noop_backing_dev_info)
		return;
	WARN_ON(!rwsem_is_locked(&sb->s_umount));

	bdi_queue_work(sb->s_bdi, &work);
	wait_for_completion(&done);

	wait_sb_inodes(sb);
}
EXPORT_SYMBOL(sync_inodes_sb);

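/*
 * Usage sketch (an added note, not from the original source): the
 * difference between the exported sb-level helpers is the sync mode and
 * whether they wait.  A sync(2)-style caller that already holds s_umount
 * might do, roughly:
 *
 *	writeback_inodes_sb(sb, WB_REASON_SYNC);	(WB_SYNC_NONE, no wait on IO)
 *	sync_inodes_sb(sb);				(WB_SYNC_ALL, waits on every inode)
 *
 * while callers that cannot block on s_umount use the
 * try_to_writeback_inodes_sb*() variants, which trylock it and simply
 * report whether writeback was started.
 */
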
/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty. This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	return writeback_single_inode(inode, wb, &wbc);
}
EXPORT_SYMBOL(write_inode_now);

/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
}
EXPORT_SYMBOL(sync_inode);

1460
1461/**
c691b9d9 1462 * sync_inode_metadata - write an inode to disk
c3765016
CH
1463 * @inode: the inode to sync
1464 * @wait: wait for I/O to complete.
1465 *
c691b9d9 1466 * Write an inode to disk and adjust its dirty state after completion.
c3765016
CH
1467 *
1468 * Note: only writes the actual inode, no associated data or other metadata.
1469 */
1470int sync_inode_metadata(struct inode *inode, int wait)
1471{
1472 struct writeback_control wbc = {
1473 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
1474 .nr_to_write = 0, /* metadata-only */
1475 };
1476
1477 return sync_inode(inode, &wbc);
1478}
1479EXPORT_SYMBOL(sync_inode_metadata);