writeback: move over_bground_thresh() to mm/page-writeback.c
authorTejun Heo <tj@kernel.org>
Fri, 22 May 2015 22:23:31 +0000 (18:23 -0400)
committerJens Axboe <axboe@fb.com>
Tue, 2 Jun 2015 14:38:13 +0000 (08:38 -0600)
and rename it to wb_over_bg_thresh().  The function is closely tied to
the dirty throttling mechanism implemented in page-writeback.c.  This
relocation will allow future updates necessary for cgroup writeback
support.

While at it, add a function comment.

This is pure reorganization and doesn't introduce any behavioral
changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jan Kara <jack@suse.cz>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
fs/fs-writeback.c
include/linux/writeback.h
mm/page-writeback.c

index 51c8a5b14cdfa1968808a4be164be7a39925cd4b..da355879ba7c55cf9017cf288944a40c5672abad 100644 (file)
@@ -1071,22 +1071,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
        return nr_pages - work.nr_pages;
 }
 
-static bool over_bground_thresh(struct bdi_writeback *wb)
-{
-       unsigned long background_thresh, dirty_thresh;
-
-       global_dirty_limits(&background_thresh, &dirty_thresh);
-
-       if (global_page_state(NR_FILE_DIRTY) +
-           global_page_state(NR_UNSTABLE_NFS) > background_thresh)
-               return true;
-
-       if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
-               return true;
-
-       return false;
-}
-
 /*
  * Explicit flushing or periodic writeback of "old" data.
  *
@@ -1136,7 +1120,7 @@ static long wb_writeback(struct bdi_writeback *wb,
                 * For background writeout, stop when we are below the
                 * background dirty threshold
                 */
-               if (work->for_background && !over_bground_thresh(wb))
+               if (work->for_background && !wb_over_bg_thresh(wb))
                        break;
 
                /*
@@ -1227,7 +1211,7 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-       if (over_bground_thresh(wb)) {
+       if (wb_over_bg_thresh(wb)) {
 
                struct wb_writeback_work work = {
                        .nr_pages       = LONG_MAX,
index 5fdd4e1805e675c1c7241dcd04042cdccdc2ef68..b57c2786b5aaa9097aad25f35cd134c1e740ac11 100644 (file)
@@ -207,6 +207,7 @@ unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
 void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited(struct address_space *mapping);
+bool wb_over_bg_thresh(struct bdi_writeback *wb);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
                                void *data);
index c8ac8cea67dc023342942f30196e2f97dc8837b2..9d9a896fa7b5fc63787b7e08c15d835d0300a997 100644 (file)
@@ -1740,6 +1740,29 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 }
 EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
 
+/**
+ * wb_over_bg_thresh - does @wb need to be written back?
+ * @wb: bdi_writeback of interest
+ *
+ * Determines whether background writeback should keep writing @wb or it's
+ * clean enough.  Returns %true if writeback should continue.
+ */
+bool wb_over_bg_thresh(struct bdi_writeback *wb)
+{
+       unsigned long background_thresh, dirty_thresh;
+
+       global_dirty_limits(&background_thresh, &dirty_thresh);
+
+       if (global_page_state(NR_FILE_DIRTY) +
+           global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+               return true;
+
+       if (wb_stat(wb, WB_RECLAIMABLE) > wb_calc_thresh(wb, background_thresh))
+               return true;
+
+       return false;
+}
+
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
        unsigned long background_thresh;