writeback: avoid unnecessary calculation of bdi dirty thresholds
author Wu Fengguang <fengguang.wu@intel.com>
Wed, 11 Aug 2010 21:17:39 +0000 (14:17 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 12 Aug 2010 15:43:29 +0000 (08:43 -0700)
Split get_dirty_limits() into global_dirty_limits() + bdi_dirty_limit(), so
that the latter can be avoided when under the global dirty background
threshold (which is the normal state for most systems).
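For illustration, a minimal userspace sketch of the fast path this split
enables (stub threshold values and a simplified bdi_dirty_limit() signature
without the backing_dev_info argument; a model, not kernel code):

#include <stdio.h>

/* Hypothetical stand-ins for the global page counters. */
static unsigned long nr_reclaimable = 100;	/* NR_FILE_DIRTY + NR_UNSTABLE_NFS */
static unsigned long nr_writeback = 50;		/* NR_WRITEBACK */

/* Cheap: just reads of the global dirty ratios. */
static void global_dirty_limits(unsigned long *pbackground,
				unsigned long *pdirty)
{
	*pbackground = 1000;
	*pdirty = 2000;
}

/* Expensive: models the per-bdi writeout-fraction arithmetic. */
static unsigned long bdi_dirty_limit(unsigned long dirty)
{
	printf("computing per-bdi limit\n");
	return dirty / 4;
}

int main(void)
{
	unsigned long background_thresh, dirty_thresh, bdi_thresh;

	global_dirty_limits(&background_thresh, &dirty_thresh);

	/*
	 * Below the midpoint of the background and hard thresholds,
	 * background writeback can catch up on its own, so the
	 * per-bdi calculation is skipped entirely.
	 */
	if (nr_reclaimable + nr_writeback <
			(background_thresh + dirty_thresh) / 2) {
		printf("under global limits, bdi math skipped\n");
		return 0;
	}

	bdi_thresh = bdi_dirty_limit(dirty_thresh);
	printf("bdi_thresh = %lu pages\n", bdi_thresh);
	return 0;
}

With the stub numbers above the early break fires, which is the common case
the changelog refers to; only a system already past its global limits pays
for the per-bdi math.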

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
fs/fs-writeback.c
include/linux/writeback.h
mm/backing-dev.c
mm/page-writeback.c

diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 2f76c4a081a2803249063b8c5e05e68bff525dfa..fca43d4d7bf4f0657edcf41def8ee1281d822f29 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -590,7 +590,7 @@ static inline bool over_bground_thresh(void)
 {
        unsigned long background_thresh, dirty_thresh;
 
-       get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+       global_dirty_limits(&background_thresh, &dirty_thresh);
 
        return (global_page_state(NR_FILE_DIRTY) +
                global_page_state(NR_UNSTABLE_NFS) >= background_thresh);
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index c24eca71e80c8a1908a44cf94e11ed487b50b875..72a5d647a5f26d9eb3c798894443d4eb7849c0dd 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -124,8 +124,9 @@ struct ctl_table;
 int dirty_writeback_centisecs_handler(struct ctl_table *, int,
                                      void __user *, size_t *, loff_t *);
 
-void get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-                     unsigned long *pbdi_dirty, struct backing_dev_info *bdi);
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+                              unsigned long dirty);
 
 void page_writeback_init(void);
 void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 08d357522e784c602205a988687eddbeb0f72c10..eaa4a5bbe0634390fc802ebffdbad4291fa3b991 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -81,7 +81,8 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
                nr_more_io++;
        spin_unlock(&inode_lock);
 
-       get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);
+       global_dirty_limits(&background_thresh, &dirty_thresh);
+       bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        seq_printf(m,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2cf69a5e46e624a02767d564be4ce4a0daacf842..1ea13ef350a83f7afde19d88f52c16be2921a40b 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -267,10 +267,11 @@ static inline void task_dirties_fraction(struct task_struct *tsk,
  *
  *   dirty -= (dirty/8) * p_{t}
  */
-static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
+static unsigned long task_dirty_limit(struct task_struct *tsk,
+                                      unsigned long bdi_dirty)
 {
        long numerator, denominator;
-       unsigned long dirty = *pdirty;
+       unsigned long dirty = bdi_dirty;
        u64 inv = dirty >> 3;
 
        task_dirties_fraction(tsk, &numerator, &denominator);
@@ -278,10 +279,8 @@ static void task_dirty_limit(struct task_struct *tsk, unsigned long *pdirty)
        do_div(inv, denominator);
 
        dirty -= inv;
-       if (dirty < *pdirty/2)
-               dirty = *pdirty/2;
 
-       *pdirty = dirty;
+       return max(dirty, bdi_dirty/2);
 }
 
 /*
@@ -391,9 +390,7 @@ unsigned long determine_dirtyable_memory(void)
        return x + 1;   /* Ensure that we never return 0 */
 }
 
-void
-get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
-                unsigned long *pbdi_dirty, struct backing_dev_info *bdi)
+void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
        unsigned long background;
        unsigned long dirty;
@@ -425,26 +422,28 @@ get_dirty_limits(unsigned long *pbackground, unsigned long *pdirty,
        }
        *pbackground = background;
        *pdirty = dirty;
+}
 
-       if (bdi) {
-               u64 bdi_dirty;
-               long numerator, denominator;
+unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
+                              unsigned long dirty)
+{
+       u64 bdi_dirty;
+       long numerator, denominator;
 
-               /*
-                * Calculate this BDI's share of the dirty ratio.
-                */
-               bdi_writeout_fraction(bdi, &numerator, &denominator);
+       /*
+        * Calculate this BDI's share of the dirty ratio.
+        */
+       bdi_writeout_fraction(bdi, &numerator, &denominator);
 
-               bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
-               bdi_dirty *= numerator;
-               do_div(bdi_dirty, denominator);
-               bdi_dirty += (dirty * bdi->min_ratio) / 100;
-               if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
-                       bdi_dirty = dirty * bdi->max_ratio / 100;
+       bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
+       bdi_dirty *= numerator;
+       do_div(bdi_dirty, denominator);
 
-               *pbdi_dirty = bdi_dirty;
-               task_dirty_limit(current, pbdi_dirty);
-       }
+       bdi_dirty += (dirty * bdi->min_ratio) / 100;
+       if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
+               bdi_dirty = dirty * bdi->max_ratio / 100;
+
+       return bdi_dirty;
 }
 
 /*
@@ -475,13 +474,24 @@ static void balance_dirty_pages(struct address_space *mapping,
                        .range_cyclic   = 1,
                };
 
-               get_dirty_limits(&background_thresh, &dirty_thresh,
-                               &bdi_thresh, bdi);
-
                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
                                        global_page_state(NR_UNSTABLE_NFS);
                nr_writeback = global_page_state(NR_WRITEBACK);
 
+               global_dirty_limits(&background_thresh, &dirty_thresh);
+
+               /*
+                * Throttle it only when the background writeback cannot
+                * catch-up. This avoids (excessively) small writeouts
+                * when the bdi limits are ramping up.
+                */
+               if (nr_reclaimable + nr_writeback <
+                               (background_thresh + dirty_thresh) / 2)
+                       break;
+
+               bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
+               bdi_thresh = task_dirty_limit(current, bdi_thresh);
+
                /*
                 * In order to avoid the stacked BDI deadlock we need
                 * to ensure we accurately count the 'dirty' pages when
@@ -513,15 +523,6 @@ static void balance_dirty_pages(struct address_space *mapping,
                if (!dirty_exceeded)
                        break;
 
-               /*
-                * Throttle it only when the background writeback cannot
-                * catch-up. This avoids (excessively) small writeouts
-                * when the bdi limits are ramping up.
-                */
-               if (nr_reclaimable + nr_writeback <
-                               (background_thresh + dirty_thresh) / 2)
-                       break;
-
                if (!bdi->dirty_exceeded)
                        bdi->dirty_exceeded = 1;
 
@@ -634,7 +635,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
        unsigned long dirty_thresh;
 
         for ( ; ; ) {
-               get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
+               global_dirty_limits(&background_thresh, &dirty_thresh);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page