mm: sched: numa: Implement constant, per task Working Set Sampling (WSS) rate
author	Peter Zijlstra <a.p.zijlstra@chello.nl>
	Thu, 25 Oct 2012 12:16:45 +0000 (14:16 +0200)
committer	Mel Gorman <mgorman@suse.de>
	Tue, 11 Dec 2012 14:42:46 +0000 (14:42 +0000)
Previously, to probe the working set of a task, we'd use
a very simple and crude method: mark all of its address
space PROT_NONE.

That method has various (obvious) disadvantages:

 - it samples the working set at dissimilar rates,
   giving some tasks a sampling quality advantage
   over others.

 - it creates performance problems for tasks with very
   large working sets.

 - it over-samples processes with large address spaces
   that only very rarely execute.

Improve that method by keeping a rotating offset into the
address space that marks the current position of the scan,
and advance it at a constant rate (proportional to the CPU
cycles the task executes). If the offset reaches the last
mapped address of the mm, the scan starts over at the first
address.
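
In outline, the scan behaves like the bounded cursor below (a minimal
userspace sketch, not the kernel code: the structures and names are
hypothetical stand-ins for mm_struct/vm_area_struct, and the real
loop's small-VMA skipping and huge-page alignment are omitted):

	/* Hypothetical, simplified stand-ins for the kernel structures. */
	struct vma { unsigned long start, end; struct vma *next; };
	struct mm  { struct vma *mmap; unsigned long scan_offset; };

	/*
	 * One scan pass: advance a rotating cursor by at most 'budget'
	 * bytes of address space, wrapping to the first mapping once
	 * the list is exhausted.
	 */
	static void scan_pass(struct mm *mm, long budget)
	{
		unsigned long off = mm->scan_offset;
		struct vma *v = mm->mmap;

		/* Find the VMA holding the saved offset; wrap past the end. */
		while (v && v->end <= off)
			v = v->next;
		if (!v) {
			off = 0;
			v = mm->mmap;
		}

		for (; v && budget > 0; v = v->next) {
			unsigned long from = off > v->start ? off : v->start;
			unsigned long to = from + budget < v->end ?
					   from + budget : v->end;

			/* the kernel calls change_prot_numa(from, to) here */
			budget -= to - from;
			off = to;
		}

		/* Resume here next time, or start over if we hit the end. */
		mm->scan_offset = v ? off : 0;
	}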

The per-task nature of the working set sampling in this tree
allows such constant-rate, per-task, execution-weight
proportional sampling of the working set, with an adaptive
sampling interval/frequency that goes from once per 100 ms of
execution up to just once per 8 seconds. The current sampling
volume is 256 MB per interval.

As tasks mature and their working set converges, the
sampling rate slows down to just a trickle: 256 MB per 8
seconds of CPU time executed.
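
Put differently (simple arithmetic on the figures above; only the
256 MB volume and the 100 ms/8 s interval bounds come from this
changelog):

	#include <stdio.h>

	int main(void)
	{
		const double volume_mb = 256.0;	/* per scan interval */
		const double fast_s = 0.1;	/* busiest interval: 100 ms */
		const double slow_s = 8.0;	/* converged interval: 8 s */

		/* Address space sampled per second of task execution. */
		printf("young task: %.0f MB/s\n", volume_mb / fast_s); /* 2560 */
		printf("converged:  %.0f MB/s\n", volume_mb / slow_s); /*   32 */
		return 0;
	}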

This, beyond being adaptive, also rate-limits rarely
executing tasks and does not over-sample on overloaded
systems.

[ In AutoNUMA speak, this patch deals with the effective sampling
  rate of the 'hinting page fault'. AutoNUMA's scanning is
  currently rate-limited, but it is also fundamentally
  single-threaded, executing in the knuma_scand kernel thread,
  so the limit in AutoNUMA is global and does not scale up with
  the number of CPUs, nor does it scan tasks in an execution
  proportional manner.

  So the idea of rate-limiting the scanning was first implemented
  in the AutoNUMA tree via a global rate limit. This patch goes
  beyond that by implementing a per-task, execution-proportional
  working set sampling rate that does not rely on a single
  global scanning daemon. ]

[ Dan Carpenter pointed out a possible NULL pointer dereference in the
  first version of this patch. ]

Based-on-idea-by: Andrea Arcangeli <aarcange@redhat.com>
Bug-Found-By: Dan Carpenter <dan.carpenter@oracle.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
[ Wrote changelog and fixed bug. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
include/linux/mm_types.h
include/linux/sched.h
kernel/sched/fair.c
kernel/sysctl.c

diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ed8638c29b3ea7d94946edb1d7af5f3c78e25f0f..d1e246c5e50c939d1ba4d34c5b6a4f7e9a3b9f7c 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -406,6 +406,9 @@ struct mm_struct {
         */
        unsigned long numa_next_scan;
 
+       /* Restart point for scanning and setting pte_numa */
+       unsigned long numa_scan_offset;
+
        /* numa_scan_seq prevents two threads setting pte_numa */
        int numa_scan_seq;
 #endif
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 844af5b12cb2c15a1ec5676f7f9173d63f4e93ae..37841958d234c42af49ce9f709f7382d5e33c3f2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2008,6 +2008,7 @@ extern enum sched_tunable_scaling sysctl_sched_tunable_scaling;
 
 extern unsigned int sysctl_numa_balancing_scan_period_min;
 extern unsigned int sysctl_numa_balancing_scan_period_max;
+extern unsigned int sysctl_numa_balancing_scan_size;
 extern unsigned int sysctl_numa_balancing_settle_count;
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 6831abb5dbefd74a5d75fa125a3fd89a92fbf5c6..0a349dd1fa60fd97a4efbf49ccbb7ec9ef0249a8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -780,10 +780,13 @@ update_stats_curr_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 
 #ifdef CONFIG_NUMA_BALANCING
 /*
- * numa task sample period in ms: 5s
+ * numa task sample period in ms
  */
-unsigned int sysctl_numa_balancing_scan_period_min = 5000;
-unsigned int sysctl_numa_balancing_scan_period_max = 5000*16;
+unsigned int sysctl_numa_balancing_scan_period_min = 100;
+unsigned int sysctl_numa_balancing_scan_period_max = 100*16;
+
+/* Portion of address space to scan in MB */
+unsigned int sysctl_numa_balancing_scan_size = 256;
 
 static void task_numa_placement(struct task_struct *p)
 {
@@ -808,6 +811,12 @@ void task_numa_fault(int node, int pages)
        task_numa_placement(p);
 }
 
+static void reset_ptenuma_scan(struct task_struct *p)
+{
+       ACCESS_ONCE(p->mm->numa_scan_seq)++;
+       p->mm->numa_scan_offset = 0;
+}
+
 /*
  * The expensive part of numa migration is done from task_work context.
  * Triggered from task_tick_numa().
@@ -817,6 +826,9 @@ void task_numa_work(struct callback_head *work)
        unsigned long migrate, next_scan, now = jiffies;
        struct task_struct *p = current;
        struct mm_struct *mm = p->mm;
+       struct vm_area_struct *vma;
+       unsigned long offset, end;
+       long length;
 
        WARN_ON_ONCE(p != container_of(work, struct task_struct, numa_work));
 
@@ -846,18 +858,45 @@ void task_numa_work(struct callback_head *work)
        if (cmpxchg(&mm->numa_next_scan, migrate, next_scan) != migrate)
                return;
 
-       ACCESS_ONCE(mm->numa_scan_seq)++;
-       {
-               struct vm_area_struct *vma;
+       offset = mm->numa_scan_offset;
+       length = sysctl_numa_balancing_scan_size;
+       length <<= 20;
 
-               down_read(&mm->mmap_sem);
-               for (vma = mm->mmap; vma; vma = vma->vm_next) {
-                       if (!vma_migratable(vma))
-                               continue;
-                       change_prot_numa(vma, vma->vm_start, vma->vm_end);
-               }
-               up_read(&mm->mmap_sem);
+       down_read(&mm->mmap_sem);
+       vma = find_vma(mm, offset);
+       if (!vma) {
+               reset_ptenuma_scan(p);
+               offset = 0;
+               vma = mm->mmap;
+       }
+       for (; vma && length > 0; vma = vma->vm_next) {
+               if (!vma_migratable(vma))
+                       continue;
+
+               /* Skip small VMAs. They are not likely to be of relevance */
+               if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < HPAGE_PMD_NR)
+                       continue;
+
+               offset = max(offset, vma->vm_start);
+               end = min(ALIGN(offset + length, HPAGE_SIZE), vma->vm_end);
+               length -= end - offset;
+
+               change_prot_numa(vma, offset, end);
+
+               offset = end;
        }
+
+       /*
+        * It is possible to reach the end of the VMA list but the last few
+        * VMAs are not guaranteed to be vma_migratable. If they are not, we
+        * would find the !migratable VMA on the next scan but not reset the
+        * scanner to the start, so check it now.
+        */
+       if (vma)
+               mm->numa_scan_offset = offset;
+       else
+               reset_ptenuma_scan(p);
+       up_read(&mm->mmap_sem);
 }
 
 /*
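
For intuition about the budget clamping in the loop above, here is a
standalone sketch with made-up numbers (ALIGN and HPAGE_SIZE are
redefined locally; a 2 MB huge page size is assumed):

	#include <stdio.h>

	#define HPAGE_SIZE	(2UL << 20)
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned long vm_start = 0x100000000UL;	/* hypothetical VMA */
		unsigned long vm_end   = 0x140000000UL;	/* 1 GB long */
		unsigned long offset   = 0;		/* cursor before the VMA */
		long length = 256L << 20;		/* 256 MB budget */

		/* Same clamping as above: start no earlier than the VMA,
		 * stop at the budget or the VMA end, with the stop rounded
		 * up to a huge page boundary. */
		offset = offset > vm_start ? offset : vm_start;
		unsigned long end = ALIGN(offset + length, HPAGE_SIZE);
		if (end > vm_end)
			end = vm_end;
		length -= end - offset;

		printf("scan [%#lx, %#lx), budget left %ld MB\n",
		       offset, end, length >> 20);	/* 0 MB left */
		return 0;
	}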
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 025e1ae50ef1942247485939eab28ef805c94ba4..7d3a2e0475e51f29dac7a9674eb08b5b0e1b75db 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -366,6 +366,13 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
+       {
+               .procname       = "numa_balancing_scan_size_mb",
+               .data           = &sysctl_numa_balancing_scan_size,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec,
+       },
 #endif /* CONFIG_NUMA_BALANCING */
 #endif /* CONFIG_SCHED_DEBUG */
        {
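
With the sysctl wired into kern_table as above, the knob should appear
under /proc/sys/kernel/ on kernels built with CONFIG_SCHED_DEBUG and
CONFIG_NUMA_BALANCING (usage sketch; the value 512 is illustrative):

	cat /proc/sys/kernel/numa_balancing_scan_size_mb
	echo 512 > /proc/sys/kernel/numa_balancing_scan_size_mb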