mm: sched: numa: Control enabling and disabling of NUMA balancing
author     Mel Gorman <mgorman@suse.de>
           Thu, 22 Nov 2012 11:16:36 +0000 (11:16 +0000)
committer  Mel Gorman <mgorman@suse.de>
           Tue, 11 Dec 2012 14:42:55 +0000 (14:42 +0000)
This patch adds Kconfig options and kernel parameters to allow the
enabling and disabling of automatic NUMA balancing. The existence
of such a switch was and is very important when debugging problems
related to transparent hugepages and we should have the same for
automatic NUMA placement.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Documentation/kernel-parameters.txt
include/linux/sched.h
init/Kconfig
kernel/sched/core.c
kernel/sched/fair.c
kernel/sched/features.h
mm/mempolicy.c
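
How the pieces below fit together, as a reading aid only; the names are
exactly those used in the hunks, but this is a sketch, not literal code:

/*
 * Sketch of the control flow this patch wires up:
 *
 *   numa_balancing=enable|disable on the command line
 *     -> setup_numabalancing()               [mm/mempolicy.c]
 *        -> set_numabalancing_state(bool)    [kernel/sched/core.c]
 *           -> sched_feat_set("NUMA" / "NO_NUMA")
 *
 *   no parameter given:
 *     numa_policy_init() -> check_numabalancing_enable()
 *       -> enables balancing only when nr_node_ids > 1 and
 *          CONFIG_NUMA_BALANCING_DEFAULT_ENABLED=y
 *
 *   fault path:
 *     task_numa_fault() returns early unless sched_feat_numa(NUMA)
 */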

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9776f068306b7f7ec6425ec4c02b17f5dac4788a..2e8d2625b814c52d069caaf0dad730a75d4e7dcf 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1996,6 +1996,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
        nr_uarts=       [SERIAL] maximum number of UARTs to be registered.
 
+       numa_balancing= [KNL,X86] Enable or disable automatic NUMA balancing.
+                       Allowed values are enable and disable
+
        numa_zonelist_order= [KNL, BOOT] Select zonelist order for NUMA.
                        one of ['zone', 'node', 'default'] can be specified
                        This can be set from sysctl after boot.
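
For example, booting with numa_balancing=disable forces the feature off
on any machine, overriding the CONFIG_NUMA_BALANCING_DEFAULT_ENABLED
default (the override flag in mm/mempolicy.c below skips the default path).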
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 0f4ff2bd03f6380011740e688a931fc8eb43d9a6..b1e619f9ff1aafa8f4b7131954ca2c0dc58723d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1563,10 +1563,14 @@ struct task_struct {
 
 #ifdef CONFIG_NUMA_BALANCING
 extern void task_numa_fault(int node, int pages, bool migrated);
+extern void set_numabalancing_state(bool enabled);
 #else
 static inline void task_numa_fault(int node, int pages, bool migrated)
 {
 }
+static inline void set_numabalancing_state(bool enabled)
+{
+}
 #endif
 
 /*
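
The empty stub in the !CONFIG_NUMA_BALANCING branch follows the usual
kernel pattern: callers may invoke set_numabalancing_state()
unconditionally, and the call compiles away when the feature is not
configured. A hypothetical caller, purely for illustration:

/* Hypothetical illustration only: this compiles with and without
 * CONFIG_NUMA_BALANCING because the disabled variant of
 * set_numabalancing_state() is an empty static inline. */
static void __init numa_balancing_off_example(void)
{
	set_numabalancing_state(false);	/* no-op when !CONFIG_NUMA_BALANCING */
}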
diff --git a/init/Kconfig b/init/Kconfig
index 9f00f004796afd907602ca82d50a1e35fd111f47..18e2a5920a34288a18b150adc46aaa952704aac9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -720,6 +720,14 @@ config ARCH_USES_NUMA_PROT_NONE
        depends on ARCH_WANTS_PROT_NUMA_PROT_NONE
        depends on NUMA_BALANCING
 
+config NUMA_BALANCING_DEFAULT_ENABLED
+       bool "Automatically enable NUMA aware memory/task placement"
+       default y
+       depends on NUMA_BALANCING
+       help
+         If set, automatic NUMA balancing will be enabled if running on a NUMA
+         machine.
+
 config NUMA_BALANCING
        bool "Memory placement aware NUMA scheduler"
        default y
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9d255bc0e278943fd85e6f9569d45a3ac6ab542d..7a45015274ab1674df3ec212d007865630978012 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -192,23 +192,10 @@ static void sched_feat_disable(int i) { };
 static void sched_feat_enable(int i) { };
 #endif /* HAVE_JUMP_LABEL */
 
-static ssize_t
-sched_feat_write(struct file *filp, const char __user *ubuf,
-               size_t cnt, loff_t *ppos)
+static int sched_feat_set(char *cmp)
 {
-       char buf[64];
-       char *cmp;
-       int neg = 0;
        int i;
-
-       if (cnt > 63)
-               cnt = 63;
-
-       if (copy_from_user(&buf, ubuf, cnt))
-               return -EFAULT;
-
-       buf[cnt] = 0;
-       cmp = strstrip(buf);
+       int neg = 0;
 
        if (strncmp(cmp, "NO_", 3) == 0) {
                neg = 1;
@@ -228,6 +215,27 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
                }
        }
 
+       return i;
+}
+
+static ssize_t
+sched_feat_write(struct file *filp, const char __user *ubuf,
+               size_t cnt, loff_t *ppos)
+{
+       char buf[64];
+       char *cmp;
+       int i;
+
+       if (cnt > 63)
+               cnt = 63;
+
+       if (copy_from_user(&buf, ubuf, cnt))
+               return -EFAULT;
+
+       buf[cnt] = 0;
+       cmp = strstrip(buf);
+
+       i = sched_feat_set(cmp);
        if (i == __SCHED_FEAT_NR)
                return -EINVAL;
 
@@ -1549,6 +1557,16 @@ static void __sched_fork(struct task_struct *p)
 #endif /* CONFIG_NUMA_BALANCING */
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+void set_numabalancing_state(bool enabled)
+{
+       if (enabled)
+               sched_feat_set("NUMA");
+       else
+               sched_feat_set("NO_NUMA");
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /*
  * fork()/clone()-time setup:
  */
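
The refactoring above splits the parsing and toggling logic out of the
debugfs write handler so that kernel code, here set_numabalancing_state(),
can flip a scheduler feature without a __user buffer. Folding the hunks
together, the resulting helper looks roughly like this; the
feature-matching loop is elided by the diff context, so its body here is
an assumption reconstructed from the surrounding code:

static int sched_feat_set(char *cmp)
{
	int i;
	int neg = 0;

	if (strncmp(cmp, "NO_", 3) == 0) {
		neg = 1;
		cmp += 3;
	}

	/* Assumed shape of the loop elided by the diff: match the name
	 * against sched_feat_names[] and flip the corresponding bit. */
	for (i = 0; i < __SCHED_FEAT_NR; i++) {
		if (strcmp(cmp, sched_feat_names[i]) == 0) {
			if (neg) {
				sysctl_sched_features &= ~(1UL << i);
				sched_feat_disable(i);
			} else {
				sysctl_sched_features |= (1UL << i);
				sched_feat_enable(i);
			}
			break;
		}
	}

	return i;	/* == __SCHED_FEAT_NR when no feature matched */
}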
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 4b577863933f04c133db5406d049ad2b3b99f471..7a02a2082e95154d5094ded219b20f76ac98a525 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -811,6 +811,9 @@ void task_numa_fault(int node, int pages, bool migrated)
 {
        struct task_struct *p = current;
 
+       if (!sched_feat_numa(NUMA))
+               return;
+
        /* FIXME: Allocate task-specific structure for placement policy here */
 
        /*
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 5fb7aefbec80284f25f716ff52184846069d5c0a..d2373a3e32528ae6869248188108dc2283cd249e 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -63,8 +63,10 @@ SCHED_FEAT(RT_RUNTIME_SHARE, true)
 SCHED_FEAT(LB_MIN, false)
 
 /*
- * Apply the automatic NUMA scheduling policy
+ * Apply the automatic NUMA scheduling policy. Enabled automatically
+ * at runtime if running on a NUMA machine. Can be controlled via
+ * numa_balancing=
  */
 #ifdef CONFIG_NUMA_BALANCING
-SCHED_FEAT(NUMA,       true)
+SCHED_FEAT(NUMA,       false)
 #endif
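
Note the default flips from true to false: the feature no longer switches
itself on at compile time, but is enabled at boot by
check_numabalancing_enable() on machines with more than one node, or
explicitly via numa_balancing=. The sched_feat_numa() test used in the
fault path is, in this series, a thin wrapper around the normal feature
check; its assumed shape (from kernel/sched/sched.h of this era, not
shown in this diff):

/* Assumed definition in kernel/sched/sched.h: */
#ifdef CONFIG_NUMA_BALANCING
#define sched_feat_numa(x) sched_feat(x)
#else
#define sched_feat_numa(x) (0)
#endif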
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index fd20e28fd2adcfdd540ef7fcdfefb319d3c0d72b..046308e9b9999eac79751c00de5aa67dea4a52b4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2521,6 +2521,50 @@ void mpol_free_shared_policy(struct shared_policy *p)
        mutex_unlock(&p->mutex);
 }
 
+#ifdef CONFIG_NUMA_BALANCING
+static bool __initdata numabalancing_override;
+
+static void __init check_numabalancing_enable(void)
+{
+       bool numabalancing_default = false;
+
+       if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
+               numabalancing_default = true;
+
+       if (nr_node_ids > 1 && !numabalancing_override) {
+               printk(KERN_INFO "%s automatic NUMA balancing. Configure with numa_balancing= or sysctl\n",
+                       numabalancing_default ? "Enabling" : "Disabling");
+               set_numabalancing_state(numabalancing_default);
+       }
+}
+
+static int __init setup_numabalancing(char *str)
+{
+       int ret = 0;
+       if (!str)
+               goto out;
+       numabalancing_override = true;
+
+       if (!strcmp(str, "enable")) {
+               set_numabalancing_state(true);
+               ret = 1;
+       } else if (!strcmp(str, "disable")) {
+               set_numabalancing_state(false);
+               ret = 1;
+       }
+out:
+       if (!ret)
+               printk(KERN_WARNING "Unable to parse numa_balancing=\n");
+
+       return ret;
+}
+__setup("numa_balancing=", setup_numabalancing);
+#else
+static inline void __init check_numabalancing_enable(void)
+{
+}
+#endif /* CONFIG_NUMA_BALANCING */
+
 /* assumes fs == KERNEL_DS */
 void __init numa_policy_init(void)
 {
@@ -2571,6 +2615,8 @@ void __init numa_policy_init(void)
 
        if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
                printk("numa_policy_init: interleaving failed\n");
+
+       check_numabalancing_enable();
 }
 
 /* Reset policy of current process to default */
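
The return-value handling in setup_numabalancing() follows the standard
__setup() convention: returning 1 tells the early parameter code the
string was consumed, while returning 0 leaves it unrecognised, in which
case it is passed on towards init. A minimal sketch of the same
convention with a hypothetical "foo=" parameter:

#include <linux/init.h>		/* __setup(), __init */
#include <linux/string.h>	/* strcmp() */

/* Hypothetical handler, same convention as setup_numabalancing():
 * return 1 when the value was consumed, 0 when it was not. */
static int __init setup_foo(char *str)
{
	if (str && !strcmp(str, "on"))
		return 1;	/* handled */
	return 0;		/* unrecognised; falls through to init */
}
__setup("foo=", setup_foo);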