From fc75cdfa5b43ac4d3232b490800cd35063adafd3 Mon Sep 17 00:00:00 2001
From: Heiko Carstens
Date: Sun, 25 Jun 2006 05:49:10 -0700
Subject: [PATCH] [PATCH] cpu hotplug: fix CPU_UP_CANCEL handling

If a cpu hotplug callback fails on CPU_UP_PREPARE, all callbacks will be
called with CPU_UP_CANCELED.  A few of these callbacks assume that on
CPU_UP_PREPARE a task pointer has been stored in a percpu array.  This
assumption does not hold if CPU_UP_PREPARE fails; the subsequent calls to
kthread_bind() in CPU_UP_CANCELED then cause an addressing exception
because a NULL pointer is passed.

Signed-off-by: Heiko Carstens
Cc: Ashok Raj
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c      | 2 ++
 kernel/softirq.c    | 2 ++
 kernel/softlockup.c | 2 ++
 kernel/workqueue.c  | 2 ++
 4 files changed, 8 insertions(+)

diff --git a/kernel/sched.c b/kernel/sched.c
index f8d540b324ca..f06d059edef5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4756,6 +4756,8 @@ static int migration_call(struct notifier_block *nfb, unsigned long action,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!cpu_rq(cpu)->migration_thread)
+			break;
 		/* Unbind it from offline cpu so it can run. Fall thru. */
 		kthread_bind(cpu_rq(cpu)->migration_thread,
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 336f92d64e2e..9e2f1c6e73d7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -470,6 +470,8 @@ static int cpu_callback(struct notifier_block *nfb,
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(ksoftirqd, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(ksoftirqd, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 2c1be1163edc..b5c3b94e01ce 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -127,6 +127,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
+		if (!per_cpu(watchdog_task, hotcpu))
+			break;
 		/* Unbind so it can run. Fall thru. */
 		kthread_bind(per_cpu(watchdog_task, hotcpu),
 			     any_online_cpu(cpu_online_map));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f869aff6bc0c..565cf7a1febd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -590,6 +590,8 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));
-- 
2.20.1
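
As a side note, the failure mode described in the changelog can be modelled
outside the kernel.  The following is a minimal user-space sketch, not kernel
code: the names fake_task, per_cpu_task, prepare_cpu, cancel_cpu and NR_CPUS
are illustrative assumptions only; in the real code the percpu slots hold
kthread pointers and the cancel path passes them to kthread_bind().

/*
 * Simplified user-space model of the bug fixed above.  On "CPU_UP_PREPARE"
 * a per-cpu slot is supposed to receive a task pointer; if prepare fails the
 * slot stays NULL, and an unguarded "CPU_UP_CANCELED" handler would then
 * dereference that NULL pointer.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct fake_task { int bound_cpu; };

static struct fake_task *per_cpu_task[NR_CPUS];	/* models the percpu array */

/* Models CPU_UP_PREPARE: may fail before storing the task pointer. */
static int prepare_cpu(int cpu, int fail)
{
	if (fail)
		return -1;			/* slot stays NULL */
	per_cpu_task[cpu] = malloc(sizeof(*per_cpu_task[cpu]));
	return per_cpu_task[cpu] ? 0 : -1;
}

/* Models CPU_UP_CANCELED with the guard the patch adds. */
static void cancel_cpu(int cpu)
{
	if (!per_cpu_task[cpu])			/* the added NULL check */
		return;
	per_cpu_task[cpu]->bound_cpu = 0;	/* stands in for kthread_bind() */
	free(per_cpu_task[cpu]);
	per_cpu_task[cpu] = NULL;
}

int main(void)
{
	/* Prepare fails, then the cancel path runs: guarded, so no crash. */
	if (prepare_cpu(1, /* fail = */ 1))
		cancel_cpu(1);
	printf("cancel handled safely\n");
	return 0;
}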