[PATCH] Workaround for gcc 2.96 (undefined references)
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / cpu.c
CommitLineData
1da177e4
LT
1/* CPU control.
2 * (C) 2001, 2002, 2003, 2004 Rusty Russell
3 *
4 * This code is licenced under the GPL.
5 */
6#include <linux/proc_fs.h>
7#include <linux/smp.h>
8#include <linux/init.h>
9#include <linux/notifier.h>
10#include <linux/sched.h>
11#include <linux/unistd.h>
12#include <linux/cpu.h>
13#include <linux/module.h>
14#include <linux/kthread.h>
15#include <linux/stop_machine.h>
16#include <asm/semaphore.h>
17
/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);

/* Notifier chain invoked on CPU_* hotplug events; protected by cpucontrol. */
static struct notifier_block *cpu_chain;
90d45d17
AR
24/*
25 * Used to check by callers if they need to acquire the cpucontrol
26 * or not to protect a cpu from being removed. Its sometimes required to
27 * call these functions both for normal operations, and in response to
28 * a cpu being added/removed. If the context of the call is in the same
29 * thread context as a CPU hotplug thread, we dont need to take the lock
30 * since its already protected
31 * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
32 */
33
34int current_in_cpu_hotplug(void)
35{
36 return (current->flags & PF_HOTPLUG_CPU);
37}
38
39EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
40
41
1da177e4
LT
42/* Need to know about CPUs going up/down? */
43int register_cpu_notifier(struct notifier_block *nb)
44{
45 int ret;
46
47 if ((ret = down_interruptible(&cpucontrol)) != 0)
48 return ret;
49 ret = notifier_chain_register(&cpu_chain, nb);
50 up(&cpucontrol);
51 return ret;
52}
53EXPORT_SYMBOL(register_cpu_notifier);
54
55void unregister_cpu_notifier(struct notifier_block *nb)
56{
57 down(&cpucontrol);
58 notifier_chain_unregister(&cpu_chain, nb);
59 up(&cpucontrol);
60}
61EXPORT_SYMBOL(unregister_cpu_notifier);
62
63#ifdef CONFIG_HOTPLUG_CPU
64static inline void check_for_tasks(int cpu)
65{
66 struct task_struct *p;
67
68 write_lock_irq(&tasklist_lock);
69 for_each_process(p) {
70 if (task_cpu(p) == cpu &&
71 (!cputime_eq(p->utime, cputime_zero) ||
72 !cputime_eq(p->stime, cputime_zero)))
73 printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
74 (state = %ld, flags = %lx) \n",
75 p->comm, p->pid, cpu, p->state, p->flags);
76 }
77 write_unlock_irq(&tasklist_lock);
78}
79
/*
 * take_cpu_down - callback run on the dying CPU via __stop_machine_run().
 *
 * Disables interrupt delivery to this CPU and then queues the idle task
 * to run next; once we yield, idle notices the CPU is offline and the
 * CPU dies quickly. Returns the __cpu_disable() error, or 0 on success.
 */
static int take_cpu_down(void *unused)
{
	/* Ensure this CPU doesn't handle any more interrupts. */
	int err = __cpu_disable();

	if (err < 0)
		return err;

	/*
	 * Force idle task to run as soon as we yield: it should
	 * immediately notice cpu is offline and die quickly.
	 */
	sched_idle_next();
	return 0;
}
95
/*
 * cpu_down - take the given CPU offline.
 * @cpu: logical CPU number to remove.
 *
 * Sequence: take the hotplug lock, run CPU_DOWN_PREPARE notifiers, move
 * ourselves off the dying CPU, stop the machine to run take_cpu_down()
 * on @cpu, wait for its idle task, kill it with __cpu_die(), then run
 * CPU_DEAD notifiers and warn about any stragglers.
 *
 * Returns 0 on success; -EBUSY if @cpu is the last online CPU; -EINVAL
 * if @cpu is not online or a DOWN_PREPARE notifier vetoed the removal;
 * otherwise the error from locking, stop_machine, or the stopper thread.
 */
int cpu_down(unsigned int cpu)
{
	int err;
	struct task_struct *p;
	cpumask_t old_allowed, tmp;

	if ((err = lock_cpu_hotplug_interruptible()) != 0)
		return err;

	/* Never take down the last online CPU. */
	if (num_online_cpus() == 1) {
		err = -EBUSY;
		goto out;
	}

	if (!cpu_online(cpu)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating we are already in
	 * process of performing CPU hotplug. Callers can check if cpucontrol
	 * is already acquired by current thread, and if so not cause
	 * a dead lock by not acquiring the lock
	 */
	current->flags |= PF_HOTPLUG_CPU;
	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
						(void *)(long)cpu);
	if (err == NOTIFY_BAD) {
		printk("%s: attempt to take down CPU %u failed\n",
				__FUNCTION__, cpu);
		err = -EINVAL;
		goto out;
	}

	/* Ensure that we are not runnable on dying cpu */
	old_allowed = current->cpus_allowed;
	tmp = CPU_MASK_ALL;
	cpu_clear(cpu, tmp);
	set_cpus_allowed(current, tmp);

	/* Run take_cpu_down() on @cpu with the rest of the machine frozen. */
	p = __stop_machine_run(take_cpu_down, NULL, cpu);
	if (IS_ERR(p)) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
				(void *)(long)cpu) == NOTIFY_BAD)
			BUG();

		err = PTR_ERR(p);
		goto out_allowed;
	}

	/* take_cpu_down() ran but the CPU is still online: back out. */
	if (cpu_online(cpu))
		goto out_thread;

	/* Wait for it to sleep (leaving idle task). */
	while (!idle_cpu(cpu))
		yield();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* Move it here so it can run. */
	kthread_bind(p, get_cpu());
	put_cpu();

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
	    == NOTIFY_BAD)
		BUG();

	/* Warn about tasks that still appear bound to the dead CPU. */
	check_for_tasks(cpu);

out_thread:
	err = kthread_stop(p);
out_allowed:
	set_cpus_allowed(current, old_allowed);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	unlock_cpu_hotplug();
	return err;
}
178#endif /*CONFIG_HOTPLUG_CPU*/
179
/*
 * cpu_up - bring the given CPU online.
 * @cpu: logical CPU number to start.
 *
 * Under cpucontrol, runs CPU_UP_PREPARE notifiers, calls the arch
 * __cpu_up(), and on success announces CPU_ONLINE. Any failure after
 * CPU_UP_PREPARE is rolled back via CPU_UP_CANCELED.
 *
 * Returns 0 on success; -EINVAL if @cpu is already online, not present,
 * or an UP_PREPARE notifier vetoed it; otherwise the error from locking
 * or from __cpu_up().
 */
int __devinit cpu_up(unsigned int cpu)
{
	int ret;
	void *hcpu = (void *)(long)cpu;

	if ((ret = down_interruptible(&cpucontrol)) != 0)
		return ret;

	if (cpu_online(cpu) || !cpu_present(cpu)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Leave a trace in current->flags indicating we are already in
	 * process of performing CPU hotplug.
	 */
	current->flags |= PF_HOTPLUG_CPU;
	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
	if (ret == NOTIFY_BAD) {
		printk("%s: attempt to bring up CPU %u failed\n",
				__FUNCTION__, cpu);
		ret = -EINVAL;
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	if (!cpu_online(cpu))
		BUG();

	/* Now call notifier in preparation. */
	notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
	/* Undo CPU_UP_PREPARE if anything after it failed. */
	if (ret != 0)
		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
	current->flags &= ~PF_HOTPLUG_CPU;
	up(&cpucontrol);
	return ret;
}