/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;

/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}

static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	spin_lock(&call_lock);
	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
	spin_unlock(&call_lock);
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}

/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
		      int wait)
{
	cpumask_t map;

	preempt_disable();
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, nonatomic, wait, map);
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function);

/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @nonatomic: unused
 * @wait: if true, wait (atomically) until function has completed on that CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int nonatomic, int wait)
{
	preempt_disable();
	__smp_call_function_map(func, info, nonatomic, wait,
				cpumask_of_cpu(cpu));
	preempt_enable();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);

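/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file; example_set_flag() and example_flags are hypothetical names):
 *
 *	static unsigned long example_flags;
 *
 *	static void example_set_flag(void *info)
 *	{
 *		set_bit(smp_processor_id(), &example_flags);
 *	}
 *
 *	// run on every other online cpu and wait for completion
 *	smp_call_function(example_set_flag, NULL, 0, 1);
 *	// run on cpu 1 only
 *	smp_call_function_single(1, example_set_flag, NULL, 0, 1);
 *
 * Both calls must be made from process context with interrupts enabled;
 * __smp_call_function_map() warns otherwise.
 */
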
static void do_send_stop(void)
{
	int cpu, rc;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);
	}
}

static void do_store_status(void)
{
	int cpu, rc;

	/* store status of all processors in their lowcores (real 0) */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor_p(
				(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
				sigp_store_status_at_address);
		} while (rc == sigp_busy);
	}
}

static void do_wait_for_stop(void)
{
	int cpu;

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}

/*
 * Reboot, halt and power_off routines for SMP.
 */
void machine_restart_smp(char *__unused)
{
	smp_send_stop();
	do_reipl();
}

void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	for (;;);
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
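
/*
 * Editor's note (illustrative sketch, not part of the original file): the
 * send and receive paths above pair up through the ext_call_fast word in
 * the target cpu's lowcore. Wiring up a hypothetical new signal, say
 * ec_example, would look like this (adding the enum value to the
 * ec_bit_sig definition, presumably in asm/sigp.h, is an assumption):
 *
 *	// sender side, on any cpu:
 *	smp_ext_bitcall(cpu, ec_example);
 *
 *	// receiver side, in do_ext_call_interrupt():
 *	if (test_bit(ec_example, &bits))
 *		do_example_work();	// hypothetical handler
 *
 * xchg() clears ext_call_fast atomically, so a bit set after the exchange
 * is picked up by the next 0x1201 external interrupt rather than lost.
 */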

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
void smp_ptlb_callback(void *info)
{
	local_flush_tlb();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

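/*
 * Example usage (editor's illustrative sketch, not part of the original
 * file): smp_prepare_cpus() below uses the uniprocessor variant
 * ctl_set_bit(14, 29) to enable the extended save area; the SMP variants
 * apply the same kind of change on every online cpu, e.g.:
 *
 *	smp_ctl_set_bit(0, 13);		// hypothetical cr0 bit
 *	smp_ctl_clear_bit(0, 13);	// undo it again
 *
 * The orvals/andvals pair in struct ec_creg_mask_parms encodes the update
 * as (creg & andval) | orval, so a set uses orval = 1 << bit with a
 * pass-through andval of all ones, and a clear uses andval = ~(1 << bit)
 * with orval 0.
 */
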
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1]
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = alloc_bootmem(sizeof(union save_area));
	__cpu_logical_map[1] = (__u16) phy_cpu;
	while (signal_processor(1, sigp_stop_and_store_status) == sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

/*
 * Let's check how many CPUs we have.
 */
static unsigned int __init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) == sigp_not_operational)
			continue;
		smp_get_save_area(num_cpus, cpu);
		num_cpus++;
	}
	printk("Detected %d CPUs\n", (int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);
	return num_cpus;
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		/* Bit 0x40 of the sigp sense status indicates "stopped". */
		if (status & 0x40)
			return 1;
	}
	return 0;
}

/* Upping and downing of CPUs */

int __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;
	int curr_cpu;

	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static unsigned int __initdata additional_cpus;
static unsigned int __initdata possible_cpus;

void __init smp_setup_cpu_possible_map(void)
{
	unsigned int phy_cpus, pos_cpus, cpu;

	phy_cpus = smp_count_cpus();
	pos_cpus = min(phy_cpus + additional_cpus, (unsigned int) NR_CPUS);

	if (possible_cpus)
		pos_cpus = min(possible_cpus, (unsigned int) NR_CPUS);

	for (cpu = 0; cpu < pos_cpus; cpu++)
		cpu_set(cpu, cpu_possible_map);

	phy_cpus = min(phy_cpus, pos_cpus);

	for (cpu = 0; cpu < phy_cpus; cpu++)
		cpu_set(cpu, cpu_present_map);
}

#ifdef CONFIG_HOTPLUG_CPU

static int __init setup_additional_cpus(char *s)
{
	additional_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("additional_cpus", setup_additional_cpus);

static int __init setup_possible_cpus(char *s)
{
	possible_cpus = simple_strtoul(s, NULL, 0);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
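
/*
 * Editor's note (illustrative, not part of the original file): both
 * parameters are parsed from the kernel command line, e.g.
 *
 *	additional_cpus=2	reserve room for two hot-pluggable cpus
 *				beyond those detected at boot
 *	possible_cpus=4		fix the possible map at four cpus,
 *				overriding the additional_cpus calculation
 *
 * Both values are capped at NR_CPUS by smp_setup_cpu_possible_map().
 */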

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

/*
 * Cycle through the processors and setup structures.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
	int i;

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	memset(lowcore_ptr, 0, sizeof(lowcore_ptr));
	/*
	 * Initialize prefix pages and stacks for all possible cpus
	 */
	print_cpu_info(&S390_lowcore.cpu_data);

	for_each_possible_cpu(i) {
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA,
					 sizeof(void *) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		if (!lowcore_ptr[i] || !stack)
			panic("smp_boot_cpus failed to allocate memory\n");

		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + ASYNC_SIZE;
		stack = __get_free_pages(GFP_KERNEL, 0);
		if (!stack)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + PAGE_SIZE;
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL, 0);
			if (!lowcore_ptr[i]->extended_save_area_addr)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29);	/* enable extended save area */
#endif
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	cpu_present_map = cpu_possible_map;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static ssize_t show_capability(struct sys_device *dev, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);
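
/*
 * Editor's note (illustrative, not part of the original file): the
 * attribute registered above surfaces as a read-only sysfs file, e.g.
 *
 *	$ cat /sys/devices/system/cpu/cpu0/capability
 *	2913
 *
 * The path follows the usual sysdev layout for cpu devices; the numeric
 * value shown is a made-up sample of the machine's capability rating.
 */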

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (sysdev_create_file(s, &attr_capability))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysdev_remove_file(s, &attr_capability);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __init topology_init(void)
{
	int cpu;

	register_cpu_notifier(&smp_cpu_nb);

	for_each_possible_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);
		struct sys_device *s = &c->sysdev;

		c->hotpluggable = 1;
		register_cpu(c, cpu);
		if (!cpu_online(cpu))
			continue;
		sysdev_create_file(s, &attr_capability);
	}
	return 0;
}
subsys_initcall(topology_init);