/*
 *  linux/arch/arm/kernel/process.c
 *
 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
 *  Original Copyright (C) 1995  Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <stdarg.h>

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/cpuidle.h>
#include <linux/leds.h>
#include <linux/console.h>
#include <linux/mtk_ram_console.h>

#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
#include <mach/system.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

static const char *processor_modes[] = {
	"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
	"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
	"USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
	"UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] = {
	"ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

//[BUGFIX]-Add-BEGIN by SCDTABLET.(fangyou.wang),10/10/2015,1097303,
//auto reboot after power off
extern void force_enable_uart_log(void);
//[BUGFIX]-Add-END by SCDTABLET.(fangyou.wang)
#ifdef CONFIG_SMP
void arch_trigger_all_cpu_backtrace(void)
{
	smp_send_all_cpu_backtrace();
}
#else
void arch_trigger_all_cpu_backtrace(void)
{
	dump_stack();
}
#endif

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
typedef void (*phys_reset_t)(unsigned long);

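/*
 * Best-effort flush of any buffered console output before a restart, so the
 * last kernel messages are not lost; if the console lock cannot be taken,
 * it is forcibly taken over with interrupts disabled.
 */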
#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
void arm_machine_flush_console(void)
{
	printk("\n");
	pr_emerg("Restarting %s\n", linux_banner);
	if (console_trylock()) {
		console_unlock();
		return;
	}

	mdelay(50);

	local_irq_disable();
	if (!console_trylock())
		pr_emerg("arm_restart: Console was locked! Busting\n");
	else
		pr_emerg("arm_restart: Console was locked!\n");
	console_unlock();
}
#else
void arm_machine_flush_console(void)
{
}
#endif

/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

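/*
 * Restart path: flush the console, mask interrupts, set up 1:1 reboot
 * mappings, flush and disable the outer (L2) cache, then hand control to
 * the platform's arch_reset() hook.  Execution only continues past
 * arch_reset() if the reset failed.
 */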
void arm_machine_restart(char mode, const char *cmd)
{
	/* Flush the console to make sure all the relevant messages make it
	 * out to the console drivers */
	arm_machine_flush_console();

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot();

	/* When the L1 cache is disabled but the L2 is still enabled, the
	 * spinlock cannot take the lock, so we need to disable the L2 as
	 * well. by Chia-Hao Hsu
	 */
	outer_flush_all();
	outer_disable();
	outer_flush_all();

	/* Clean and invalidate caches */
	flush_cache_all();
#ifdef CONFIG_RESTART_DISABLE_CACHE
	/* Turn off caching */
	// cpu_proc_fin(); // Don't turn off the cache during the reboot phase; CA15 has a risk if the cache is turned off.
#endif
	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(mode, cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}

static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn off caching */
	cpu_proc_fin();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
	BUG();
}

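/*
 * Restart by jumping to a physical address: interrupts are masked, the L2
 * is disabled if this is the last online CPU, then __soft_restart() runs on
 * the small static soft_restart_stack via call_with_stack().
 */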
void soft_restart(unsigned long addr)
{
	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* Disable the L2 if we're the last man standing. */
	if (num_online_cpus() == 1) {
		outer_flush_all();
		outer_disable();
	}

	/* Change to the new stack and continue with the reset. */
	call_with_stack(__soft_restart, (void *)addr, (void *)stack);

	/* Should never get here. */
	BUG();
}

static void null_restart(char mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

static void default_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	local_irq_enable();
}

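/*
 * Hooks called from the generic idle loop: FIQs are enabled when the idle
 * thread is set up, and the idle notifier chain and CPU idle LED trigger
 * are driven on every idle entry/exit.
 */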
void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}

void arch_cpu_idle_enter(void)
{
	idle_notifier_call_chain(IDLE_START);
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
	idle_notifier_call_chain(IDLE_END);
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called from the core idle loop.
 */
void arch_cpu_idle(void)
{
	if (cpuidle_idle_call())
		default_idle();
}

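/*
 * Reboot mode, taken from the first character of the "reboot=" kernel
 * parameter (default 'h'); it is passed through to arm_pm_restart() from
 * machine_restart().
 */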
static char reboot_mode = 'h';

int __init reboot_setup(char *str)
{
	reboot_mode = str[0];
	return 1;
}

__setup("reboot=", reboot_setup);

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	/*
	 * Disable preemption so we're guaranteed to
	 * run to power off or reboot and prevent
	 * the possibility of switching to another
	 * thread that might wind up blocking on
	 * one of the stopped CPUs.
	 */
	printk("machine_shutdown: start, Process(%s:%d)\n", current->comm, current->pid);
	dump_stack();
	preempt_disable();
#endif
	disable_nonboot_cpus();
	printk("machine_shutdown: done\n");
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	smp_send_stop();

	local_irq_disable();
	while (1);
}

extern int reboot_pid;

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	struct task_struct *tsk;

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();
//[BUGFIX]-Add-BEGIN by SCDTABLET.(fangyou.wang),10/10/2015,1097303,
//auto reboot after power off
	force_enable_uart_log();
//[BUGFIX]-Add-END by SCDTABLET.(fangyou.wang)

	smp_send_stop();

	if (reboot_pid > 1) {
		tsk = find_task_by_vpid(reboot_pid);
		if (tsk == NULL)
			tsk = current;
		dump_stack();
	} else {
		tsk = current;
	}

	if (tsk->real_parent) {
		if (tsk->real_parent->real_parent) {
			printk("machine_power_off: start, Process(%s:%d). father %s:%d. grandfather %s:%d.\n",
				tsk->comm, tsk->pid, tsk->real_parent->comm, tsk->real_parent->pid,
				tsk->real_parent->real_parent->comm, tsk->real_parent->real_parent->pid);
		} else {
			printk("machine_power_off: start, Process(%s:%d). father %s:%d.\n",
				tsk->comm, tsk->pid, tsk->real_parent->comm, tsk->real_parent->pid);
		}
	} else {
		printk("machine_power_off: start, Process(%s:%d)\n", tsk->comm, tsk->pid);
	}

#ifdef CONFIG_MTK_EMMC_SUPPORT
	last_kmsg_store_to_emmc();
#endif

	if (pm_power_off)
		pm_power_off();
}

/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	struct task_struct *tsk;

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	smp_send_stop();

	if (reboot_pid > 1) {
		tsk = find_task_by_vpid(reboot_pid);
		if (tsk == NULL)
			tsk = current;
		dump_stack();
	} else {
		tsk = current;
	}

	if (tsk->real_parent) {
		if (tsk->real_parent->real_parent) {
			printk("machine_restart: start, Process(%s:%d). father %s:%d. grandfather %s:%d.\n",
				tsk->comm, tsk->pid, tsk->real_parent->comm, tsk->real_parent->pid,
				tsk->real_parent->real_parent->comm, tsk->real_parent->real_parent->pid);
		} else {
			printk("machine_restart: start, Process(%s:%d). father %s:%d.\n",
				tsk->comm, tsk->pid, tsk->real_parent->comm, tsk->real_parent->pid);
		}
	} else {
		printk("machine_restart: start, Process(%s:%d)\n", tsk->comm, tsk->pid);
	}

	/* Flush the console to make sure all the relevant messages make it
	 * out to the console drivers */
	arm_machine_flush_console();

	arm_pm_restart(reboot_mode, cmd);

	/* Give a grace period for failure to restart of 1s */
	mdelay(1000);

	/* Whoops - the platform was unable to reboot. Tell the user! */
	printk("Reboot failed -- System halted\n");
	local_irq_disable();
	while (1);
}

/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
	int	i, j;
	int	nlines;
	u32	*p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < PAGE_OFFSET || addr > -256UL)
		return;

	printk("\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;

	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		printk("%04lx ", (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32 data;
			if (probe_kernel_address(p, data)) {
				printk(" ********");
			} else {
				printk(" %08x", data);
			}
			++p;
		}
		printk("\n");
	}
}

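/*
 * Dump the memory around the value held in each register of the passed
 * pt_regs, temporarily switching to KERNEL_DS so that kernel addresses can
 * be read via probe_kernel_address().
 */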
static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
	mm_segment_t fs;

	fs = get_fs();
	set_fs(KERNEL_DS);
	show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
	show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
	show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
	show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
	show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
	show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
	show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
	show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
	show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
	show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
	show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
	show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
	show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
	show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
	show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
	show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
	set_fs(fs);
}

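/*
 * Print the full register state, the CPSR flag/mode decode and (where CP15
 * is present) the control, translation table base and DAC registers,
 * followed by the memory around each register.
 */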
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];

	show_regs_print_info(KERN_DEFAULT);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

	printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase, dac;
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    "mrc p15, 0, %1, c3, c0\n"
			    : "=r" (transbase), "=r" (dac));
			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
				transbase, dac);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif

	show_extra_register_data(regs, 128);
}

void show_regs(struct pt_regs * regs)
{
	printk("\n");
	__show_regs(regs);
	dump_stack();
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}

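/*
 * Reset the architecture-specific thread state of the current task
 * (coprocessor usage, debug registers, FP state, hardware breakpoints),
 * e.g. when it execs a new image.
 */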
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

void release_thread(struct task_struct *dead_task)
{
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

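/*
 * Set up the new task's thread state for fork/clone: user threads get a
 * copy of the parent's registers with r0 = 0 (the child's return value)
 * and optionally a new stack pointer; kernel threads get a cleared frame
 * with stack_start/stk_sz parked in r5/r4.  Both resume in ret_from_fork.
 */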
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = childregs->ARM_r3;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}

/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);

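/*
 * Walk a sleeping task's saved stack frames (at most 16) and return the
 * first PC outside the scheduler, i.e. the place where the task is waiting.
 */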
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count ++ < 16);
	return 0;
}
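
/*
 * Randomize the heap start: pick an address in the 32MB (0x02000000) window
 * above the current brk, falling back to the unrandomized value.
 */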
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}

#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= 0xffff0000,
	.vm_end		= 0xffff0000 + PAGE_SIZE,
	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
};

static int __init gate_vma_init(void)
{
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	return 0;
}
arch_initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif

const char *arch_vma_name(struct vm_area_struct *vma)
{
	return is_gate_vma(vma) ? "[vectors]" :
		(vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
		 "[sigpage]" : NULL;
}

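/*
 * The signal return trampoline lives in a single page (the "[sigpage]"
 * above); arch_setup_additional_pages() maps it at a free address in each
 * new user process and records it in mm->context.sigpage.
 */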
static struct page *signal_page;
extern struct page *get_signal_page(void);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&signal_page);

	if (ret == 0)
		mm->context.sigpage = addr;

 up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif