Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/arm/kernel/process.c | |
3 | * | |
4 | * Copyright (C) 1996-2000 Russell King - Converted to ARM. | |
5 | * Original Copyright (C) 1995 Linus Torvalds | |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify | |
8 | * it under the terms of the GNU General Public License version 2 as | |
9 | * published by the Free Software Foundation. | |
10 | */ | |
11 | #include <stdarg.h> | |
12 | ||
ecea4ab6 | 13 | #include <linux/export.h> |
1da177e4 LT |
14 | #include <linux/sched.h> |
15 | #include <linux/kernel.h> | |
16 | #include <linux/mm.h> | |
17 | #include <linux/stddef.h> | |
18 | #include <linux/unistd.h> | |
1da177e4 | 19 | #include <linux/user.h> |
1da177e4 LT |
20 | #include <linux/delay.h> |
21 | #include <linux/reboot.h> | |
22 | #include <linux/interrupt.h> | |
23 | #include <linux/kallsyms.h> | |
24 | #include <linux/init.h> | |
a054a811 | 25 | #include <linux/cpu.h> |
84dff1a7 | 26 | #include <linux/elfcore.h> |
74617fb6 | 27 | #include <linux/pm.h> |
9e4559dd | 28 | #include <linux/tick.h> |
154c772e | 29 | #include <linux/utsname.h> |
33fa9b13 | 30 | #include <linux/uaccess.h> |
990cb8ac | 31 | #include <linux/random.h> |
864232fa | 32 | #include <linux/hw_breakpoint.h> |
a0bfa137 | 33 | #include <linux/cpuidle.h> |
fa8bbb13 | 34 | #include <linux/leds.h> |
6fa3eb70 S |
35 | #include <linux/console.h> |
36 | #include <linux/mtk_ram_console.h> | |
1da177e4 | 37 | |
9ca03a21 | 38 | #include <asm/cacheflush.h> |
9ecb47de | 39 | #include <asm/idmap.h> |
1da177e4 | 40 | #include <asm/processor.h> |
d6551e88 | 41 | #include <asm/thread_notify.h> |
2d7c11bf | 42 | #include <asm/stacktrace.h> |
2ea83398 | 43 | #include <asm/mach/time.h> |
6fa3eb70 | 44 | #include <mach/system.h> |
1da177e4 | 45 | |
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/*
 * Canary value for GCC's -fstack-protector; checked on function exit to
 * detect stack smashing.  Exported so module code sees the same value.
 */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
51 | ||
/* Human-readable names for the CPSR processor mode field, indexed by the
 * mode bits; "UKn" entries are unused/undefined encodings. */
static const char *processor_modes[] = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

/* Instruction-set state names, indexed by the CPSR J/T bits. */
static const char *isa_modes[] = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
62 | ||
#ifdef CONFIG_SMP
/* Ask every online CPU (via IPI) to dump its backtrace. */
void arch_trigger_all_cpu_backtrace(void)
{
	smp_send_all_cpu_backtrace();
}
#else
/* UP build: only this CPU exists, so dump the local stack directly. */
void arch_trigger_all_cpu_backtrace(void)
{
	dump_stack();
}
#endif
74 | ||
/* Assembly trampoline: switch the stack pointer to @sp, then call fn(arg). */
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
/* Shape of cpu_reset() when invoked through its physical (identity) alias. */
typedef void (*phys_reset_t)(unsigned long);
77 | ||
#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
/*
 * Push pending kernel log output to the console drivers before restart so
 * the final messages are not lost.  If the console lock is free, a
 * trylock/unlock pair flushes and we are done.  Otherwise wait briefly for
 * the holder, then forcibly "bust" the lock with console_unlock() even if
 * the second trylock failed — safe only here, on the restart path, where
 * no further console users are expected.
 */
void arm_machine_flush_console(void)
{
	printk("\n");
	pr_emerg("Restarting %s\n", linux_banner);
	if (console_trylock()) {
		console_unlock();
		return;
	}

	mdelay(50);

	local_irq_disable();
	if (!console_trylock())
		pr_emerg("arm_restart: Console was locked! Busting\n");
	else
		pr_emerg("arm_restart: Console was locked!\n");
	console_unlock();
}
#else
/* Console flushing disabled in this configuration: nothing to do. */
void arm_machine_flush_console(void)
{
}
#endif
102 | ||
/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];
111 | ||
/*
 * MTK-specific machine restart: flush the console and caches, disable the
 * outer (L2) cache, then hand control to the platform's arch_reset().
 * Does not return; spins forever if the platform fails to reset.
 */
void arm_machine_restart(char mode, const char *cmd)
{
	/* Flush the console to make sure all the relevant messages make it
	 * out to the console drivers */
	arm_machine_flush_console();

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/*
	 * Tell the mm system that we are going to reboot -
	 * we may need it to insert some 1:1 mappings so that
	 * soft boot works.
	 */
	setup_mm_for_reboot();

	/* When L1 is disabled and L2 is enabled, the spinlock cannot get the
	 * lock, so we need to disable the L2 as well. by Chia-Hao Hsu
	 */
	outer_flush_all();
	outer_disable();
	outer_flush_all();

	/* Clean and invalidate caches */
	flush_cache_all();
#ifdef CONFIG_RESTART_DISABLE_CACHE
	/* Turn off caching */
	/* cpu_proc_fin(); -- don't turn the cache off during the reboot
	 * phase: Cortex-A15 is at risk if the cache is disabled here. */
#endif
	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/*
	 * Now call the architecture specific reboot code.
	 */
	arch_reset(mode, cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 * Tell the user!
	 */
	mdelay(1000);
	printk("Reboot failed -- System halted\n");
	while (1);
}
158 | ||
/*
 * Final stage of soft_restart(), executed on the temporary reset stack:
 * install a flat (identity) mapping, turn the caches off, then jump to
 * @addr through the physical alias of cpu_reset().  Never returns.
 */
static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn off caching */
	cpu_proc_fin();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
	BUG();
}
182 | ||
/*
 * Reset the CPU by jumping to @addr (a physical address) with the MMU and
 * caches disabled.  Runs the final steps on a private static stack so the
 * identity mapping cannot clobber the live stack.  Never returns.
 */
void soft_restart(unsigned long addr)
{
	/* Stack grows down: start at the top of the reserved area. */
	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* Disable the L2 if we're the last man standing. */
	if (num_online_cpus() == 1) {
		outer_flush_all();
		outer_disable();
	}

	/* Change to the new stack and continue with the reset. */
	call_with_stack(__soft_restart, (void *)addr, (void *)stack);

	/* Should never get here. */
	BUG();
}
203 | ||
/* Default arm_pm_restart hook: no-op for platforms that register none. */
static void null_restart(char mode, const char *cmd)
{
}
207 | ||
/*
 * Function pointers to optional machine specific functions
 */
/* Platform power-off hook; NULL if the platform cannot power itself off. */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* Platform restart hook; defaults to a no-op until a platform registers one. */
void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
216 | ||
/*
 * This is our default idle handler.
 */

/* Optional platform idle override; NULL means fall back to cpu_do_idle(). */
void (*arm_pm_idle)(void);
84dff1a7 | 223 | static void default_idle(void) |
1da177e4 | 224 | { |
4fa20439 NP |
225 | if (arm_pm_idle) |
226 | arm_pm_idle(); | |
227 | else | |
ae940913 | 228 | cpu_do_idle(); |
9ccdac36 | 229 | local_irq_enable(); |
1da177e4 LT |
230 | } |
231 | ||
/* Per-CPU idle-loop setup: ensure FIQs are enabled on this CPU. */
void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}
1da177e4 | 236 | |
/* Called by the core idle loop just before this CPU goes idle. */
void arch_cpu_idle_enter(void)
{
	idle_notifier_call_chain(IDLE_START);
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	/* PL310 erratum 769419: drain the write buffer before idling. */
	wmb();
#endif
}
a054a811 | 245 | |
/* Called by the core idle loop just after this CPU leaves idle. */
void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
	idle_notifier_call_chain(IDLE_END);
}
251 | ||
#ifdef CONFIG_HOTPLUG_CPU
/* Idle-loop entry point for a CPU that has been taken offline. */
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif
/*
 * Called from the core idle loop.
 */
void arch_cpu_idle(void)
{
	/* Prefer cpuidle; fall back to default_idle() if it is unavailable
	 * or returns an error. */
	if (cpuidle_idle_call())
		default_idle();
}
267 | ||
268 | static char reboot_mode = 'h'; | |
269 | ||
270 | int __init reboot_setup(char *str) | |
271 | { | |
272 | reboot_mode = str[0]; | |
273 | return 1; | |
274 | } | |
275 | ||
276 | __setup("reboot=", reboot_setup); | |
277 | ||
/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() is used to achieve this.
 */
void machine_shutdown(void)
{
#ifdef CONFIG_SMP
	/*
	 * Disable preemption so we're guaranteed to
	 * run to power off or reboot and prevent
	 * the possibility of switching to another
	 * thread that might wind up blocking on
	 * one of the stopped CPUs.
	 */
	printk("machine_shutdown: start, Process(%s:%d)\n", current->comm, current->pid);
	dump_stack();
	preempt_disable();
#endif
	disable_nonboot_cpus();
	printk("machine_shutdown: done\n");
}
304 | ||
/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	smp_send_stop();

	/* Park this (last) CPU with interrupts off; never returns. */
	local_irq_disable();
	while (1);
}
1da177e4 | 317 | |
6fa3eb70 | 318 | extern int reboot_pid; |
19ab428f SW |
319 | /* |
320 | * Power-off simply requires that the secondary CPUs stop performing any | |
321 | * activity (executing tasks, handling interrupts). smp_send_stop() | |
322 | * achieves this. When the system power is turned off, it will take all CPUs | |
323 | * with it. | |
324 | */ | |
1da177e4 LT |
325 | void machine_power_off(void) |
326 | { | |
6fa3eb70 S |
327 | struct task_struct *tsk; |
328 | ||
329 | /* Disable interrupts first */ | |
330 | local_irq_disable(); | |
331 | local_fiq_disable(); | |
332 | ||
333 | smp_send_stop(); | |
334 | if(reboot_pid > 1) | |
335 | { | |
336 | tsk = find_task_by_vpid(reboot_pid); | |
337 | if(tsk == NULL) | |
338 | tsk = current; | |
339 | dump_stack(); | |
340 | } | |
341 | else | |
342 | { | |
343 | tsk = current; | |
344 | } | |
345 | ||
346 | if(tsk->real_parent) | |
347 | { | |
348 | if(tsk->real_parent->real_parent) | |
349 | { | |
350 | printk("machine_shutdown: start, Proess(%s:%d). father %s:%d. grandfather %s:%d.\n", | |
351 | tsk->comm, tsk->pid,tsk->real_parent->comm,tsk->real_parent->pid, | |
352 | tsk->real_parent->real_parent->comm,tsk->real_parent->real_parent->pid); | |
353 | } | |
354 | else | |
355 | { | |
356 | printk("machine_shutdown: start, Proess(%s:%d). father %s:%d.\n", | |
357 | tsk->comm, tsk->pid,tsk->real_parent->comm,tsk->real_parent->pid); | |
358 | } | |
359 | } | |
360 | else | |
361 | { | |
362 | printk("machine_shutdown: start, Proess(%s:%d)\n", tsk->comm, tsk->pid); | |
363 | } | |
364 | ||
365 | #ifdef CONFIG_MTK_EMMC_SUPPORT | |
366 | last_kmsg_store_to_emmc(); | |
367 | #endif | |
19ab428f | 368 | |
1da177e4 LT |
369 | if (pm_power_off) |
370 | pm_power_off(); | |
371 | } | |
372 | ||
19ab428f SW |
373 | /* |
374 | * Restart requires that the secondary CPUs stop performing any activity | |
375 | * while the primary CPU resets the system. Systems with a single CPU can | |
376 | * use soft_restart() as their machine descriptor's .restart hook, since that | |
377 | * will cause the only available CPU to reset. Systems with multiple CPUs must | |
378 | * provide a HW restart implementation, to ensure that all CPUs reset at once. | |
379 | * This is required so that any code running after reset on the primary CPU | |
380 | * doesn't have to co-ordinate with other CPUs to ensure they aren't still | |
381 | * executing pre-reset code, and using RAM that the primary CPU's code wishes | |
382 | * to use. Implementing such co-ordination would be essentially impossible. | |
383 | */ | |
be093beb | 384 | void machine_restart(char *cmd) |
1da177e4 | 385 | { |
6fa3eb70 S |
386 | struct task_struct *tsk; |
387 | /* Disable interrupts first */ | |
388 | local_irq_disable(); | |
389 | local_fiq_disable(); | |
390 | ||
19ab428f | 391 | smp_send_stop(); |
ac15e00b | 392 | |
6fa3eb70 S |
393 | if(reboot_pid > 1) |
394 | { | |
395 | tsk = find_task_by_vpid(reboot_pid); | |
396 | if(tsk == NULL) | |
397 | tsk = current; | |
398 | dump_stack(); | |
399 | } | |
400 | else | |
401 | { | |
402 | tsk = current; | |
403 | } | |
404 | ||
405 | if(tsk->real_parent) | |
406 | { | |
407 | if(tsk->real_parent->real_parent) | |
408 | { | |
409 | printk("machine_shutdown: start, Proess(%s:%d). father %s:%d. grandfather %s:%d.\n", | |
410 | tsk->comm, tsk->pid,tsk->real_parent->comm,tsk->real_parent->pid, | |
411 | tsk->real_parent->real_parent->comm,tsk->real_parent->real_parent->pid); | |
412 | } | |
413 | else | |
414 | { | |
415 | printk("machine_shutdown: start, Proess(%s:%d). father %s:%d.\n", | |
416 | tsk->comm, tsk->pid,tsk->real_parent->comm,tsk->real_parent->pid); | |
417 | } | |
418 | } | |
419 | else | |
420 | { | |
421 | printk("machine_shutdown: start, Proess(%s:%d)\n", tsk->comm, tsk->pid); | |
422 | } | |
423 | ||
424 | /* Flush the console to make sure all the relevant messages make it | |
425 | * out to the console drivers */ | |
426 | arm_machine_flush_console(); | |
427 | ||
be093beb | 428 | arm_pm_restart(reboot_mode, cmd); |
ac15e00b RK |
429 | |
430 | /* Give a grace period for failure to restart of 1s */ | |
431 | mdelay(1000); | |
432 | ||
433 | /* Whoops - the platform was unable to reboot. Tell the user! */ | |
434 | printk("Reboot failed -- System halted\n"); | |
98bd8b96 | 435 | local_irq_disable(); |
ac15e00b | 436 | while (1); |
1da177e4 LT |
437 | } |
438 | ||
/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
	int i, j;
	int nlines;
	u32 *p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < PAGE_OFFSET || addr > -256UL)
		return;

	printk("\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;


	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		printk("%04lx ", (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32 data;
			/* probe_kernel_address() reads safely: a word that is
			 * not mapped prints as "********" instead of faulting. */
			if (probe_kernel_address(p, data)) {
				printk(" ********");
			} else {
				printk(" %08x", data);
			}
			++p;
		}
		printk("\n");
	}
}
484 | ||
485 | static void show_extra_register_data(struct pt_regs *regs, int nbytes) | |
486 | { | |
487 | mm_segment_t fs; | |
488 | ||
489 | fs = get_fs(); | |
490 | set_fs(KERNEL_DS); | |
491 | show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC"); | |
492 | show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR"); | |
493 | show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP"); | |
494 | show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP"); | |
495 | show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP"); | |
496 | show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0"); | |
497 | show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1"); | |
498 | show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2"); | |
499 | show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3"); | |
500 | show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4"); | |
501 | show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5"); | |
502 | show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6"); | |
503 | show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7"); | |
504 | show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8"); | |
505 | show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9"); | |
506 | show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10"); | |
507 | set_fs(fs); | |
508 | } | |
509 | ||
/*
 * Print the full register dump for @regs: symbolic PC/LR, raw register
 * values, decoded CPSR flags, CP15 control/translation state (when built
 * with CP15 support), and a memory window around each register.
 */
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];

	show_regs_print_info(KERN_DEFAULT);

	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	/* Decode the CPSR condition flags into e.g. "nZCv". */
	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

	printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase, dac;
			/* Read the translation table base (c2) and domain
			 * access control (c3) registers. */
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    "mrc p15, 0, %1, c3, c0\n"
			    : "=r" (transbase), "=r" (dac));
			snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
				transbase, dac);
		}
#endif
		/* Read the CP15 control register (c1). */
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif

	show_extra_register_data(regs, 128);
}
569 | ||
/* Print a separator, the full register dump, and the caller's backtrace. */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	__show_regs(regs);
	dump_stack();
}
576 | ||
/* Notifier chain fired on thread lifecycle events (flush/exit/copy),
 * used by VFP and other per-thread coprocessor state owners. */
ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);
580 | ||
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Give coprocessor state owners a chance to release their state. */
	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}
588 | ||
/* Reset the current thread's architectural state at exec time: clear
 * hardware breakpoints, coprocessor usage, debug and FP state. */
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	/* Let registered parties (e.g. VFP) reset their per-thread state. */
	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
602 | ||
/* No ARM-specific resources to release for a dead task. */
void release_thread(struct task_struct *dead_task)
{
}
606 | ||
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Set up the saved CPU context and child pt_regs for a newly copied task.
 * User threads inherit the parent's registers with r0 forced to 0 (so the
 * child's fork() returns 0); kernel threads start with a clean register
 * set in SVC mode, with the thread function and argument stashed in r4/r5
 * for the ret_from_fork assembly path to consume.  Returns 0.
 */
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;	/* child sees fork() == 0 */
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		/* Kernel thread: stk_sz/stack_start here carry the thread
		 * function's argument and entry point. */
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = stk_sz;
		thread->cpu_context.r5 = stack_start;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	/* CLONE_SETTLS passes the new TLS value in r3. */
	if (clone_flags & CLONE_SETTLS)
		thread->tp_value = childregs->ARM_r3;

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}
641 | ||
/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;	/* registers are always available */
}
650 | ||
/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	/* CP1/CP2 usage flags record whether soft-FP state was ever touched. */
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);
665 | ||
/*
 * Return the PC of the non-scheduler function that sleeping task @p is
 * blocked in (for /proc/<pid>/wchan), or 0 if the task is running or its
 * stack cannot be unwound.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		/* Stop if the frame escapes the task's stack or the unwinder
		 * fails; the walk is bounded to 16 frames. */
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count ++ < 16);
	return 0;
}
990cb8ac NP |
689 | |
690 | unsigned long arch_randomize_brk(struct mm_struct *mm) | |
691 | { | |
692 | unsigned long range_end = mm->brk + 0x02000000; | |
693 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; | |
694 | } | |
ec706dab | 695 | |
6cde6d42 | 696 | #ifdef CONFIG_MMU |
75bc4446 | 697 | #ifdef CONFIG_KUSER_HELPERS |
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma = {
	.vm_start	= 0xffff0000,
	.vm_end		= 0xffff0000 + PAGE_SIZE,
	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
};

/* Finish gate_vma setup once page protection constants are usable. */
static int __init gate_vma_init(void)
{
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	return 0;
}
arch_initcall(gate_vma_init);
715 | ||
/* The vectors page is the only gate VMA on ARM; @mm is unused. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

/* True if @addr lies inside the vectors page; @mm is unused. */
int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

/* mm-less variant used by core VM code. */
int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif
ec706dab NP |
734 | |
735 | const char *arch_vma_name(struct vm_area_struct *vma) | |
736 | { | |
75bc4446 | 737 | return is_gate_vma(vma) ? "[vectors]" : |
a5510daa RK |
738 | (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ? |
739 | "[sigpage]" : NULL; | |
740 | } | |
741 | ||
/* Page holding the signal return trampolines; allocated lazily on first exec. */
static struct page *signal_page;
extern struct page *get_signal_page(void);

/*
 * Map the signal trampoline page into a new process's address space at
 * exec time and record its address in mm->context.sigpage.
 * Returns 0 on success or a negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* Writable MAY flags let the page be COWed if needed, but the
	 * mapping itself is read/exec only. */
	ret = install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&signal_page);

	if (ret == 0)
		mm->context.sigpage = addr;

 up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
6cde6d42 | 774 | #endif |