/*
 * arch/avr32/kernel/process.c
 * (extracted from commit 073c3c2fa521a4c680e175144885a07c5daff28b,
 *  GitHub/mt8127/android_kernel_alcatel_ttab.git)
 */
1 /*
2 * Copyright (C) 2004-2006 Atmel Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8 #include <linux/sched.h>
9 #include <linux/module.h>
10 #include <linux/kallsyms.h>
11 #include <linux/fs.h>
12 #include <linux/pm.h>
13 #include <linux/ptrace.h>
14 #include <linux/slab.h>
15 #include <linux/reboot.h>
16 #include <linux/tick.h>
17 #include <linux/uaccess.h>
18 #include <linux/unistd.h>
19
20 #include <asm/sysreg.h>
21 #include <asm/ocd.h>
22 #include <asm/syscalls.h>
23
24 #include <mach/pm.h>
25
/*
 * Optional board/platform power-off handler; machine_power_off()
 * below calls it when non-NULL.  Exported so platform modules can
 * install their own hook.
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);
28
29 /*
30 * This file handles the architecture-dependent parts of process handling..
31 */
32
/*
 * Architecture idle-loop hook: put the CPU into its low-power idle
 * state until the next interrupt.  cpu_enter_idle() is provided by
 * the platform code pulled in via <mach/pm.h>.
 */
void arch_cpu_idle(void)
{
	cpu_enter_idle();
}
37
/*
 * Halt the machine by dropping into the deepest sleep state.
 */
void machine_halt(void)
{
	/*
	 * Enter Stop mode. The 32 kHz oscillator will keep running so
	 * the RTC will keep the time properly and the system will
	 * boot quickly.
	 */
	asm volatile("sleep 3\n\t"
		     /*
		      * NOTE(review): the trailing "sub pc, -2" appears to
		      * act as a pipeline flush / landing pad after wakeup —
		      * confirm against the AVR32 architecture manual.
		      */
		     "sub pc, -2");
}
48
49 void machine_power_off(void)
50 {
51 if (pm_power_off)
52 pm_power_off();
53 }
54
/*
 * Restart the machine via the On-Chip Debug system: set the DBE bit
 * and then the RES bit in the OCD DC register to request a chip
 * reset, then spin until the reset actually takes effect.
 *
 * @cmd: restart command string from userspace; ignored here.
 */
void machine_restart(char *cmd)
{
	ocd_write(DC, (1 << OCD_DC_DBE_BIT));
	ocd_write(DC, (1 << OCD_DC_RES_BIT));
	while (1) ;	/* wait for the reset to hit */
}
61
/*
 * Free current thread data structures etc.  On this port the only
 * per-thread teardown needed is releasing the On-Chip Debug (OCD)
 * state held for the exiting task.
 */
void exit_thread(void)
{
	ocd_disable(current);
}
69
/*
 * Reset architecture-specific thread state on exec; AVR32 keeps no
 * such per-thread state, so this is a no-op.
 */
void flush_thread(void)
{
	/* nothing to do */
}
74
/*
 * Final per-architecture cleanup when a dead task's thread
 * resources are released; nothing to free on this architecture.
 *
 * @dead_task: the task being torn down.
 */
void release_thread(struct task_struct *dead_task)
{
	/* do nothing */
}
79
/*
 * Hex-dump the memory range [bottom, top) to the console, eight
 * 32-bit words per line, each line prefixed with the low 16 bits of
 * the word's address.  Words are fetched with __get_user() so a
 * fault on an unmapped address aborts the dump instead of oopsing.
 *
 * @str:     heading printed before the dump
 * @log_lvl: printk log-level prefix applied to every line
 * @bottom:  first address of interest
 * @top:     one past the last address of interest
 */
static void dump_mem(const char *str, const char *log_lvl,
		unsigned long bottom, unsigned long top)
{
	unsigned long p;
	int i;

	printk("%s%s(0x%08lx to 0x%08lx)\n", log_lvl, str, bottom, top);

	/* round down to a 32-byte boundary so columns line up */
	for (p = bottom & ~31; p < top; ) {
		printk("%s%04lx: ", log_lvl, p & 0xffff);

		for (i = 0; i < 8; i++, p += 4) {
			unsigned int val;

			if (p < bottom || p >= top)
				/*
				 * Pad columns outside the requested range.
				 * NOTE(review): the pad string's width looks
				 * collapsed by extraction; it should match
				 * the "%08x " field width — confirm.
				 */
				printk(" ");
			else {
				if (__get_user(val, (unsigned int __user *)p)) {
					/* faulted: finish the line, stop dumping */
					printk("\n");
					goto out;
				}
				printk("%08x ", val);
			}
		}
		printk("\n");
	}

out:
	return;
}
110
111 static inline int valid_stack_ptr(struct thread_info *tinfo, unsigned long p)
112 {
113 return (p > (unsigned long)tinfo)
114 && (p < (unsigned long)tinfo + THREAD_SIZE - 3);
115 }
116
#ifdef CONFIG_FRAME_POINTER
/*
 * Print a call trace by walking the frame-pointer (r7) chain.  The
 * walk reads the saved lr at fp+0 and the caller's frame pointer at
 * fp+4 (NOTE(review): frame layout inferred from this walk — confirm
 * against the AVR32 ABI / function prologue).
 *
 * @tsk:     task whose stack is examined
 * @sp:      unused in the frame-pointer variant
 * @regs:    if non-NULL, take the initial fp from this saved frame
 * @log_lvl: printk log-level prefix for every line
 */
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long lr, fp;
	struct thread_info *tinfo;

	/*
	 * Starting frame pointer: from the exception frame, from the
	 * live r7 for the current task, or from the value saved at the
	 * task's last context switch.
	 */
	if (regs)
		fp = regs->r7;
	else if (tsk == current)
		asm("mov %0, r7" : "=r"(fp));
	else
		fp = tsk->thread.cpu_context.r7;

	/*
	 * Walk the stack as long as the frame pointer (a) is within
	 * the kernel stack of the task, and (b) it doesn't move
	 * downwards.
	 */
	tinfo = task_thread_info(tsk);
	printk("%sCall trace:\n", log_lvl);
	while (valid_stack_ptr(tinfo, fp)) {
		unsigned long new_fp;

		lr = *(unsigned long *)fp;
#ifdef CONFIG_KALLSYMS
		printk("%s [<%08lx>] ", log_lvl, lr);
#else
		printk(" [<%08lx>] ", lr);
#endif
		print_symbol("%s\n", lr);

		/* next frame must be strictly higher on the stack */
		new_fp = *(unsigned long *)(fp + 4);
		if (new_fp <= fp)
			break;
		fp = new_fp;
	}
	printk("\n");
}
#else
/*
 * Frame-pointer-less fallback: scan the stack upward from @sp and
 * print every word that looks like a kernel text address.  Noisier
 * than the fp walk since stale return addresses get printed too.
 */
static void show_trace_log_lvl(struct task_struct *tsk, unsigned long *sp,
			       struct pt_regs *regs, const char *log_lvl)
{
	unsigned long addr;

	printk("%sCall trace:\n", log_lvl);

	while (!kstack_end(sp)) {
		addr = *sp++;
		if (kernel_text_address(addr)) {
#ifdef CONFIG_KALLSYMS
			printk("%s [<%08lx>] ", log_lvl, addr);
#else
			printk(" [<%08lx>] ", addr);
#endif
			print_symbol("%s\n", addr);
		}
	}
	printk("\n");
}
#endif
178
/*
 * Dump the raw stack contents and a call trace for @tsk.
 *
 * @sp:      stack pointer to start from; 0 means "work it out": use
 *           the task's saved kernel sp, or — when no task is given
 *           either — the address of a local variable as a rough
 *           approximation of the current stack pointer.
 * @regs:    optional saved register frame, forwarded to the tracer
 * @log_lvl: printk log-level prefix
 */
void show_stack_log_lvl(struct task_struct *tsk, unsigned long sp,
			struct pt_regs *regs, const char *log_lvl)
{
	struct thread_info *tinfo;

	if (sp == 0) {
		if (tsk)
			sp = tsk->thread.cpu_context.ksp;
		else
			/* no task: this frame's address stands in for sp */
			sp = (unsigned long)&tinfo;
	}
	if (!tsk)
		tsk = current;

	tinfo = task_thread_info(tsk);

	/* only dump when sp really points into tsk's stack */
	if (valid_stack_ptr(tinfo, sp)) {
		dump_mem("Stack: ", log_lvl, sp,
			 THREAD_SIZE + (unsigned long)tinfo);
		show_trace_log_lvl(tsk, (unsigned long *)sp, regs, log_lvl);
	}
}
201
/*
 * Generic show_stack() entry point: dump @tsk's stack starting at
 * @stack (or a derived default when NULL) at the default log level.
 */
void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	show_stack_log_lvl(tsk, (unsigned long)stack, NULL, "");
}
206
/*
 * Print a call trace of the current context.  The address of a local
 * variable is used as the starting stack pointer.
 */
void dump_stack(void)
{
	unsigned long stack;

	show_trace_log_lvl(current, &stack, NULL, "");
}
EXPORT_SYMBOL(dump_stack);
214
/*
 * Human-readable names for the CPU execution mode, indexed by the
 * mode field extracted from the status register (see
 * show_regs_log_lvl()).  The pointer array itself is const as well,
 * so the whole table can live in read-only data.
 */
static const char * const cpu_modes[] = {
	"Application", "Supervisor", "Interrupt level 0", "Interrupt level 1",
	"Interrupt level 2", "Interrupt level 3", "Exception", "NMI"
};
219
/*
 * Pretty-print a full register dump at the given log level:
 * pc/lr/sp, r0-r12, the SR condition flags (capital letter = set),
 * the SR mode/mask bits, the CPU mode name and the current process.
 *
 * For kernel-mode frames the symbolic names of pc and lr are printed
 * too, and sp is recomputed from the exception frame address.
 */
void show_regs_log_lvl(struct pt_regs *regs, const char *log_lvl)
{
	unsigned long sp = regs->sp;
	unsigned long lr = regs->lr;
	unsigned long mode = (regs->sr & MODE_MASK) >> MODE_SHIFT;

	if (!user_mode(regs)) {
		/* saved sp is stale for kernel traps; derive from the frame */
		sp = (unsigned long)regs + FRAME_SIZE_FULL;

		printk("%s", log_lvl);
		print_symbol("PC is at %s\n", instruction_pointer(regs));
		printk("%s", log_lvl);
		print_symbol("LR is at %s\n", lr);
	}

	printk("%spc : [<%08lx>] lr : [<%08lx>] %s\n"
	       "%ssp : %08lx r12: %08lx r11: %08lx\n",
	       log_lvl, instruction_pointer(regs), lr, print_tainted(),
	       log_lvl, sp, regs->r12, regs->r11);
	printk("%sr10: %08lx r9 : %08lx r8 : %08lx\n",
	       log_lvl, regs->r10, regs->r9, regs->r8);
	printk("%sr7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
	       log_lvl, regs->r7, regs->r6, regs->r5, regs->r4);
	printk("%sr3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
	       log_lvl, regs->r3, regs->r2, regs->r1, regs->r0);
	/* condition flags: capital letter when the flag is set */
	printk("%sFlags: %c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_Q ? 'Q' : 'q',
	       regs->sr & SR_V ? 'V' : 'v',
	       regs->sr & SR_N ? 'N' : 'n',
	       regs->sr & SR_Z ? 'Z' : 'z',
	       regs->sr & SR_C ? 'C' : 'c');
	/* interrupt mask levels print their number when masked, '.' otherwise */
	printk("%sMode bits: %c%c%c%c%c%c%c%c%c%c\n", log_lvl,
	       regs->sr & SR_H ? 'H' : 'h',
	       regs->sr & SR_J ? 'J' : 'j',
	       regs->sr & SR_DM ? 'M' : 'm',
	       regs->sr & SR_D ? 'D' : 'd',
	       regs->sr & SR_EM ? 'E' : 'e',
	       regs->sr & SR_I3M ? '3' : '.',
	       regs->sr & SR_I2M ? '2' : '.',
	       regs->sr & SR_I1M ? '1' : '.',
	       regs->sr & SR_I0M ? '0' : '.',
	       regs->sr & SR_GM ? 'G' : 'g');
	printk("%sCPU Mode: %s\n", log_lvl, cpu_modes[mode]);
	printk("%sProcess: %s [%d] (task: %p thread: %p)\n",
	       log_lvl, current->comm, current->pid, current,
	       task_thread_info(current));
}
267
268 void show_regs(struct pt_regs *regs)
269 {
270 unsigned long sp = regs->sp;
271
272 if (!user_mode(regs))
273 sp = (unsigned long)regs + FRAME_SIZE_FULL;
274
275 show_regs_log_lvl(regs, "");
276 show_trace_log_lvl(current, (unsigned long *)sp, regs, "");
277 }
278 EXPORT_SYMBOL(show_regs);
279
/*
 * Fill in the FPU structure for a core dump.  This is easy — AVR32
 * has no FPU state, so return 0 ("no FPU record present").
 */
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpu)
{
	/* Not valid */
	return 0;
}
286
/*
 * Low-level entry points implemented in assembly (presumably in the
 * arch entry code); new tasks begin execution at one of the first
 * two, set up by copy_thread() below.
 */
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
asmlinkage void syscall_return(void);
290
291 int copy_thread(unsigned long clone_flags, unsigned long usp,
292 unsigned long arg,
293 struct task_struct *p)
294 {
295 struct pt_regs *childregs = task_pt_regs(p);
296
297 if (unlikely(p->flags & PF_KTHREAD)) {
298 memset(childregs, 0, sizeof(struct pt_regs));
299 p->thread.cpu_context.r0 = arg;
300 p->thread.cpu_context.r1 = usp; /* fn */
301 p->thread.cpu_context.r2 = syscall_return;
302 p->thread.cpu_context.pc = (unsigned long)ret_from_kernel_thread;
303 childregs->sr = MODE_SUPERVISOR;
304 } else {
305 *childregs = *current_pt_regs();
306 if (usp)
307 childregs->sp = usp;
308 childregs->r12 = 0; /* Set return value for child */
309 p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
310 }
311
312 p->thread.cpu_context.sr = MODE_SUPERVISOR | SR_GM;
313 p->thread.cpu_context.ksp = (unsigned long)childregs;
314
315 clear_tsk_thread_flag(p, TIF_DEBUG);
316 if ((clone_flags & CLONE_PTRACE) && test_thread_flag(TIF_DEBUG))
317 ocd_enable(p);
318
319 return 0;
320 }
321
/*
 * This function is supposed to answer the question "who called
 * schedule()?"
 *
 * Returns the PC at which @p is sleeping, stepping past scheduler
 * internals when the saved PC lies inside them, or 0 when the
 * question makes no sense (no task, the current task, or a runnable
 * task whose state is changing under us).
 */
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long pc;
	unsigned long stack_page;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)task_stack_page(p);
	BUG_ON(!stack_page);

	/*
	 * The stored value of PC is either the address right after
	 * the call to __switch_to() or ret_from_fork.
	 */
	pc = thread_saved_pc(p);
	if (in_sched_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		/* step one frame up the chain: saved lr lives at *fp */
		unsigned long fp = p->thread.cpu_context.r7;
		BUG_ON(fp < stack_page || fp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)fp;
#else
		/*
		 * We depend on the frame size of schedule here, which
		 * is actually quite ugly. It might be possible to
		 * determine the frame size automatically at build
		 * time by doing this:
		 * - compile sched.c
		 * - disassemble the resulting sched.o
		 * - look for 'sub sp,??' shortly after '<schedule>:'
		 */
		unsigned long sp = p->thread.cpu_context.ksp + 16;
		BUG_ON(sp < stack_page || sp > (THREAD_SIZE + stack_page));
		pc = *(unsigned long *)sp;
#endif
	}

	return pc;
}