// TODO verify coprocessor handling
/*
 * arch/xtensa/kernel/process.c
 *
 * Xtensa Processor version.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel <chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 */

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/module.h>
#include <linux/mqueue.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/platform.h>
#include <asm/mmu.h>
#include <asm/irq.h>
#include <asm/atomic.h>
#include <asm/asm-offsets.h>
#include <asm/coprocessor.h>

extern void ret_from_fork(void);

static struct fs_struct init_fs = INIT_FS;
static struct files_struct init_files = INIT_FILES;
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
struct mm_struct init_mm = INIT_MM(init_mm);
EXPORT_SYMBOL(init_mm);

union thread_union init_thread_union
        __attribute__((__section__(".data.init_task"))) =
{ INIT_THREAD_INFO(init_task) };

struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);

struct task_struct *current_set[NR_CPUS] = {&init_task, };


#if XCHAL_CP_NUM > 0

/*
 * Coprocessor ownership.
 */
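
/*
 * Each entry pairs the task that currently owns the live contents of
 * coprocessor N (0 when unowned) with the byte offset of that
 * coprocessor's save area within thread.cp_save; do_save_fpregs() and
 * do_restore_fpregs() below use both fields.
 */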

coprocessor_info_t coprocessor_info[] = {
        { 0, XTENSA_CPE_CP0_OFFSET },
        { 0, XTENSA_CPE_CP1_OFFSET },
        { 0, XTENSA_CPE_CP2_OFFSET },
        { 0, XTENSA_CPE_CP3_OFFSET },
        { 0, XTENSA_CPE_CP4_OFFSET },
        { 0, XTENSA_CPE_CP5_OFFSET },
        { 0, XTENSA_CPE_CP6_OFFSET },
        { 0, XTENSA_CPE_CP7_OFFSET },
};

#endif

/*
 * Power management idle function, if any is provided by the platform.
 */
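
/*
 * platform_idle() is the platform's low-power wait hook (see
 * <asm/platform.h>); it is expected to return once an interrupt has been
 * taken, after which the loop below rechecks need_resched() and calls
 * schedule() when a reschedule is due.
 */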

void cpu_idle(void)
{
        local_irq_enable();

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched())
                        platform_idle();
                preempt_enable();
                schedule();
        }
}

/*
 * Free current thread data structures etc..
 */

void exit_thread(void)
{
        release_coprocessors(current);  /* Empty macro if no CPs are defined */
}

void flush_thread(void)
{
        release_coprocessors(current);  /* Empty macro if no CPs are defined */
}

/*
 * Copy thread.
 *
 * The stack layout for the new thread looks like this:
 *
 *      +------------------------+ <- sp in childregs (= tos)
 *      |       childregs        |
 *      +------------------------+ <- thread.sp = sp in dummy-frame
 *      |      dummy-frame       |    (saved in dummy-frame spill-area)
 *      +------------------------+
 *
 * We create a dummy frame to return to ret_from_fork:
 *   a0 points to ret_from_fork (simulating a call4)
 *   sp points to itself (thread.sp)
 *   a2, a3 are unused.
 *
 * Note: This is a pristine frame, so we don't need any spill region on top of
 * childregs.
 */
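
/*
 * In byte terms (registers are 4 bytes wide): the two dummy-frame spill
 * slots written in copy_thread() below live at (char *)childregs - 16
 * (the a0 slot) and (char *)childregs - 12 (the a1 slot), i.e. at word
 * offsets -4 and -3 from the new frame's stack pointer.
 */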

int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        struct pt_regs *childregs;
        unsigned long tos;
        int user_mode = user_mode(regs);

        /* Set up new TSS. */
        tos = (unsigned long)p->thread_info + THREAD_SIZE;
        if (user_mode)
                childregs = (struct pt_regs*)(tos - PT_USER_SIZE);
        else
                childregs = (struct pt_regs*)tos - 1;

        *childregs = *regs;

        /* Create a call4 dummy-frame: a0 = 0, a1 = childregs. */
        *((int*)childregs - 3) = (unsigned long)childregs;
        *((int*)childregs - 4) = 0;

        childregs->areg[1] = tos;
        childregs->areg[2] = 0;
        p->set_child_tid = p->clear_child_tid = NULL;
        p->thread.ra = MAKE_RA_FOR_CALL((unsigned long)ret_from_fork, 0x1);
        p->thread.sp = (unsigned long)childregs;
        if (user_mode(regs)) {

                int len = childregs->wmask & ~0xf;
                childregs->areg[1] = usp;
                memcpy(&childregs->areg[XCHAL_NUM_AREGS - len/4],
                       &regs->areg[XCHAL_NUM_AREGS - len/4], len);

                if (clone_flags & CLONE_SETTLS)
                        childregs->areg[2] = childregs->areg[6];

        } else {
                /* In kernel space, we start a new thread with a new stack. */
                childregs->wmask = 1;
        }
        return 0;
}


/*
 * Create a kernel thread
 */

int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
        long retval;
        __asm__ __volatile__
                ("mov a5, %4\n\t"       /* preserve fn in a5 */
                 "mov a6, %3\n\t"       /* preserve and set up arg in a6 */
                 "movi a2, %1\n\t"      /* load __NR_clone for syscall */
                 "mov a3, sp\n\t"       /* sp check and sys_clone */
                 "mov a4, %5\n\t"       /* load flags for syscall */
                 "syscall\n\t"
                 "beq a3, sp, 1f\n\t"   /* branch if parent */
                 "callx4 a5\n\t"        /* call fn */
                 "movi a2, %2\n\t"      /* load __NR_exit for syscall */
                 "mov a3, a6\n\t"       /* load fn return value */
                 "syscall\n"
                 "1:\n\t"
                 "mov %0, a2\n\t"       /* parent: return value of the clone syscall */
                 : "=r" (retval)
                 : "i" (__NR_clone), "i" (__NR_exit),
                   "r" (arg), "r" (fn),
                   "r" (flags | CLONE_VM)
                 : "a2", "a3", "a4", "a5", "a6" );
        return retval;
}
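
/*
 * Illustrative use only (the callers live elsewhere in the tree): a caller
 * would do something like
 *
 *      kernel_thread(my_thread_fn, NULL, CLONE_FS | CLONE_FILES);
 *
 * where my_thread_fn is a hypothetical int (*)(void *). In the child, the
 * assembly above invokes fn via callx4 and then issues __NR_exit with fn's
 * return value; in the parent, kernel_thread() returns the result of the
 * clone syscall.
 */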


/*
 * These bracket the sleeping functions..
 */

unsigned long get_wchan(struct task_struct *p)
{
        unsigned long sp, pc;
        unsigned long stack_page = (unsigned long) p->thread_info;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;

        sp = p->thread.sp;
        pc = MAKE_PC_FROM_RA(p->thread.ra, p->thread.sp);

        do {
                if (sp < stack_page + sizeof(struct task_struct) ||
                    sp >= (stack_page + THREAD_SIZE) ||
                    pc == 0)
                        return 0;
                if (!in_sched_functions(pc))
                        return pc;

                /* Stack layout: sp-4: ra, sp-3: sp' */
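                /*
                 * Each spill slot is one 32-bit word: "sp-4" and "sp-3"
                 * above are word offsets, so the saved return address is
                 * read from sp - 16 bytes and the saved stack pointer
                 * from sp - 12 bytes.
                 */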

                pc = MAKE_PC_FROM_RA(*((unsigned long *)sp - 4), sp);
                sp = *((unsigned long *)sp - 3);
        } while (count++ < 16);
        return 0;
}

/*
 * do_copy_regs() gathers information from 'struct pt_regs' and
 * 'current->thread.areg[]' to fill in the xtensa_gregset_t
 * structure.
 *
 * xtensa_gregset_t and 'struct pt_regs' are vastly different formats
 * of processor registers. Besides different ordering,
 * xtensa_gregset_t contains non-live register information that
 * 'struct pt_regs' does not. Exception handling (primarily) uses
 * 'struct pt_regs'. Core files and ptrace use xtensa_gregset_t.
 *
 */

void do_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
                   struct task_struct *tsk)
{
        int i, n, wb_offset;

        elfregs->xchal_config_id0 = XCHAL_HW_CONFIGID0;
        elfregs->xchal_config_id1 = XCHAL_HW_CONFIGID1;

        __asm__ __volatile__ ("rsr %0, 176\n" : "=a" (i));
        elfregs->cpux = i;
        __asm__ __volatile__ ("rsr %0, 208\n" : "=a" (i));
        elfregs->cpuy = i;

        /* Note: PS.EXCM is not set while user task is running; its
         * being set in regs->ps is for exception handling convenience.
         */

        elfregs->pc = regs->pc;
        elfregs->ps = (regs->ps & ~XCHAL_PS_EXCM_MASK);
        elfregs->exccause = regs->exccause;
        elfregs->excvaddr = regs->excvaddr;
        elfregs->windowbase = regs->windowbase;
        elfregs->windowstart = regs->windowstart;
        elfregs->lbeg = regs->lbeg;
        elfregs->lend = regs->lend;
        elfregs->lcount = regs->lcount;
        elfregs->sar = regs->sar;
        elfregs->syscall = regs->syscall;

        /* Copy register file.
         * The layout looks like this:
         *
         * |  a0 ... a15  | Z ... Z |  arX ... arY  |
         *  current window   unused   saved frames
         */
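
        /*
         * Worked example (illustrative config with XCHAL_NUM_AREGS == 64):
         * windowbase == 10 gives wb_offset == 40, so the live a0..a3
         * (regs->areg[0..3]) land in elfregs->ar[40..43]; the index simply
         * wraps modulo XCHAL_NUM_AREGS for frames near the top of the
         * physical register file.
         */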

        memset (elfregs->ar, 0, sizeof(elfregs->ar));

        wb_offset = regs->windowbase * 4;
        n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

        for (i = 0; i < n; i++)
                elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];

        n = (regs->wmask >> 4) * 4;

        for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
                elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS] = regs->areg[i];
}

void xtensa_elf_core_copy_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs)
{
        do_copy_regs ((xtensa_gregset_t *)elfregs, regs, current);
}


/* The inverse of do_copy_regs(). No error or sanity checking. */

void do_restore_regs (xtensa_gregset_t *elfregs, struct pt_regs *regs,
                      struct task_struct *tsk)
{
        int i, n, wb_offset;

        /* Note: PS.EXCM is not set while user task is running; it
         * needs to be set in regs->ps for exception handling convenience.
         */

        regs->pc = elfregs->pc;
        regs->ps = (elfregs->ps | XCHAL_PS_EXCM_MASK);
        regs->exccause = elfregs->exccause;
        regs->excvaddr = elfregs->excvaddr;
        regs->windowbase = elfregs->windowbase;
        regs->windowstart = elfregs->windowstart;
        regs->lbeg = elfregs->lbeg;
        regs->lend = elfregs->lend;
        regs->lcount = elfregs->lcount;
        regs->sar = elfregs->sar;
        regs->syscall = elfregs->syscall;

        /* Clear everything. */

        memset (regs->areg, 0, sizeof(regs->areg));

        /* Copy regs from live window frame. */

        wb_offset = regs->windowbase * 4;
        n = (regs->wmask&1)? 4 : (regs->wmask&2)? 8 : (regs->wmask&4)? 12 : 16;

        for (i = 0; i < n; i++)
                regs->areg[i] = elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS];

        n = (regs->wmask >> 4) * 4;

        for (i = XCHAL_NUM_AREGS - n; n > 0; i++, n--)
                regs->areg[i] = elfregs->ar[(wb_offset + i) % XCHAL_NUM_AREGS];
}

/*
 * do_save_fpregs() gathers information from 'struct pt_regs' and
 * 'current->thread' to fill in the elf_fpregset_t structure.
 *
 * Core files and ptrace use elf_fpregset_t.
 */
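
/*
 * Layout note: as the two memcpy() calls in do_save_fpregs() show, the
 * buffer starts with a copy of _xtensa_reginfo_tables
 * (_xtensa_reginfo_table_size bytes describing the register layout),
 * followed by the raw coprocessor and "extra" state taken from
 * tsk->thread.cp_save (XTENSA_CP_EXTRA_SIZE bytes).
 */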

void do_save_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
                     struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

        extern unsigned char _xtensa_reginfo_tables[];
        extern unsigned _xtensa_reginfo_table_size;
        int i;
        unsigned long flags;

        /* Before dumping coprocessor state from memory,
         * ensure any live coprocessor contents for this
         * task are first saved to memory:
         */
        local_irq_save(flags);

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (tsk == coprocessor_info[i].owner) {
                        enable_coprocessor(i);
                        save_coprocessor_registers(
                            tsk->thread.cp_save+coprocessor_info[i].offset,i);
                        disable_coprocessor(i);
                }
        }

        local_irq_restore(flags);

        /* Now dump coprocessor & extra state: */
        memcpy((unsigned char*)fpregs,
               _xtensa_reginfo_tables, _xtensa_reginfo_table_size);
        memcpy((unsigned char*)fpregs + _xtensa_reginfo_table_size,
               tsk->thread.cp_save, XTENSA_CP_EXTRA_SIZE);
#endif
}

/*
 * The inverse of do_save_fpregs().
 * Copies coprocessor and extra state from fpregs into regs and tsk->thread.
 * Returns 0 on success, non-zero if layout doesn't match.
 */
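
/*
 * Callers are expected to hand back a buffer in the exact layout that
 * do_save_fpregs() produces; the memcmp() against _xtensa_reginfo_tables
 * below enforces this, and a mismatch is reported by returning -1.
 */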

int do_restore_fpregs (elf_fpregset_t *fpregs, struct pt_regs *regs,
                       struct task_struct *tsk)
{
#if XCHAL_HAVE_CP

        extern unsigned char _xtensa_reginfo_tables[];
        extern unsigned _xtensa_reginfo_table_size;
        int i;
        unsigned long flags;

        /* Make sure save area layouts match.
         * FIXME: in the future we could allow restoring from
         * a different layout of the same registers, by comparing
         * fpregs' table with _xtensa_reginfo_tables and matching
         * entries and copying registers one at a time.
         * Not too sure yet whether that's very useful.
         */

        if( memcmp((unsigned char*)fpregs,
                   _xtensa_reginfo_tables, _xtensa_reginfo_table_size) ) {
                return -1;
        }

        /* Before restoring coprocessor state from memory,
         * ensure any live coprocessor contents for this
         * task are first invalidated.
         */

        local_irq_save(flags);

        for (i = 0; i < XCHAL_CP_MAX; i++) {
                if (tsk == coprocessor_info[i].owner) {
                        enable_coprocessor(i);
                        save_coprocessor_registers(
                            tsk->thread.cp_save+coprocessor_info[i].offset,i);
                        coprocessor_info[i].owner = 0;
                        disable_coprocessor(i);
                }
        }

        local_irq_restore(flags);

        /* Now restore coprocessor & extra state: */

        memcpy(tsk->thread.cp_save,
               (unsigned char*)fpregs + _xtensa_reginfo_table_size,
               XTENSA_CP_EXTRA_SIZE);
#endif
        return 0;
}

/*
 * Fill in the CP structure for a core dump for a particular task.
 */

int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{
/* see asm/coprocessor.h for this magic number 16 */
#if XTENSA_CP_EXTRA_SIZE > 16
        do_save_fpregs (r, regs, task);

        /* For now, bit 16 means some extra state may be present: */
        /* FIXME!! need to track to return more accurate mask */
        return 0x10000 | XCHAL_CP_MASK;
#else
        return 0;       /* no coprocessors active on this processor */
#endif
}

/*
 * Fill in the CP structure for a core dump.
 * This includes any FPU coprocessor.
 * Here, we dump all coprocessors, and other ("extra") custom state.
 *
 * This function is called by elf_core_dump() in fs/binfmt_elf.c
 * (in which case 'regs' comes from calls to do_coredump, see signals.c).
 */
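
/*
 * For the ELF core-dump path, a non-zero return value indicates that
 * coprocessor/"extra" state is present and should be written out as the
 * FP register note (NT_PRFPREG); returning 0 (as in the no-coprocessor
 * configuration above) omits that note.
 */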
int dump_fpu(struct pt_regs *regs, elf_fpregset_t *r)
{
        return dump_task_fpu(regs, current, r);
}