/*
 * linux/arch/i386/kernel/process.c
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */

#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/desc.h>
#include <asm/vm86.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/cpu.h>

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
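	/*
	 * thread.esp points at the kernel stack frame saved by switch_to();
	 * the word at offset 3 is treated as the blocked thread's saved PC.
	 */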
	return ((unsigned long *)tsk->thread.esp)[3];
}

/*
 * Power management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
	hlt_counter++;
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}

EXPORT_SYMBOL(enable_hlt);

/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	local_irq_enable();

	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb__after_clear_bit();
		while (!need_resched()) {
			local_irq_disable();
			if (!need_resched())
				safe_halt();
			else
				local_irq_enable();
		}
		set_thread_flag(TIF_POLLING_NRFLAG);
	} else {
		while (!need_resched())
			cpu_relax();
	}
}
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
	local_irq_enable();

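	/*
	 * Spin until TIF_NEED_RESCHED is set; "rep; nop" is the PAUSE
	 * hint, telling the CPU this is a spin-wait loop.
	 */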
	asm volatile(
		"2:"
		"testl %0, %1;"
		"rep; nop;"
		"je 2b;"
		: : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
}

#ifdef CONFIG_HOTPLUG_CPU
#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */

/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	set_thread_flag(TIF_POLLING_NRFLAG);

	/* endless idle loop with no priority at all */
	while (1) {
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			rmb();
			idle = pm_idle;

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
	} while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);

/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
	local_irq_enable();

	while (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (need_resched())
			break;
		__mwait(0, 0);
	}
}

void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		printk("monitor/mwait feature present.\n");
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs supports mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}

static int __init idle_setup (char *str)
{
	if (!strncmp(str, "poll", 4)) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
		if (smp_num_siblings > 1)
			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
	} else if (!strncmp(str, "halt", 4)) {
		printk("using halt in idle threads.\n");
		pm_idle = default_idle;
	}

	boot_option_idle_override = 1;
	return 1;
}

__setup("idle=", idle_setup);

void show_regs(struct pt_regs * regs)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;

	printk("\n");
	printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
	printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
	print_symbol("EIP is at %s\n", regs->eip);

	if (user_mode(regs))
		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
	printk(" EFLAGS: %08lx %s (%s %.*s)\n",
		regs->eflags, print_tainted(), system_utsname.release,
		(int)strcspn(system_utsname.version, " "),
		system_utsname.version);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax,regs->ebx,regs->ecx,regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x\n",
		0xffff & regs->xds,0xffff & regs->xes);

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
	show_trace(NULL, &regs->esp);
}

/*
 * This gets run with %ebx containing the
 * function to call, and %edx containing
 * the "args".
 */
extern void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"kernel_thread_helper:\n\t"
	"movl %edx,%eax\n\t"
	"pushl %edx\n\t"
	"call *%ebx\n\t"
	"pushl %eax\n\t"
	"call do_exit\n"
	".previous");

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.ebx = (unsigned long) fn;
	regs.edx = (unsigned long) arg;

	regs.xds = __USER_DS;
	regs.xes = __USER_DS;
	regs.orig_eax = -1;
	regs.eip = (unsigned long) kernel_thread_helper;
	regs.xcs = __KERNEL_CS;
	regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	struct task_struct *tsk = current;
	struct thread_struct *t = &tsk->thread;

	/*
	 * Remove function-return probe instances associated with this task
	 * and put them back on the free list. Do not insert an exit probe for
	 * this function, it will be disabled by kprobe_flush_task if you do.
	 */
	kprobe_flush_task(tsk);

	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(NULL != t->io_bitmap_ptr)) {
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}

void flush_thread(void)
{
	struct task_struct *tsk = current;

	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}

void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}

/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(fs,p->thread.fs);
	savesegment(gs,p->thread.gs);

	tsk = current;
	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}

/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs,dump->regs.fs);
	savesegment(gs,dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);

/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs = *task_pt_regs(tsk);
	ptregs.xcs &= 0xffff;
	ptregs.xds &= 0xffff;
	ptregs.xes &= 0xffff;
	ptregs.xss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}

static inline void
handle_io_bitmap(struct thread_struct *next, struct tss_struct *tss)
{
	if (!next->io_bitmap_ptr) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}
	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, we don't have to do anything but
		 * to set a valid offset in the TSS:
		 */
		tss->io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}
	/*
	 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
	 * and we let the task get a GPF in case an I/O instruction
	 * is performed. The handler of the GPF will verify that the
	 * faulting task has a valid I/O bitmap and, if true, does the
	 * real copy and restarts the instruction. This will save us
	 * redundant copies when the currently switched task does not
	 * perform any I/O during its timeslice.
	 */
	tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}

/*
 * This function selects if the context switch from prev to next
 * has to tweak the TSC disable bit in the cr4.
 */
static inline void disable_tsc(struct task_struct *prev_p,
			       struct task_struct *next_p)
{
	struct thread_info *prev, *next;

	/*
	 * gcc should eliminate the ->thread_info dereference if
	 * has_secure_computing returns 0 at compile time (SECCOMP=n).
	 */
	prev = task_thread_info(prev_p);
	next = task_thread_info(next_p);

	if (has_secure_computing(prev) || has_secure_computing(next)) {
		/* slow path here */
		if (has_secure_computing(prev) &&
		    !has_secure_computing(next)) {
			write_cr4(read_cr4() & ~X86_CR4_TSD);
		} else if (!has_secure_computing(prev) &&
			   has_secure_computing(next))
			write_cr4(read_cr4() | X86_CR4_TSD);
	}
}

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/*
	 * Reload esp0.
	 */
	load_esp0(tss, next);

	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as
	 * those are always kernel segments while inside the kernel.
	 * Doing this before setting the new TLS descriptors avoids
	 * the situation where we temporarily have non-reloadable
	 * segments in %fs and %gs. This could be an issue if the
	 * NMI handler ever used %fs or %gs (it does not today), or
	 * if the kernel is running inside of a hypervisor layer.
	 */
	savesegment(fs, prev->fs);
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore %fs and %gs if needed.
	 *
	 * Glibc normally makes %fs be zero, and %gs is one of
	 * the TLS segments.
	 */
	if (unlikely(prev->fs | next->fs))
		loadsegment(fs, next->fs);

	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	/*
	 * Restore IOPL if needed.
	 */
	if (unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe reload the debug registers
	 */
	if (unlikely(next->debugreg[7])) {
		set_debugreg(next->debugreg[0], 0);
		set_debugreg(next->debugreg[1], 1);
		set_debugreg(next->debugreg[2], 2);
		set_debugreg(next->debugreg[3], 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg[6], 6);
		set_debugreg(next->debugreg[7], 7);
	}

	if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr))
		handle_io_bitmap(next, tss);

	disable_tsc(prev_p, next_p);

	return prev_p;
}

asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

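/*
 * i386 clone() argument registers: ebx = clone_flags, ecx = new stack
 * pointer, edx = parent TID pointer, edi = child TID pointer (a TLS
 * descriptor, if any, is passed in esi and consumed in copy_thread()).
 */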
asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	parent_tidptr = (int __user *)regs.edx;
	child_tidptr = (int __user *)regs.edi;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
}

/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.ecx,
			(char __user * __user *) regs.edx,
			&regs);
	if (error == 0) {
		task_lock(current);
		current->ptrace &= ~PT_DTRACE;
		task_unlock(current);
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}

#define top_esp (THREAD_SIZE - sizeof(unsigned long))
#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ebp, esp, eip;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	esp = p->thread.esp;
	if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes ebp last. */
	ebp = *(unsigned long *) esp;
	do {
		if (ebp < stack_page || ebp > top_ebp+stack_page)
			return 0;
		eip = *(unsigned long *) (ebp+4);
		if (!in_sched_functions(eip))
			return eip;
		ebp = *(unsigned long *) ebp;
	} while (count++ < 16);
	return 0;
}
EXPORT_SYMBOL(get_wchan);

/*
 * sys_alloc_thread_area: get a yet unused TLS descriptor index.
 */
static int get_free_idx(void)
{
	struct thread_struct *t = &current->thread;
	int idx;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (desc_empty(t->tls_array + idx))
			return idx + GDT_ENTRY_TLS_MIN;
	return -ESRCH;
}

/*
 * Set a given TLS descriptor:
 */
asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
{
	struct thread_struct *t = &current->thread;
	struct user_desc info;
	struct desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;
	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	load_TLS(t, cpu);

	put_cpu();

	return 0;
}

/*
 * Get the current Thread-Local Storage area:
 */

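/*
 * A GDT descriptor is stored as two packed 32-bit words (a, b); the
 * helpers below pick the base, limit and attribute bits back out of
 * that encoding.
 */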
#define GET_BASE(desc) ( \
	(((desc)->a >> 16) & 0x0000ffff) | \
	(((desc)->b << 16) & 0x00ff0000) | \
	( (desc)->b & 0xff000000)   )

#define GET_LIMIT(desc) ( \
	((desc)->a & 0x0ffff) | \
	 ((desc)->b & 0xf0000) )

#define GET_32BIT(desc)		(((desc)->b >> 22) & 1)
#define GET_CONTENTS(desc)	(((desc)->b >> 10) & 3)
#define GET_WRITABLE(desc)	(((desc)->b >> 9) & 1)
#define GET_LIMIT_PAGES(desc)	(((desc)->b >> 23) & 1)
#define GET_PRESENT(desc)	(((desc)->b >> 15) & 1)
#define GET_USEABLE(desc)	(((desc)->b >> 20) & 1)

asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
{
	struct user_desc info;
	struct desc_struct *desc;
	int idx;

	if (get_user(idx, &u_info->entry_number))
		return -EFAULT;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	memset(&info, 0, sizeof(info));

	desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;

	info.entry_number = idx;
	info.base_addr = GET_BASE(desc);
	info.limit = GET_LIMIT(desc);
	info.seg_32bit = GET_32BIT(desc);
	info.contents = GET_CONTENTS(desc);
	info.read_exec_only = !GET_WRITABLE(desc);
	info.limit_in_pages = GET_LIMIT_PAGES(desc);
	info.seg_not_present = !GET_PRESENT(desc);
	info.useable = GET_USEABLE(desc);

	if (copy_to_user(u_info, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

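/*
 * Randomize the initial user stack pointer by up to 8 KB, then round
 * it down to a 16-byte boundary.
 */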
unsigned long arch_align_stack(unsigned long sp)
{
	if (randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}