Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/arch/i386/kernel/process.c | |
3 | * | |
4 | * Copyright (C) 1995 Linus Torvalds | |
5 | * | |
6 | * Pentium III FXSR, SSE support | |
7 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
8 | */ | |
9 | ||
10 | /* | |
11 | * This file handles the architecture-dependent parts of process handling.. | |
12 | */ | |
13 | ||
14 | #include <stdarg.h> | |
15 | ||
f3705136 | 16 | #include <linux/cpu.h> |
1da177e4 LT |
17 | #include <linux/errno.h> |
18 | #include <linux/sched.h> | |
19 | #include <linux/fs.h> | |
20 | #include <linux/kernel.h> | |
21 | #include <linux/mm.h> | |
22 | #include <linux/elfcore.h> | |
23 | #include <linux/smp.h> | |
24 | #include <linux/smp_lock.h> | |
25 | #include <linux/stddef.h> | |
26 | #include <linux/slab.h> | |
27 | #include <linux/vmalloc.h> | |
28 | #include <linux/user.h> | |
29 | #include <linux/a.out.h> | |
30 | #include <linux/interrupt.h> | |
1da177e4 LT |
31 | #include <linux/utsname.h> |
32 | #include <linux/delay.h> | |
33 | #include <linux/reboot.h> | |
34 | #include <linux/init.h> | |
35 | #include <linux/mc146818rtc.h> | |
36 | #include <linux/module.h> | |
37 | #include <linux/kallsyms.h> | |
38 | #include <linux/ptrace.h> | |
39 | #include <linux/random.h> | |
c16b63e0 | 40 | #include <linux/personality.h> |
74167347 | 41 | #include <linux/tick.h> |
1da177e4 LT |
42 | |
43 | #include <asm/uaccess.h> | |
44 | #include <asm/pgtable.h> | |
45 | #include <asm/system.h> | |
46 | #include <asm/io.h> | |
47 | #include <asm/ldt.h> | |
48 | #include <asm/processor.h> | |
49 | #include <asm/i387.h> | |
1da177e4 | 50 | #include <asm/desc.h> |
64ca9004 | 51 | #include <asm/vm86.h> |
1da177e4 LT |
52 | #ifdef CONFIG_MATH_EMULATION |
53 | #include <asm/math_emu.h> | |
54 | #endif | |
55 | ||
1da177e4 LT |
56 | #include <linux/err.h> |
57 | ||
f3705136 ZM |
58 | #include <asm/tlbflush.h> |
59 | #include <asm/cpu.h> | |
f95d47ca | 60 | #include <asm/pda.h> |
f3705136 | 61 | |
1da177e4 LT |
62 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
63 | ||
64 | static int hlt_counter; | |
65 | ||
66 | unsigned long boot_option_idle_override = 0; | |
67 | EXPORT_SYMBOL(boot_option_idle_override); | |
68 | ||
69 | /* | |
70 | * Return saved PC of a blocked thread. | |
71 | */ | |
72 | unsigned long thread_saved_pc(struct task_struct *tsk) | |
73 | { | |
74 | return ((unsigned long *)tsk->thread.esp)[3]; | |
75 | } | |
76 | ||
77 | /* | |
78 | * Power management idle function, if any.. | |
79 | */ | |
80 | void (*pm_idle)(void); | |
129f6946 | 81 | EXPORT_SYMBOL(pm_idle); |
1da177e4 LT |
82 | static DEFINE_PER_CPU(unsigned int, cpu_idle_state); |
83 | ||
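`pm_idle` is the hook through which power-management code substitutes its own idle routine; when it is NULL, `cpu_idle()` below falls back to `default_idle()`. A minimal sketch of how a hypothetical driver might install one, mirroring the guard `select_idle_routine()` uses further down (the function names here are invented for illustration):

```c
/* Hypothetical driver hook, sketched for illustration only. */
static void my_lowpower_idle(void)
{
	/* Enter a platform low-power state with interrupts enabled,
	 * so a wakeup event can still be delivered. */
	safe_halt();
}

static int __init my_idle_init(void)
{
	if (!pm_idle)			/* respect a boot-time override */
		pm_idle = my_lowpower_idle;
	return 0;
}
```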
84 | void disable_hlt(void) | |
85 | { | |
86 | hlt_counter++; | |
87 | } | |
88 | ||
89 | EXPORT_SYMBOL(disable_hlt); | |
90 | ||
91 | void enable_hlt(void) | |
92 | { | |
93 | hlt_counter--; | |
94 | } | |
95 | ||
96 | EXPORT_SYMBOL(enable_hlt); | |
97 | ||
98 | /* | |
99 | * We use this if we don't have any better | |
100 | * idle routine.. | |
101 | */ | |
102 | void default_idle(void) | |
103 | { | |
104 | if (!hlt_counter && boot_cpu_data.hlt_works_ok) { | |
495ab9c0 | 105 | current_thread_info()->status &= ~TS_POLLING; |
0888f06a IM |
106 | /* |
107 | * TS_POLLING-cleared state must be visible before we | |
108 | * test NEED_RESCHED: | |
109 | */ | |
110 | smp_mb(); | |
111 | ||
72690a21 AK |
112 | local_irq_disable(); |
113 | if (!need_resched()) | |
114 | safe_halt(); /* enables interrupts racelessly */ | |
115 | else | |
116 | local_irq_enable(); | |
495ab9c0 | 117 | current_thread_info()->status |= TS_POLLING; |
1da177e4 | 118 | } else { |
72690a21 AK |
119 | /* loop is done by the caller */ |
120 | cpu_relax(); | |
1da177e4 LT |
121 | } |
122 | } | |
129f6946 AD |
123 | #ifdef CONFIG_APM_MODULE |
124 | EXPORT_SYMBOL(default_idle); | |
125 | #endif | |
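The interrupt-disable/re-test/`safe_halt()` sequence in `default_idle()` closes a missed-wakeup window; the contrast below sketches the naive ordering it avoids (illustrative only):

```c
/* BROKEN ordering, shown only for contrast:
 *
 *	if (!need_resched())	// reschedule IPI may land right here
 *		asm("hlt");	// CPU halts with work already pending
 *
 * default_idle() instead disables interrupts, re-tests the flag, and
 * issues "sti; hlt" via safe_halt(): the one-instruction interrupt
 * shadow after sti means no interrupt can slip in before the hlt.
 */
```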
1da177e4 LT |
126 | |
127 | /* | |
128 | * On SMP it's slightly faster (but much more power-consuming!) | |
129 | * to poll the need-resched flag instead of waiting for the | |
130 | * cross-CPU IPI to arrive. Use this option with caution. | |
131 | */ | |
132 | static void poll_idle(void) | |
133 | { | |
72690a21 | 134 | cpu_relax(); |
1da177e4 LT |
135 | } |
136 | ||
f3705136 ZM |
137 | #ifdef CONFIG_HOTPLUG_CPU |
138 | #include <asm/nmi.h> | |
139 | /* We don't actually take the CPU down, just spin without interrupts. */ | |
140 | static inline void play_dead(void) | |
141 | { | |
e1367daf LS |
142 | /* This must be done before dead CPU ack */ |
143 | cpu_exit_clear(); | |
144 | wbinvd(); | |
145 | mb(); | |
f3705136 ZM |
146 | /* Ack it */ |
147 | __get_cpu_var(cpu_state) = CPU_DEAD; | |
148 | ||
e1367daf LS |
149 | /* |
150 | * With physical CPU hotplug, we should halt the cpu | |
151 | */ | |
f3705136 | 152 | local_irq_disable(); |
e1367daf | 153 | while (1) |
f2ab4461 | 154 | halt(); |
f3705136 ZM |
155 | } |
156 | #else | |
157 | static inline void play_dead(void) | |
158 | { | |
159 | BUG(); | |
160 | } | |
161 | #endif /* CONFIG_HOTPLUG_CPU */ | |
162 | ||
1da177e4 LT |
163 | /* |
164 | * The idle thread. There's no useful work to be | |
165 | * done, so just try to conserve power and have a | |
166 | * low exit latency (i.e. sit in a loop waiting for | |
167 | * somebody to say that they'd like to reschedule) | |
168 | */ | |
f3705136 | 169 | void cpu_idle(void) |
1da177e4 | 170 | { |
5bfb5d69 | 171 | int cpu = smp_processor_id(); |
f3705136 | 172 | |
495ab9c0 | 173 | current_thread_info()->status |= TS_POLLING; |
64c7c8f8 | 174 | |
1da177e4 LT |
175 | /* endless idle loop with no priority at all */ |
176 | while (1) { | |
74167347 | 177 | tick_nohz_stop_sched_tick(); |
1da177e4 LT |
178 | while (!need_resched()) { |
179 | void (*idle)(void); | |
180 | ||
181 | if (__get_cpu_var(cpu_idle_state)) | |
182 | __get_cpu_var(cpu_idle_state) = 0; | |
183 | ||
184 | rmb(); | |
185 | idle = pm_idle; | |
186 | ||
187 | if (!idle) | |
188 | idle = default_idle; | |
189 | ||
f3705136 ZM |
190 | if (cpu_is_offline(cpu)) |
191 | play_dead(); | |
192 | ||
1da177e4 LT |
193 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
194 | idle(); | |
195 | } | |
74167347 | 196 | tick_nohz_restart_sched_tick(); |
5bfb5d69 | 197 | preempt_enable_no_resched(); |
1da177e4 | 198 | schedule(); |
5bfb5d69 | 199 | preempt_disable(); |
1da177e4 LT |
200 | } |
201 | } | |
202 | ||
203 | void cpu_idle_wait(void) | |
204 | { | |
205 | unsigned int cpu, this_cpu = get_cpu(); | |
dc1829a4 | 206 | cpumask_t map, tmp = current->cpus_allowed; |
1da177e4 LT |
207 | |
208 | set_cpus_allowed(current, cpumask_of_cpu(this_cpu)); | |
209 | put_cpu(); | |
210 | ||
211 | cpus_clear(map); | |
212 | for_each_online_cpu(cpu) { | |
213 | per_cpu(cpu_idle_state, cpu) = 1; | |
214 | cpu_set(cpu, map); | |
215 | } | |
216 | ||
217 | __get_cpu_var(cpu_idle_state) = 0; | |
218 | ||
219 | wmb(); | |
220 | do { | |
221 | ssleep(1); | |
222 | for_each_online_cpu(cpu) { | |
223 | if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu)) | |
224 | cpu_clear(cpu, map); | |
225 | } | |
226 | cpus_and(map, map, cpu_online_map); | |
227 | } while (!cpus_empty(map)); | |
dc1829a4 IM |
228 | |
229 | set_cpus_allowed(current, tmp); | |
1da177e4 LT |
230 | } |
231 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | |
232 | ||
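`cpu_idle_wait()` lets code that has just changed `pm_idle` confirm that every online CPU has left the old routine before its code or data goes away. A typical caller, sketched (the function name is hypothetical):

```c
/* Sketch: safely retiring a custom idle routine. */
static void retire_custom_idle(void)
{
	pm_idle = default_idle;	/* stop new entries into the old routine */
	cpu_idle_wait();	/* returns once no CPU still runs it */
	/* the old routine's module text may now be freed */
}
```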
233 | /* | |
234 | * This uses new MONITOR/MWAIT instructions on P4 processors with PNI, | |
235 | * which can obviate the IPI needed to trigger checking of need_resched. | |
236 | * We execute MONITOR against need_resched and enter an optimized wait state | |
237 | * through MWAIT. Whenever someone changes need_resched, we would be woken | |
238 | * up from MWAIT (without an IPI). | |
991528d7 VP |
239 | * |
240 | * New with Core Duo processors, MWAIT can take some hints based on CPU | |
241 | * capability. | |
1da177e4 | 242 | */ |
991528d7 | 243 | void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) |
1da177e4 | 244 | { |
991528d7 | 245 | if (!need_resched()) { |
64c7c8f8 NP |
246 | __monitor((void *)¤t_thread_info()->flags, 0, 0); |
247 | smp_mb(); | |
991528d7 | 248 | if (!need_resched()) |
ea3d5226 | 249 | __mwait(eax, ecx); |
1da177e4 LT |
250 | } |
251 | } | |
252 | ||
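The waker's half of the MONITOR/MWAIT handshake is only a store: `MONITOR` arms the cache line holding `current_thread_info()->flags`, so setting `TIF_NEED_RESCHED` there is what ends the `MWAIT`. Sketched below; `idle_task` and the wrapper name are placeholders:

```c
/* Waker side, sketched; "idle_task" is a placeholder task pointer. */
static void kick_idle_cpu(struct task_struct *idle_task)
{
	set_tsk_thread_flag(idle_task, TIF_NEED_RESCHED);
	/* thread_info->flags lives in the monitored cache line, so the
	 * idle CPU leaves __mwait() without any cross-CPU IPI. */
}
```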
991528d7 VP |
253 | /* Default MONITOR/MWAIT with no hints, used for default C1 state */ |
254 | static void mwait_idle(void) | |
255 | { | |
256 | local_irq_enable(); | |
72690a21 | 257 | mwait_idle_with_hints(0, 0); |
991528d7 VP |
258 | } |
259 | ||
0bb3184d | 260 | void __devinit select_idle_routine(const struct cpuinfo_x86 *c) |
1da177e4 LT |
261 | { |
262 | if (cpu_has(c, X86_FEATURE_MWAIT)) { | |
263 | printk("monitor/mwait feature present.\n"); | |
264 | /* | |
265 | * Skip if setup has overridden idle. | |
266 | * One CPU supports mwait => all CPUs support mwait. | |
267 | */ | |
268 | if (!pm_idle) { | |
269 | printk("using mwait in idle threads.\n"); | |
270 | pm_idle = mwait_idle; | |
271 | } | |
272 | } | |
273 | } | |
274 | ||
f039b754 | 275 | static int __init idle_setup(char *str) |
1da177e4 | 276 | { |
f039b754 | 277 | if (!strcmp(str, "poll")) { |
1da177e4 LT |
278 | printk("using polling idle threads.\n"); |
279 | pm_idle = poll_idle; | |
280 | #ifdef CONFIG_X86_SMP | |
281 | if (smp_num_siblings > 1) | |
282 | printk("WARNING: polling idle and HT enabled, performance may degrade.\n"); | |
283 | #endif | |
f039b754 AK |
284 | } else if (!strcmp(str, "mwait")) |
285 | force_mwait = 1; | |
286 | else | |
287 | return -1; | |
1da177e4 LT |
288 | |
289 | boot_option_idle_override = 1; | |
f039b754 | 290 | return 0; |
1da177e4 | 291 | } |
f039b754 | 292 | early_param("idle", idle_setup); |
1da177e4 LT |
293 | |
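For reference, `early_param()` wires the handler above to the `idle=` kernel command-line option: `idle=poll` installs `poll_idle` (warning about HT on SMP), `idle=mwait` sets `force_mwait`, and anything else is rejected with `-1`.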
294 | void show_regs(struct pt_regs * regs) | |
295 | { | |
296 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | |
297 | ||
298 | printk("\n"); | |
299 | printk("Pid: %d, comm: %20s\n", current->pid, current->comm); | |
300 | printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); | |
301 | print_symbol("EIP is at %s\n", regs->eip); | |
302 | ||
db753bdf | 303 | if (user_mode_vm(regs)) |
1da177e4 | 304 | printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp); |
b53e8f68 | 305 | printk(" EFLAGS: %08lx %s (%s %.*s)\n", |
96b644bd SH |
306 | regs->eflags, print_tainted(), init_utsname()->release, |
307 | (int)strcspn(init_utsname()->version, " "), | |
308 | init_utsname()->version); | |
1da177e4 LT |
309 | printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", |
310 | regs->eax,regs->ebx,regs->ecx,regs->edx); | |
311 | printk("ESI: %08lx EDI: %08lx EBP: %08lx", | |
312 | regs->esi, regs->edi, regs->ebp); | |
464d1a78 JF |
313 | printk(" DS: %04x ES: %04x FS: %04x\n", |
314 | 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); | |
1da177e4 | 315 | |
4bb0d3ec ZA |
316 | cr0 = read_cr0(); |
317 | cr2 = read_cr2(); | |
318 | cr3 = read_cr3(); | |
ff6e8c0d | 319 | cr4 = read_cr4_safe(); |
1da177e4 | 320 | printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); |
176a2718 | 321 | show_trace(NULL, regs, ®s->esp); |
1da177e4 LT |
322 | } |
323 | ||
324 | /* | |
325 | * This gets run with %ebx containing the | |
326 | * function to call, and %edx containing | |
327 | * the "args". | |
328 | */ | |
329 | extern void kernel_thread_helper(void); | |
1da177e4 LT |
330 | |
331 | /* | |
332 | * Create a kernel thread | |
333 | */ | |
334 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |
335 | { | |
336 | struct pt_regs regs; | |
337 | ||
338 | memset(®s, 0, sizeof(regs)); | |
339 | ||
340 | regs.ebx = (unsigned long) fn; | |
341 | regs.edx = (unsigned long) arg; | |
342 | ||
343 | regs.xds = __USER_DS; | |
344 | regs.xes = __USER_DS; | |
464d1a78 | 345 | regs.xfs = __KERNEL_PDA; |
1da177e4 LT |
346 | regs.orig_eax = -1; |
347 | regs.eip = (unsigned long) kernel_thread_helper; | |
78be3706 | 348 | regs.xcs = __KERNEL_CS | get_kernel_rpl(); |
1da177e4 LT |
349 | regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; |
350 | ||
351 | /* Ok, create the new process.. */ | |
8cf2c519 | 352 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, ®s, 0, NULL, NULL); |
1da177e4 | 353 | } |
129f6946 | 354 | EXPORT_SYMBOL(kernel_thread); |
1da177e4 LT |
355 | |
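A hedged usage sketch for `kernel_thread()`: since `CLONE_VM | CLONE_UNTRACED` are ORed in unconditionally above, callers pass only additional sharing flags. Names below are hypothetical:

```c
/* Hypothetical caller, for illustration only. */
static int my_worker(void *arg)
{
	/* runs in kernel mode, sharing the kernel's address space */
	return 0;
}

static void start_my_worker(void)
{
	int pid = kernel_thread(my_worker, NULL, CLONE_FS | CLONE_FILES);
	if (pid < 0)
		printk(KERN_ERR "my_worker: kernel_thread failed (%d)\n", pid);
}
```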
356 | /* | |
357 | * Free current thread data structures etc.. | |
358 | */ | |
359 | void exit_thread(void) | |
360 | { | |
1da177e4 | 361 | /* The process may have allocated an io port bitmap... nuke it. */ |
b3cf2576 SE |
362 | if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { |
363 | struct task_struct *tsk = current; | |
364 | struct thread_struct *t = &tsk->thread; | |
1da177e4 LT |
365 | int cpu = get_cpu(); |
366 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
367 | ||
368 | kfree(t->io_bitmap_ptr); | |
369 | t->io_bitmap_ptr = NULL; | |
b3cf2576 | 370 | clear_thread_flag(TIF_IO_BITMAP); |
1da177e4 LT |
371 | /* |
372 | * Careful, clear this in the TSS too: | |
373 | */ | |
374 | memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); | |
375 | t->io_bitmap_max = 0; | |
376 | tss->io_bitmap_owner = NULL; | |
377 | tss->io_bitmap_max = 0; | |
a75c54f9 | 378 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; |
1da177e4 LT |
379 | put_cpu(); |
380 | } | |
381 | } | |
382 | ||
383 | void flush_thread(void) | |
384 | { | |
385 | struct task_struct *tsk = current; | |
386 | ||
387 | memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8); | |
388 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); | |
b3cf2576 | 389 | clear_tsk_thread_flag(tsk, TIF_DEBUG); |
1da177e4 LT |
390 | /* |
391 | * Forget coprocessor state.. | |
392 | */ | |
393 | clear_fpu(tsk); | |
394 | clear_used_math(); | |
395 | } | |
396 | ||
397 | void release_thread(struct task_struct *dead_task) | |
398 | { | |
2684927c | 399 | BUG_ON(dead_task->mm); |
1da177e4 LT |
400 | release_vm86_irqs(dead_task); |
401 | } | |
402 | ||
403 | /* | |
404 | * This gets called before we allocate a new thread and copy | |
405 | * the current task into it. | |
406 | */ | |
407 | void prepare_to_copy(struct task_struct *tsk) | |
408 | { | |
409 | unlazy_fpu(tsk); | |
410 | } | |
411 | ||
412 | int copy_thread(int nr, unsigned long clone_flags, unsigned long esp, | |
413 | unsigned long unused, | |
414 | struct task_struct * p, struct pt_regs * regs) | |
415 | { | |
416 | struct pt_regs * childregs; | |
417 | struct task_struct *tsk; | |
418 | int err; | |
419 | ||
07b047fc | 420 | childregs = task_pt_regs(p); |
f48d9663 AN |
421 | *childregs = *regs; |
422 | childregs->eax = 0; | |
423 | childregs->esp = esp; | |
424 | ||
425 | p->thread.esp = (unsigned long) childregs; | |
426 | p->thread.esp0 = (unsigned long) (childregs+1); | |
1da177e4 LT |
427 | |
428 | p->thread.eip = (unsigned long) ret_from_fork; | |
429 | ||
464d1a78 | 430 | savesegment(gs,p->thread.gs); |
1da177e4 LT |
431 | |
432 | tsk = current; | |
b3cf2576 | 433 | if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { |
52978be6 AD |
434 | p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, |
435 | IO_BITMAP_BYTES, GFP_KERNEL); | |
1da177e4 LT |
436 | if (!p->thread.io_bitmap_ptr) { |
437 | p->thread.io_bitmap_max = 0; | |
438 | return -ENOMEM; | |
439 | } | |
b3cf2576 | 440 | set_tsk_thread_flag(p, TIF_IO_BITMAP); |
1da177e4 LT |
441 | } |
442 | ||
443 | /* | |
444 | * Set a new TLS for the child thread? | |
445 | */ | |
446 | if (clone_flags & CLONE_SETTLS) { | |
447 | struct desc_struct *desc; | |
448 | struct user_desc info; | |
449 | int idx; | |
450 | ||
451 | err = -EFAULT; | |
452 | if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info))) | |
453 | goto out; | |
454 | err = -EINVAL; | |
455 | if (LDT_empty(&info)) | |
456 | goto out; | |
457 | ||
458 | idx = info.entry_number; | |
459 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | |
460 | goto out; | |
461 | ||
462 | desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; | |
463 | desc->a = LDT_entry_a(&info); | |
464 | desc->b = LDT_entry_b(&info); | |
465 | } | |
466 | ||
467 | err = 0; | |
468 | out: | |
469 | if (err && p->thread.io_bitmap_ptr) { | |
470 | kfree(p->thread.io_bitmap_ptr); | |
471 | p->thread.io_bitmap_max = 0; | |
472 | } | |
473 | return err; | |
474 | } | |
475 | ||
476 | /* | |
477 | * fill in the user structure for a core dump.. | |
478 | */ | |
479 | void dump_thread(struct pt_regs * regs, struct user * dump) | |
480 | { | |
481 | int i; | |
482 | ||
483 | /* changed the size calculations - should hopefully work better. lbt */ | |
484 | dump->magic = CMAGIC; | |
485 | dump->start_code = 0; | |
486 | dump->start_stack = regs->esp & ~(PAGE_SIZE - 1); | |
487 | dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT; | |
488 | dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT; | |
489 | dump->u_dsize -= dump->u_tsize; | |
490 | dump->u_ssize = 0; | |
491 | for (i = 0; i < 8; i++) | |
492 | dump->u_debugreg[i] = current->thread.debugreg[i]; | |
493 | ||
494 | if (dump->start_stack < TASK_SIZE) | |
495 | dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT; | |
496 | ||
497 | dump->regs.ebx = regs->ebx; | |
498 | dump->regs.ecx = regs->ecx; | |
499 | dump->regs.edx = regs->edx; | |
500 | dump->regs.esi = regs->esi; | |
501 | dump->regs.edi = regs->edi; | |
502 | dump->regs.ebp = regs->ebp; | |
503 | dump->regs.eax = regs->eax; | |
504 | dump->regs.ds = regs->xds; | |
505 | dump->regs.es = regs->xes; | |
464d1a78 JF |
506 | dump->regs.fs = regs->xfs; |
507 | savesegment(gs,dump->regs.gs); | |
1da177e4 LT |
508 | dump->regs.orig_eax = regs->orig_eax; |
509 | dump->regs.eip = regs->eip; | |
510 | dump->regs.cs = regs->xcs; | |
511 | dump->regs.eflags = regs->eflags; | |
512 | dump->regs.esp = regs->esp; | |
513 | dump->regs.ss = regs->xss; | |
514 | ||
515 | dump->u_fpvalid = dump_fpu (regs, &dump->i387); | |
516 | } | |
129f6946 | 517 | EXPORT_SYMBOL(dump_thread); |
1da177e4 LT |
518 | |
519 | /* | |
520 | * Capture the user space registers if the task is not running (in user space) | |
521 | */ | |
522 | int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs) | |
523 | { | |
07b047fc | 524 | struct pt_regs ptregs = *task_pt_regs(tsk); |
1da177e4 LT |
525 | ptregs.xcs &= 0xffff; |
526 | ptregs.xds &= 0xffff; | |
527 | ptregs.xes &= 0xffff; | |
528 | ptregs.xss &= 0xffff; | |
529 | ||
530 | elf_core_copy_regs(regs, &ptregs); | |
531 | ||
532 | return 1; | |
533 | } | |
534 | ||
b3cf2576 SE |
535 | static noinline void __switch_to_xtra(struct task_struct *next_p, |
536 | struct tss_struct *tss) | |
1da177e4 | 537 | { |
b3cf2576 SE |
538 | struct thread_struct *next; |
539 | ||
540 | next = &next_p->thread; | |
541 | ||
542 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { | |
543 | set_debugreg(next->debugreg[0], 0); | |
544 | set_debugreg(next->debugreg[1], 1); | |
545 | set_debugreg(next->debugreg[2], 2); | |
546 | set_debugreg(next->debugreg[3], 3); | |
547 | /* no 4 and 5 */ | |
548 | set_debugreg(next->debugreg[6], 6); | |
549 | set_debugreg(next->debugreg[7], 7); | |
550 | } | |
551 | ||
552 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { | |
1da177e4 LT |
553 | /* |
554 | * Disable the bitmap via an invalid offset. We still cache | |
555 | * the previous bitmap owner and the IO bitmap contents: | |
556 | */ | |
a75c54f9 | 557 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; |
1da177e4 LT |
558 | return; |
559 | } | |
b3cf2576 | 560 | |
1da177e4 LT |
561 | if (likely(next == tss->io_bitmap_owner)) { |
562 | /* | |
563 | * Previous owner of the bitmap (hence the bitmap content) | |
564 | * matches the next task, so we don't have to do anything but | |
565 | * set a valid offset in the TSS: | |
566 | */ | |
a75c54f9 | 567 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
1da177e4 LT |
568 | return; |
569 | } | |
570 | /* | |
571 | * Lazy TSS's I/O bitmap copy. We set an invalid offset here | |
572 | * and let the task get a GPF if an I/O instruction | |
573 | * is performed. The GPF handler verifies that the | |
574 | * faulting task has a valid I/O bitmap and, if true, does the | |
575 | * real copy and restarts the instruction. This saves us | |
576 | * redundant copies when the currently switched task does not | |
577 | * perform any I/O during its timeslice. | |
578 | */ | |
a75c54f9 | 579 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; |
1da177e4 | 580 | } |
1da177e4 | 581 | |
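For orientation, `__switch_to_xtra()` leaves `tss->x86_tss.io_bitmap_base` in one of three states (the constants are the kernel's existing ones, summarized here):

```c
/*
 * IO_BITMAP_OFFSET              - bitmap is valid for the incoming task;
 *                                 the CPU checks it on every I/O access
 * INVALID_IO_BITMAP_OFFSET      - task has no bitmap; any I/O access
 *                                 from user space faults
 * INVALID_IO_BITMAP_OFFSET_LAZY - task has a bitmap, but copying it into
 *                                 the TSS is deferred to the #GP handler
 */
```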
ffaa8bd6 AA |
582 | /* |
583 | * This function selects if the context switch from prev to next | |
584 | * has to tweak the TSC disable bit in the cr4. | |
585 | */ | |
586 | static inline void disable_tsc(struct task_struct *prev_p, | |
587 | struct task_struct *next_p) | |
588 | { | |
589 | struct thread_info *prev, *next; | |
590 | ||
591 | /* | |
592 | * gcc should eliminate the ->thread_info dereference if | |
593 | * has_secure_computing returns 0 at compile time (SECCOMP=n). | |
594 | */ | |
06b425d8 AV |
595 | prev = task_thread_info(prev_p); |
596 | next = task_thread_info(next_p); | |
ffaa8bd6 AA |
597 | |
598 | if (has_secure_computing(prev) || has_secure_computing(next)) { | |
599 | /* slow path here */ | |
600 | if (has_secure_computing(prev) && | |
601 | !has_secure_computing(next)) { | |
602 | write_cr4(read_cr4() & ~X86_CR4_TSD); | |
603 | } else if (!has_secure_computing(prev) && | |
604 | has_secure_computing(next)) | |
605 | write_cr4(read_cr4() | X86_CR4_TSD); | |
606 | } | |
607 | } | |
608 | ||
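The user-visible effect of `disable_tsc()`, sketched: while `CR4.TSD` is set, `RDTSC` is privileged, so executing it at CPL 3 raises #GP, which the kernel delivers as `SIGSEGV`. An assumed user-space illustration:

```c
/* User-space sketch (hypothetical): under seccomp this faults with
 * SIGSEGV instead of returning the timestamp counter. */
static unsigned long long read_tsc(void)
{
	unsigned int lo, hi;

	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}
```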
1da177e4 LT |
609 | /* |
610 | * switch_to(x,y) should switch tasks from x to y. | |
611 | * | |
612 | * We fsave/fwait so that an exception goes off at the right time | |
613 | * (as a call from the fsave or fwait in effect) rather than to | |
614 | * the wrong process. Lazy FP saving no longer makes any sense | |
615 | * with modern CPUs, and this simplifies a lot of things (SMP | |
616 | * and UP become the same). | |
617 | * | |
618 | * NOTE! We used to use the x86 hardware context switching. The | |
619 | * reason for not using it any more becomes apparent when you | |
620 | * try to recover gracefully from saved state that is no longer | |
621 | * valid (stale segment register values in particular). With the | |
622 | * hardware task-switch, there is no way to fix up bad state in | |
623 | * a reasonable manner. | |
624 | * | |
625 | * The fact that Intel documents the hardware task-switching to | |
626 | * be slow is a fairly red herring - this code is not noticeably | |
627 | * faster. However, there _is_ some room for improvement here, | |
628 | * so the performance issues may eventually be a valid point. | |
629 | * More important, however, is the fact that this allows us much | |
630 | * more flexibility. | |
631 | * | |
632 | * The return value (in %eax) will be the "prev" task after | |
633 | * the task-switch, and shows up in ret_from_fork in entry.S, | |
634 | * for example. | |
635 | */ | |
636 | struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) | |
637 | { | |
638 | struct thread_struct *prev = &prev_p->thread, | |
639 | *next = &next_p->thread; | |
640 | int cpu = smp_processor_id(); | |
641 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
642 | ||
643 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | |
644 | ||
645 | __unlazy_fpu(prev_p); | |
646 | ||
acc20761 CE |
647 | |
648 | /* we're going to use this soon, after a few expensive things */ | |
649 | if (next_p->fpu_counter > 5) | |
650 | prefetch(&next->i387.fxsave); | |
651 | ||
1da177e4 | 652 | /* |
e7a2ff59 | 653 | * Reload esp0. |
1da177e4 LT |
654 | */ |
655 | load_esp0(tss, next); | |
656 | ||
657 | /* | |
464d1a78 | 658 | * Save away %gs. No need to save %fs, as it was saved on the |
f95d47ca JF |
659 | * stack on entry. No need to save %es and %ds, as those are |
660 | * always kernel segments while inside the kernel. Doing this | |
661 | * before setting the new TLS descriptors avoids the situation | |
662 | * where we temporarily have non-reloadable segments in %fs | |
663 | * and %gs. This could be an issue if the NMI handler ever | |
664 | * used %fs or %gs (it does not today), or if the kernel is | |
665 | * running inside of a hypervisor layer. | |
1da177e4 | 666 | */ |
464d1a78 | 667 | savesegment(gs, prev->gs); |
1da177e4 LT |
668 | |
669 | /* | |
e7a2ff59 | 670 | * Load the per-thread Thread-Local Storage descriptor. |
1da177e4 | 671 | */ |
e7a2ff59 | 672 | load_TLS(next, cpu); |
1da177e4 | 673 | |
8b151144 ZA |
674 | /* |
675 | * Restore IOPL if needed. In normal use, the flags restore | |
676 | * in the switch assembly will handle this. But if the kernel | |
677 | * is running virtualized at a non-zero CPL, the popf will | |
678 | * not restore flags, so it must be done in a separate step. | |
679 | */ | |
680 | if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) | |
681 | set_iopl_mask(next->iopl); | |
682 | ||
1da177e4 | 683 | /* |
b3cf2576 | 684 | * Now maybe handle debug registers and/or IO bitmaps |
1da177e4 | 685 | */ |
facf0147 CE |
686 | if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) |
687 | || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP))) | |
b3cf2576 | 688 | __switch_to_xtra(next_p, tss); |
1da177e4 | 689 | |
ffaa8bd6 AA |
690 | disable_tsc(prev_p, next_p); |
691 | ||
9226d125 ZA |
692 | /* |
693 | * Leave lazy mode, flushing any hypercalls made here. | |
694 | * This must be done before restoring TLS segments so | |
695 | * the GDT and LDT are properly updated, and must be | |
696 | * done before math_state_restore, so the TS bit is up | |
697 | * to date. | |
698 | */ | |
699 | arch_leave_lazy_cpu_mode(); | |
700 | ||
acc20761 CE |
701 | /* If the task has used fpu the last 5 timeslices, just do a full |
702 | * restore of the math state immediately to avoid the trap; the | |
703 | * chances of needing FPU soon are obviously high now | |
704 | */ | |
705 | if (next_p->fpu_counter > 5) | |
706 | math_state_restore(); | |
707 | ||
9226d125 ZA |
708 | /* |
709 | * Restore %gs if needed (which is common) | |
710 | */ | |
711 | if (prev->gs | next->gs) | |
712 | loadsegment(gs, next->gs); | |
713 | ||
714 | write_pda(pcurrent, next_p); | |
715 | ||
1da177e4 LT |
716 | return prev_p; |
717 | } | |
718 | ||
719 | asmlinkage int sys_fork(struct pt_regs regs) | |
720 | { | |
721 | return do_fork(SIGCHLD, regs.esp, ®s, 0, NULL, NULL); | |
722 | } | |
723 | ||
724 | asmlinkage int sys_clone(struct pt_regs regs) | |
725 | { | |
726 | unsigned long clone_flags; | |
727 | unsigned long newsp; | |
728 | int __user *parent_tidptr, *child_tidptr; | |
729 | ||
730 | clone_flags = regs.ebx; | |
731 | newsp = regs.ecx; | |
732 | parent_tidptr = (int __user *)regs.edx; | |
733 | child_tidptr = (int __user *)regs.edi; | |
734 | if (!newsp) | |
735 | newsp = regs.esp; | |
736 | return do_fork(clone_flags, newsp, ®s, 0, parent_tidptr, child_tidptr); | |
737 | } | |
738 | ||
739 | /* | |
740 | * This is trivial, and on the face of it looks like it | |
741 | * could equally well be done in user mode. | |
742 | * | |
743 | * Not so, for quite unobvious reasons - register pressure. | |
744 | * In user mode vfork() cannot have a stack frame, and if | |
745 | * done by calling the "clone()" system call directly, you | |
746 | * do not have enough call-clobbered registers to hold all | |
747 | * the information you need. | |
748 | */ | |
749 | asmlinkage int sys_vfork(struct pt_regs regs) | |
750 | { | |
751 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, ®s, 0, NULL, NULL); | |
752 | } | |
753 | ||
754 | /* | |
755 | * sys_execve() executes a new program. | |
756 | */ | |
757 | asmlinkage int sys_execve(struct pt_regs regs) | |
758 | { | |
759 | int error; | |
760 | char * filename; | |
761 | ||
762 | filename = getname((char __user *) regs.ebx); | |
763 | error = PTR_ERR(filename); | |
764 | if (IS_ERR(filename)) | |
765 | goto out; | |
766 | error = do_execve(filename, | |
767 | (char __user * __user *) regs.ecx, | |
768 | (char __user * __user *) regs.edx, | |
769 | ®s); | |
770 | if (error == 0) { | |
771 | task_lock(current); | |
772 | current->ptrace &= ~PT_DTRACE; | |
773 | task_unlock(current); | |
774 | /* Make sure we don't return using sysenter.. */ | |
775 | set_thread_flag(TIF_IRET); | |
776 | } | |
777 | putname(filename); | |
778 | out: | |
779 | return error; | |
780 | } | |
781 | ||
782 | #define top_esp (THREAD_SIZE - sizeof(unsigned long)) | |
783 | #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) | |
784 | ||
785 | unsigned long get_wchan(struct task_struct *p) | |
786 | { | |
787 | unsigned long ebp, esp, eip; | |
788 | unsigned long stack_page; | |
789 | int count = 0; | |
790 | if (!p || p == current || p->state == TASK_RUNNING) | |
791 | return 0; | |
65e0fdff | 792 | stack_page = (unsigned long)task_stack_page(p); |
1da177e4 LT |
793 | esp = p->thread.esp; |
794 | if (!stack_page || esp < stack_page || esp > top_esp+stack_page) | |
795 | return 0; | |
796 | /* include/asm-i386/system.h:switch_to() pushes ebp last. */ | |
797 | ebp = *(unsigned long *) esp; | |
798 | do { | |
799 | if (ebp < stack_page || ebp > top_ebp+stack_page) | |
800 | return 0; | |
801 | eip = *(unsigned long *) (ebp+4); | |
802 | if (!in_sched_functions(eip)) | |
803 | return eip; | |
804 | ebp = *(unsigned long *) ebp; | |
805 | } while (count++ < 16); | |
806 | return 0; | |
807 | } | |
808 | ||
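The walk in `get_wchan()` relies on the conventional i386 frame layout, so it only yields useful results on kernels built with frame pointers:

```c
/*
 *	esp    -> saved ebp of the switched-out frame (pushed last)
 *	ebp    -> caller's saved ebp (next link in the chain)
 *	ebp+4  -> return address (reported as wchan once it falls
 *	          outside the scheduler's own functions)
 */
```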
809 | /* | |
810 | * sys_alloc_thread_area: get a yet unused TLS descriptor index. | |
811 | */ | |
812 | static int get_free_idx(void) | |
813 | { | |
814 | struct thread_struct *t = ¤t->thread; | |
815 | int idx; | |
816 | ||
817 | for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++) | |
818 | if (desc_empty(t->tls_array + idx)) | |
819 | return idx + GDT_ENTRY_TLS_MIN; | |
820 | return -ESRCH; | |
821 | } | |
822 | ||
823 | /* | |
824 | * Set a given TLS descriptor: | |
825 | */ | |
826 | asmlinkage int sys_set_thread_area(struct user_desc __user *u_info) | |
827 | { | |
828 | struct thread_struct *t = ¤t->thread; | |
829 | struct user_desc info; | |
830 | struct desc_struct *desc; | |
831 | int cpu, idx; | |
832 | ||
833 | if (copy_from_user(&info, u_info, sizeof(info))) | |
834 | return -EFAULT; | |
835 | idx = info.entry_number; | |
836 | ||
837 | /* | |
838 | * index -1 means the kernel should try to find and | |
839 | * allocate an empty descriptor: | |
840 | */ | |
841 | if (idx == -1) { | |
842 | idx = get_free_idx(); | |
843 | if (idx < 0) | |
844 | return idx; | |
845 | if (put_user(idx, &u_info->entry_number)) | |
846 | return -EFAULT; | |
847 | } | |
848 | ||
849 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | |
850 | return -EINVAL; | |
851 | ||
852 | desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN; | |
853 | ||
854 | /* | |
855 | * We must not get preempted while modifying the TLS. | |
856 | */ | |
857 | cpu = get_cpu(); | |
858 | ||
859 | if (LDT_empty(&info)) { | |
860 | desc->a = 0; | |
861 | desc->b = 0; | |
862 | } else { | |
863 | desc->a = LDT_entry_a(&info); | |
864 | desc->b = LDT_entry_b(&info); | |
865 | } | |
866 | load_TLS(t, cpu); | |
867 | ||
868 | put_cpu(); | |
869 | ||
870 | return 0; | |
871 | } | |
872 | ||
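A user-space caller of the syscall above, sketched (the buffer and function names are hypothetical): passing `entry_number == -1` exercises the `get_free_idx()` path, and the kernel writes the allocated index back into the struct.

```c
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/ldt.h>			/* struct user_desc */

static unsigned char my_tls_block[256];	/* hypothetical TLS storage */

int alloc_tls_slot(void)
{
	struct user_desc info;

	memset(&info, 0, sizeof(info));
	info.entry_number = -1;		/* ask the kernel for a free slot */
	info.base_addr = (unsigned long)my_tls_block;
	info.limit = sizeof(my_tls_block) - 1;
	info.seg_32bit = 1;
	info.useable = 1;

	if (syscall(SYS_set_thread_area, &info) != 0)
		return -1;
	return info.entry_number;	/* index written back by the kernel */
}
```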
873 | /* | |
874 | * Get the current Thread-Local Storage area: | |
875 | */ | |
876 | ||
877 | #define GET_BASE(desc) ( \ | |
878 | (((desc)->a >> 16) & 0x0000ffff) | \ | |
879 | (((desc)->b << 16) & 0x00ff0000) | \ | |
880 | ( (desc)->b & 0xff000000) ) | |
881 | ||
882 | #define GET_LIMIT(desc) ( \ | |
883 | ((desc)->a & 0x0ffff) | \ | |
884 | ((desc)->b & 0xf0000) ) | |
885 | ||
886 | #define GET_32BIT(desc) (((desc)->b >> 22) & 1) | |
887 | #define GET_CONTENTS(desc) (((desc)->b >> 10) & 3) | |
888 | #define GET_WRITABLE(desc) (((desc)->b >> 9) & 1) | |
889 | #define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1) | |
890 | #define GET_PRESENT(desc) (((desc)->b >> 15) & 1) | |
891 | #define GET_USEABLE(desc) (((desc)->b >> 20) & 1) | |
892 | ||
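These macros reassemble the base, limit, and attribute bits scattered across the two 32-bit words of a standard x86 segment descriptor. A worked example with invented values: for base `0xdeadbeef`, bits 15:0 sit in `a[31:16]`, bits 23:16 in `b[7:0]`, and bits 31:24 in `b[31:24]`, so `a = 0xbeef0000`, `b = 0xde0000ad` makes `GET_BASE(desc)` return `0xdeadbeef`.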
893 | asmlinkage int sys_get_thread_area(struct user_desc __user *u_info) | |
894 | { | |
895 | struct user_desc info; | |
896 | struct desc_struct *desc; | |
897 | int idx; | |
898 | ||
899 | if (get_user(idx, &u_info->entry_number)) | |
900 | return -EFAULT; | |
901 | if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) | |
902 | return -EINVAL; | |
903 | ||
71ae18ec PBG |
904 | memset(&info, 0, sizeof(info)); |
905 | ||
1da177e4 LT |
906 | desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN; |
907 | ||
908 | info.entry_number = idx; | |
909 | info.base_addr = GET_BASE(desc); | |
910 | info.limit = GET_LIMIT(desc); | |
911 | info.seg_32bit = GET_32BIT(desc); | |
912 | info.contents = GET_CONTENTS(desc); | |
913 | info.read_exec_only = !GET_WRITABLE(desc); | |
914 | info.limit_in_pages = GET_LIMIT_PAGES(desc); | |
915 | info.seg_not_present = !GET_PRESENT(desc); | |
916 | info.useable = GET_USEABLE(desc); | |
917 | ||
918 | if (copy_to_user(u_info, &info, sizeof(info))) | |
919 | return -EFAULT; | |
920 | return 0; | |
921 | } | |
922 | ||
923 | unsigned long arch_align_stack(unsigned long sp) | |
924 | { | |
c16b63e0 | 925 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
1da177e4 LT |
926 | sp -= get_random_int() % 8192; |
927 | return sp & ~0xf; | |
928 | } |