Commit | Line | Data |
---|---|---|
1da177e4 | 1 | /* |
1da177e4 LT | 2 | * Copyright (C) 1995 Linus Torvalds |
3 | * | |
4 | * Pentium III FXSR, SSE support | |
5 | * Gareth Hughes <gareth@valinux.com>, May 2000 | |
6 | */ | |
7 | ||
8 | /* | |
9 | * This file handles the architecture-dependent parts of process handling.. | |
10 | */ | |
11 | ||
12 | #include <stdarg.h> | |
13 | ||
f3705136 | 14 | #include <linux/cpu.h> |
1da177e4 LT | 15 | #include <linux/errno.h> |
16 | #include <linux/sched.h> | |
17 | #include <linux/fs.h> | |
18 | #include <linux/kernel.h> | |
19 | #include <linux/mm.h> | |
20 | #include <linux/elfcore.h> | |
21 | #include <linux/smp.h> | |
1da177e4 LT | 22 | #include <linux/stddef.h> |
23 | #include <linux/slab.h> | |
24 | #include <linux/vmalloc.h> | |
25 | #include <linux/user.h> | |
1da177e4 | 26 | #include <linux/interrupt.h> |
1da177e4 LT | 27 | #include <linux/utsname.h> |
28 | #include <linux/delay.h> | |
29 | #include <linux/reboot.h> | |
30 | #include <linux/init.h> | |
31 | #include <linux/mc146818rtc.h> | |
32 | #include <linux/module.h> | |
33 | #include <linux/kallsyms.h> | |
34 | #include <linux/ptrace.h> | |
35 | #include <linux/random.h> | |
c16b63e0 | 36 | #include <linux/personality.h> |
74167347 | 37 | #include <linux/tick.h> |
7c3576d2 | 38 | #include <linux/percpu.h> |
529e25f6 | 39 | #include <linux/prctl.h> |
1da177e4 LT | 40 | |
41 | #include <asm/uaccess.h> | |
42 | #include <asm/pgtable.h> | |
43 | #include <asm/system.h> | |
44 | #include <asm/io.h> | |
45 | #include <asm/ldt.h> | |
46 | #include <asm/processor.h> | |
47 | #include <asm/i387.h> | |
1da177e4 LT | 48 | #include <asm/desc.h> |
49 | #ifdef CONFIG_MATH_EMULATION | |
50 | #include <asm/math_emu.h> | |
51 | #endif | |
52 | ||
1da177e4 LT | 53 | #include <linux/err.h> |
54 | ||
f3705136 ZM | 55 | #include <asm/tlbflush.h> |
56 | #include <asm/cpu.h> | |
718fc13b | 57 | #include <asm/kdebug.h> |
f3705136 | 58 | |
1da177e4 LT | 59 | asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); |
60 | ||
61 | static int hlt_counter; | |
62 | ||
63 | unsigned long boot_option_idle_override = 0; | |
64 | EXPORT_SYMBOL(boot_option_idle_override); | |
65 | ||
7c3576d2 JF | 66 | DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task; |
67 | EXPORT_PER_CPU_SYMBOL(current_task); | |
68 | ||
69 | DEFINE_PER_CPU(int, cpu_number); | |
70 | EXPORT_PER_CPU_SYMBOL(cpu_number); | |
71 | ||
1da177e4 LT | 72 | /* |
73 | * Return saved PC of a blocked thread. | |
74 | */ | |
75 | unsigned long thread_saved_pc(struct task_struct *tsk) | |
76 | { | |
faca6227 | 77 | return ((unsigned long *)tsk->thread.sp)[3]; |
1da177e4 LT | 78 | } |
79 | ||
80 | /* | |
81 | * Powermanagement idle function, if any.. | |
82 | */ | |
83 | void (*pm_idle)(void); | |
129f6946 | 84 | EXPORT_SYMBOL(pm_idle); |
1da177e4 LT | 85 | |
86 | void disable_hlt(void) | |
87 | { | |
88 | hlt_counter++; | |
89 | } | |
90 | ||
91 | EXPORT_SYMBOL(disable_hlt); | |
92 | ||
93 | void enable_hlt(void) | |
94 | { | |
95 | hlt_counter--; | |
96 | } | |
97 | ||
98 | EXPORT_SYMBOL(enable_hlt); | |
99 | ||
100 | /* | |
101 | * We use this if we don't have any better | |
102 | * idle routine.. | |
103 | */ | |
104 | void default_idle(void) | |
105 | { | |
106 | if (!hlt_counter && boot_cpu_data.hlt_works_ok) { | |
495ab9c0 | 107 | current_thread_info()->status &= ~TS_POLLING; |
0888f06a IM | 108 | /* |
109 | * TS_POLLING-cleared state must be visible before we | |
110 | * test NEED_RESCHED: | |
111 | */ | |
112 | smp_mb(); | |
113 | ||
7f424a8b | 114 | if (!need_resched()) |
72690a21 | 115 | safe_halt(); /* enables interrupts racelessly */ |
7f424a8b PZ | 116 | else |
117 | local_irq_enable(); | |
495ab9c0 | 118 | current_thread_info()->status |= TS_POLLING; |
1da177e4 | 119 | } else { |
3b22ec7b | 120 | local_irq_enable(); |
72690a21 AK | 121 | /* loop is done by the caller */ |
122 | cpu_relax(); | |
1da177e4 LT | 123 | } |
124 | } | |
129f6946 AD | 125 | #ifdef CONFIG_APM_MODULE |
126 | EXPORT_SYMBOL(default_idle); | |
127 | #endif | |
1da177e4 | 128 | |
f3705136 ZM | 129 | #ifdef CONFIG_HOTPLUG_CPU |
130 | #include <asm/nmi.h> | |
131 | /* We don't actually take CPU down, just spin without interrupts. */ | |
132 | static inline void play_dead(void) | |
133 | { | |
e1367daf LS | 134 | /* This must be done before dead CPU ack */ |
135 | cpu_exit_clear(); | |
136 | wbinvd(); | |
137 | mb(); | |
f3705136 ZM | 138 | /* Ack it */ |
139 | __get_cpu_var(cpu_state) = CPU_DEAD; | |
140 | ||
e1367daf LS | 141 | /* |
142 | * With physical CPU hotplug, we should halt the cpu | |
143 | */ | |
f3705136 | 144 | local_irq_disable(); |
e1367daf | 145 | while (1) |
f2ab4461 | 146 | halt(); |
f3705136 ZM | 147 | } |
148 | #else | |
149 | static inline void play_dead(void) | |
150 | { | |
151 | BUG(); | |
152 | } | |
153 | #endif /* CONFIG_HOTPLUG_CPU */ | |
154 | ||
1da177e4 LT | 155 | /* |
156 | * The idle thread. There's no useful work to be | |
157 | * done, so just try to conserve power and have a | |
158 | * low exit latency (ie sit in a loop waiting for | |
159 | * somebody to say that they'd like to reschedule) | |
160 | */ | |
f3705136 | 161 | void cpu_idle(void) |
1da177e4 | 162 | { |
5bfb5d69 | 163 | int cpu = smp_processor_id(); |
f3705136 | 164 | |
495ab9c0 | 165 | current_thread_info()->status |= TS_POLLING; |
64c7c8f8 | 166 | |
1da177e4 LT | 167 | /* endless idle loop with no priority at all */ |
168 | while (1) { | |
74167347 | 169 | tick_nohz_stop_sched_tick(); |
1da177e4 LT | 170 | while (!need_resched()) { |
171 | void (*idle)(void); | |
172 | ||
f1d1a842 | 173 | check_pgt_cache(); |
1da177e4 LT | 174 | rmb(); |
175 | idle = pm_idle; | |
176 | ||
0723a69a BL | 177 | if (rcu_pending(cpu)) |
178 | rcu_check_callbacks(cpu, 0); | |
179 | ||
1da177e4 LT | 180 | if (!idle) |
181 | idle = default_idle; | |
182 | ||
f3705136 ZM | 183 | if (cpu_is_offline(cpu)) |
184 | play_dead(); | |
185 | ||
7f424a8b | 186 | local_irq_disable(); |
1da177e4 | 187 | __get_cpu_var(irq_stat).idle_timestamp = jiffies; |
6cd8a4bb SR | 188 | /* Don't trace irqs off for idle */ |
189 | stop_critical_timings(); | |
1da177e4 | 190 | idle(); |
6cd8a4bb | 191 | start_critical_timings(); |
1da177e4 | 192 | } |
74167347 | 193 | tick_nohz_restart_sched_tick(); |
5bfb5d69 | 194 | preempt_enable_no_resched(); |
1da177e4 | 195 | schedule(); |
5bfb5d69 | 196 | preempt_disable(); |
1da177e4 LT | 197 | } |
198 | } | |
199 | ||
9d975ebd | 200 | void __show_registers(struct pt_regs *regs, int all) |
1da177e4 LT | 201 | { |
202 | unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; | |
bb1995d5 | 203 | unsigned long d0, d1, d2, d3, d6, d7; |
65ea5b03 | 204 | unsigned long sp; |
9d975ebd PE | 205 | unsigned short ss, gs; |
206 | ||
207 | if (user_mode_vm(regs)) { | |
65ea5b03 PA | 208 | sp = regs->sp; |
209 | ss = regs->ss & 0xffff; | |
9d975ebd PE | 210 | savesegment(gs, gs); |
211 | } else { | |
65ea5b03 | 212 | sp = (unsigned long) (&regs->sp); |
9d975ebd PE | 213 | savesegment(ss, ss); |
214 | savesegment(gs, gs); | |
215 | } | |
1da177e4 LT | 216 | |
217 | printk("\n"); | |
60812a4a LT | 218 | printk("Pid: %d, comm: %s %s (%s %.*s)\n", |
219 | task_pid_nr(current), current->comm, | |
9d975ebd PE | 220 | print_tainted(), init_utsname()->release, |
221 | (int)strcspn(init_utsname()->version, " "), | |
222 | init_utsname()->version); | |
223 | ||
224 | printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", | |
92bc2056 | 225 | (u16)regs->cs, regs->ip, regs->flags, |
9d975ebd | 226 | smp_processor_id()); |
65ea5b03 | 227 | print_symbol("EIP is at %s\n", regs->ip); |
1da177e4 | 228 | |
1da177e4 | 229 | printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", |
65ea5b03 | 230 | regs->ax, regs->bx, regs->cx, regs->dx); |
9d975ebd | 231 | printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", |
65ea5b03 | 232 | regs->si, regs->di, regs->bp, sp); |
9d975ebd | 233 | printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", |
92bc2056 | 234 | (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); |
9d975ebd PE | 235 | |
236 | if (!all) | |
237 | return; | |
1da177e4 | 238 | |
4bb0d3ec ZA | 239 | cr0 = read_cr0(); |
240 | cr2 = read_cr2(); | |
241 | cr3 = read_cr3(); | |
ff6e8c0d | 242 | cr4 = read_cr4_safe(); |
9d975ebd PE | 243 | printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", |
244 | cr0, cr2, cr3, cr4); | |
bb1995d5 AS | 245 | |
246 | get_debugreg(d0, 0); | |
247 | get_debugreg(d1, 1); | |
248 | get_debugreg(d2, 2); | |
249 | get_debugreg(d3, 3); | |
250 | printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", | |
251 | d0, d1, d2, d3); | |
9d975ebd | 252 | |
bb1995d5 AS | 253 | get_debugreg(d6, 6); |
254 | get_debugreg(d7, 7); | |
9d975ebd PE | 255 | printk("DR6: %08lx DR7: %08lx\n", |
256 | d6, d7); | |
257 | } | |
bb1995d5 | 258 | |
9d975ebd PE | 259 | void show_regs(struct pt_regs *regs) |
260 | { | |
261 | __show_registers(regs, 1); | |
5bc27dc2 | 262 | show_trace(NULL, regs, &regs->sp, regs->bp); |
1da177e4 LT | 263 | } |
264 | ||
265 | /* | |
65ea5b03 PA | 266 | * This gets run with %bx containing the |
267 | * function to call, and %dx containing | |
1da177e4 LT | 268 | * the "args". |
269 | */ | |
270 | extern void kernel_thread_helper(void); | |
1da177e4 LT | 271 | |
272 | /* | |
273 | * Create a kernel thread | |
274 | */ | |
275 | int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) | |
276 | { | |
277 | struct pt_regs regs; | |
278 | ||
279 | memset(&regs, 0, sizeof(regs)); |
280 | ||
65ea5b03 PA | 281 | regs.bx = (unsigned long) fn; |
282 | regs.dx = (unsigned long) arg; | |
1da177e4 | 283 | |
65ea5b03 PA | 284 | regs.ds = __USER_DS; |
285 | regs.es = __USER_DS; | |
286 | regs.fs = __KERNEL_PERCPU; | |
287 | regs.orig_ax = -1; | |
288 | regs.ip = (unsigned long) kernel_thread_helper; | |
289 | regs.cs = __KERNEL_CS | get_kernel_rpl(); | |
290 | regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2; | |
1da177e4 LT | 291 | |
292 | /* Ok, create the new process.. */ | |
8cf2c519 | 293 | return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL); |
1da177e4 | 294 | } |
129f6946 | 295 | EXPORT_SYMBOL(kernel_thread); |
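
As context for the helper above: kernel_thread() builds a fake pt_regs so the child starts in kernel_thread_helper(), which calls the supplied function and then passes its return value to do_exit(). A minimal sketch of how an in-kernel caller of this era might use it; the function name, flags and caller are hypothetical, not taken from this file (later kernels wrap this in the kthread_create()/kthread_run() API):

```c
#include <linux/kernel.h>
#include <linux/sched.h>

/* Hypothetical caller of kernel_thread(); names and flags are illustrative. */
static int example_thread_fn(void *data)
{
	printk(KERN_INFO "example thread running, arg=%p\n", data);
	return 0;	/* kernel_thread_helper() hands this to do_exit() */
}

static void spawn_example_thread(void)
{
	int pid = kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);

	if (pid < 0)
		printk(KERN_ERR "kernel_thread() failed: %d\n", pid);
}
```
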
1da177e4 LT | 296 | |
297 | /* | |
298 | * Free current thread data structures etc.. | |
299 | */ | |
300 | void exit_thread(void) | |
301 | { | |
1da177e4 | 302 | /* The process may have allocated an io port bitmap... nuke it. */ |
b3cf2576 SE | 303 | if (unlikely(test_thread_flag(TIF_IO_BITMAP))) { |
304 | struct task_struct *tsk = current; | |
305 | struct thread_struct *t = &tsk->thread; | |
1da177e4 LT | 306 | int cpu = get_cpu(); |
307 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
308 | ||
309 | kfree(t->io_bitmap_ptr); | |
310 | t->io_bitmap_ptr = NULL; | |
b3cf2576 | 311 | clear_thread_flag(TIF_IO_BITMAP); |
1da177e4 LT | 312 | /* |
313 | * Careful, clear this in the TSS too: | |
314 | */ | |
315 | memset(tss->io_bitmap, 0xff, tss->io_bitmap_max); | |
316 | t->io_bitmap_max = 0; | |
317 | tss->io_bitmap_owner = NULL; | |
318 | tss->io_bitmap_max = 0; | |
a75c54f9 | 319 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; |
1da177e4 LT | 320 | put_cpu(); |
321 | } | |
322 | } | |
323 | ||
324 | void flush_thread(void) | |
325 | { | |
326 | struct task_struct *tsk = current; | |
327 | ||
0f534093 RM | 328 | tsk->thread.debugreg0 = 0; |
329 | tsk->thread.debugreg1 = 0; | |
330 | tsk->thread.debugreg2 = 0; | |
331 | tsk->thread.debugreg3 = 0; | |
332 | tsk->thread.debugreg6 = 0; | |
333 | tsk->thread.debugreg7 = 0; | |
1da177e4 | 334 | memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); |
b3cf2576 | 335 | clear_tsk_thread_flag(tsk, TIF_DEBUG); |
1da177e4 LT | 336 | /* |
337 | * Forget coprocessor state.. | |
338 | */ | |
339 | clear_fpu(tsk); | |
340 | clear_used_math(); | |
341 | } | |
342 | ||
343 | void release_thread(struct task_struct *dead_task) | |
344 | { | |
2684927c | 345 | BUG_ON(dead_task->mm); |
1da177e4 LT | 346 | release_vm86_irqs(dead_task); |
347 | } | |
348 | ||
349 | /* | |
350 | * This gets called before we allocate a new thread and copy | |
351 | * the current task into it. | |
352 | */ | |
353 | void prepare_to_copy(struct task_struct *tsk) | |
354 | { | |
355 | unlazy_fpu(tsk); | |
356 | } | |
357 | ||
65ea5b03 | 358 | int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, |
1da177e4 LT | 359 | unsigned long unused, |
360 | struct task_struct * p, struct pt_regs * regs) | |
361 | { | |
362 | struct pt_regs * childregs; | |
363 | struct task_struct *tsk; | |
364 | int err; | |
365 | ||
07b047fc | 366 | childregs = task_pt_regs(p); |
f48d9663 | 367 | *childregs = *regs; |
65ea5b03 PA | 368 | childregs->ax = 0; |
369 | childregs->sp = sp; | |
f48d9663 | 370 | |
faca6227 PA | 371 | p->thread.sp = (unsigned long) childregs; |
372 | p->thread.sp0 = (unsigned long) (childregs+1); | |
1da177e4 | 373 | |
faca6227 | 374 | p->thread.ip = (unsigned long) ret_from_fork; |
1da177e4 | 375 | |
6612538c | 376 | savesegment(gs, p->thread.gs); |
1da177e4 LT | 377 | |
378 | tsk = current; | |
b3cf2576 | 379 | if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { |
52978be6 AD | 380 | p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, |
381 | IO_BITMAP_BYTES, GFP_KERNEL); | |
1da177e4 LT | 382 | if (!p->thread.io_bitmap_ptr) { |
383 | p->thread.io_bitmap_max = 0; | |
384 | return -ENOMEM; | |
385 | } | |
b3cf2576 | 386 | set_tsk_thread_flag(p, TIF_IO_BITMAP); |
1da177e4 LT | 387 | } |
388 | ||
efd1ca52 RM | 389 | err = 0; |
390 | ||
1da177e4 LT | 391 | /* |
392 | * Set a new TLS for the child thread? | |
393 | */ | |
efd1ca52 RM | 394 | if (clone_flags & CLONE_SETTLS) |
395 | err = do_set_thread_area(p, -1, | |
65ea5b03 | 396 | (struct user_desc __user *)childregs->si, 0); |
1da177e4 | 397 | |
1da177e4 LT | 398 | if (err && p->thread.io_bitmap_ptr) { |
399 | kfree(p->thread.io_bitmap_ptr); | |
400 | p->thread.io_bitmap_max = 0; | |
401 | } | |
402 | return err; | |
403 | } | |
404 | ||
513ad84b IM | 405 | void |
406 | start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) | |
407 | { | |
408 | __asm__("movl %0, %%gs" :: "r"(0)); | |
409 | regs->fs = 0; | |
410 | set_fs(USER_DS); | |
411 | regs->ds = __USER_DS; | |
412 | regs->es = __USER_DS; | |
413 | regs->ss = __USER_DS; | |
414 | regs->cs = __USER_CS; | |
415 | regs->ip = new_ip; | |
416 | regs->sp = new_sp; | |
aa283f49 SS | 417 | /* |
418 | * Free the old FP and other extended state | |
419 | */ | |
420 | free_thread_xstate(current); | |
513ad84b IM | 421 | } |
422 | EXPORT_SYMBOL_GPL(start_thread); | |
423 | ||
bdb4f156 | 424 | static void hard_disable_TSC(void) |
cf99abac AA | 425 | { |
426 | write_cr4(read_cr4() | X86_CR4_TSD); | |
427 | } | |
529e25f6 | 428 | |
cf99abac AA | 429 | void disable_TSC(void) |
430 | { | |
431 | preempt_disable(); | |
432 | if (!test_and_set_thread_flag(TIF_NOTSC)) | |
433 | /* | |
434 | * Must flip the CPU state synchronously with | |
435 | * TIF_NOTSC in the current running context. | |
436 | */ | |
437 | hard_disable_TSC(); | |
438 | preempt_enable(); | |
439 | } | |
529e25f6 | 440 | |
bdb4f156 | 441 | static void hard_enable_TSC(void) |
cf99abac AA | 442 | { |
443 | write_cr4(read_cr4() & ~X86_CR4_TSD); | |
444 | } | |
529e25f6 | 445 | |
a4928cff | 446 | static void enable_TSC(void) |
529e25f6 EB | 447 | { |
448 | preempt_disable(); | |
449 | if (test_and_clear_thread_flag(TIF_NOTSC)) | |
450 | /* | |
451 | * Must flip the CPU state synchronously with | |
452 | * TIF_NOTSC in the current running context. | |
453 | */ | |
454 | hard_enable_TSC(); | |
455 | preempt_enable(); | |
456 | } | |
457 | ||
458 | int get_tsc_mode(unsigned long adr) | |
459 | { | |
460 | unsigned int val; | |
461 | ||
462 | if (test_thread_flag(TIF_NOTSC)) | |
463 | val = PR_TSC_SIGSEGV; | |
464 | else | |
465 | val = PR_TSC_ENABLE; | |
466 | ||
467 | return put_user(val, (unsigned int __user *)adr); | |
468 | } | |
469 | ||
470 | int set_tsc_mode(unsigned int val) | |
471 | { | |
472 | if (val == PR_TSC_SIGSEGV) | |
473 | disable_TSC(); | |
474 | else if (val == PR_TSC_ENABLE) | |
475 | enable_TSC(); | |
476 | else | |
477 | return -EINVAL; | |
478 | ||
479 | return 0; | |
480 | } | |
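
get_tsc_mode()/set_tsc_mode() are the x86 backend for the PR_GET_TSC/PR_SET_TSC prctl() calls (hence the <linux/prctl.h> include added above). A hedged user-space sketch of that interface follows; the constants mirror linux/prctl.h and the program itself is hypothetical, not part of this file:

```c
/* Hypothetical user-space demo of the PR_SET_TSC/PR_GET_TSC prctl interface. */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_TSC			/* values from <linux/prctl.h> */
# define PR_GET_TSC	25
# define PR_SET_TSC	26
# define PR_TSC_ENABLE	1
# define PR_TSC_SIGSEGV	2
#endif

int main(void)
{
	int mode = 0;

	/* Ask the kernel to deliver SIGSEGV when this task executes RDTSC. */
	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
		perror("PR_SET_TSC");

	if (prctl(PR_GET_TSC, &mode, 0, 0, 0) == 0)
		printf("TSC mode: %s\n", mode == PR_TSC_ENABLE ?
		       "enabled" : "SIGSEGV on rdtsc");
	return 0;
}
```
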
cf99abac AA | 481 | |
482 | static noinline void | |
483 | __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p, | |
484 | struct tss_struct *tss) | |
1da177e4 | 485 | { |
7e991604 | 486 | struct thread_struct *prev, *next; |
eee3af4a | 487 | unsigned long debugctl; |
b3cf2576 | 488 | |
7e991604 | 489 | prev = &prev_p->thread; |
b3cf2576 SE | 490 | next = &next_p->thread; |
491 | ||
eee3af4a MM | 492 | debugctl = prev->debugctlmsr; |
493 | if (next->ds_area_msr != prev->ds_area_msr) { | |
494 | /* we clear debugctl to make sure DS | |
495 | * is not in use when we change it */ | |
496 | debugctl = 0; | |
5b0e5084 | 497 | update_debugctlmsr(0); |
eee3af4a MM | 498 | wrmsr(MSR_IA32_DS_AREA, next->ds_area_msr, 0); |
499 | } | |
500 | ||
501 | if (next->debugctlmsr != debugctl) | |
5b0e5084 | 502 | update_debugctlmsr(next->debugctlmsr); |
7e991604 | 503 | |
b3cf2576 | 504 | if (test_tsk_thread_flag(next_p, TIF_DEBUG)) { |
0f534093 RM | 505 | set_debugreg(next->debugreg0, 0); |
506 | set_debugreg(next->debugreg1, 1); | |
507 | set_debugreg(next->debugreg2, 2); | |
508 | set_debugreg(next->debugreg3, 3); | |
b3cf2576 | 509 | /* no 4 and 5 */ |
0f534093 RM | 510 | set_debugreg(next->debugreg6, 6); |
511 | set_debugreg(next->debugreg7, 7); | |
b3cf2576 SE | 512 | } |
513 | ||
cf99abac AA | 514 | if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^ |
515 | test_tsk_thread_flag(next_p, TIF_NOTSC)) { | |
516 | /* prev and next are different */ | |
517 | if (test_tsk_thread_flag(next_p, TIF_NOTSC)) | |
518 | hard_disable_TSC(); | |
519 | else | |
520 | hard_enable_TSC(); | |
521 | } | |
cf99abac | 522 | |
b4ef95de | 523 | #ifdef X86_BTS |
eee3af4a MM | 524 | if (test_tsk_thread_flag(prev_p, TIF_BTS_TRACE_TS)) |
525 | ptrace_bts_take_timestamp(prev_p, BTS_TASK_DEPARTS); | |
526 | ||
527 | if (test_tsk_thread_flag(next_p, TIF_BTS_TRACE_TS)) | |
528 | ptrace_bts_take_timestamp(next_p, BTS_TASK_ARRIVES); | |
b4ef95de | 529 | #endif |
eee3af4a MM | 530 | |
531 | ||
b3cf2576 | 532 | if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) { |
1da177e4 LT | 533 | /* |
534 | * Disable the bitmap via an invalid offset. We still cache | |
535 | * the previous bitmap owner and the IO bitmap contents: | |
536 | */ | |
a75c54f9 | 537 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET; |
1da177e4 LT | 538 | return; |
539 | } | |
b3cf2576 | 540 | |
1da177e4 LT | 541 | if (likely(next == tss->io_bitmap_owner)) { |
542 | /* | |
543 | * Previous owner of the bitmap (hence the bitmap content) | |
544 | * matches the next task, we dont have to do anything but | |
545 | * to set a valid offset in the TSS: | |
546 | */ | |
a75c54f9 | 547 | tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET; |
1da177e4 LT | 548 | return; |
549 | } | |
550 | /* | |
551 | * Lazy TSS's I/O bitmap copy. We set an invalid offset here | |
552 | * and we let the task to get a GPF in case an I/O instruction | |
553 | * is performed. The handler of the GPF will verify that the | |
554 | * faulting task has a valid I/O bitmap and, it true, does the | |
555 | * real copy and restart the instruction. This will save us | |
556 | * redundant copies when the currently switched task does not | |
557 | * perform any I/O during its timeslice. | |
558 | */ | |
a75c54f9 | 559 | tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY; |
1da177e4 | 560 | } |
1da177e4 LT | 561 | |
562 | /* | |
563 | * switch_to(x,yn) should switch tasks from x to y. | |
564 | * | |
565 | * We fsave/fwait so that an exception goes off at the right time | |
566 | * (as a call from the fsave or fwait in effect) rather than to | |
567 | * the wrong process. Lazy FP saving no longer makes any sense | |
568 | * with modern CPU's, and this simplifies a lot of things (SMP | |
569 | * and UP become the same). | |
570 | * | |
571 | * NOTE! We used to use the x86 hardware context switching. The | |
572 | * reason for not using it any more becomes apparent when you | |
573 | * try to recover gracefully from saved state that is no longer | |
574 | * valid (stale segment register values in particular). With the | |
575 | * hardware task-switch, there is no way to fix up bad state in | |
576 | * a reasonable manner. | |
577 | * | |
578 | * The fact that Intel documents the hardware task-switching to | |
579 | * be slow is a fairly red herring - this code is not noticeably | |
580 | * faster. However, there _is_ some room for improvement here, | |
581 | * so the performance issues may eventually be a valid point. | |
582 | * More important, however, is the fact that this allows us much | |
583 | * more flexibility. | |
584 | * | |
65ea5b03 | 585 | * The return value (in %ax) will be the "prev" task after |
1da177e4 LT | 586 | * the task-switch, and shows up in ret_from_fork in entry.S, |
587 | * for example. | |
588 | */ | |
75604d7f | 589 | struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) |
1da177e4 LT | 590 | { |
591 | struct thread_struct *prev = &prev_p->thread, | |
592 | *next = &next_p->thread; | |
593 | int cpu = smp_processor_id(); | |
594 | struct tss_struct *tss = &per_cpu(init_tss, cpu); | |
595 | ||
596 | /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ | |
597 | ||
598 | __unlazy_fpu(prev_p); | |
599 | ||
acc20761 CE | 600 | |
601 | /* we're going to use this soon, after a few expensive things */ | |
602 | if (next_p->fpu_counter > 5) | |
61c4628b | 603 | prefetch(next->xstate); |
acc20761 | 604 | |
1da177e4 | 605 | /* |
e7a2ff59 | 606 | * Reload esp0. |
1da177e4 | 607 | */ |
faca6227 | 608 | load_sp0(tss, next); |
1da177e4 LT | 609 | |
610 | /* | |
464d1a78 | 611 | * Save away %gs. No need to save %fs, as it was saved on the |
f95d47ca JF | 612 | * stack on entry. No need to save %es and %ds, as those are |
613 | * always kernel segments while inside the kernel. Doing this | |
614 | * before setting the new TLS descriptors avoids the situation | |
615 | * where we temporarily have non-reloadable segments in %fs | |
616 | * and %gs. This could be an issue if the NMI handler ever | |
617 | * used %fs or %gs (it does not today), or if the kernel is | |
618 | * running inside of a hypervisor layer. | |
1da177e4 | 619 | */ |
464d1a78 | 620 | savesegment(gs, prev->gs); |
1da177e4 LT | 621 | |
622 | /* | |
e7a2ff59 | 623 | * Load the per-thread Thread-Local Storage descriptor. |
1da177e4 | 624 | */ |
e7a2ff59 | 625 | load_TLS(next, cpu); |
1da177e4 | 626 | |
8b151144 ZA | 627 | /* |
628 | * Restore IOPL if needed. In normal use, the flags restore | |
629 | * in the switch assembly will handle this. But if the kernel | |
630 | * is running virtualized at a non-zero CPL, the popf will | |
631 | * not restore flags, so it must be done in a separate step. | |
632 | */ | |
633 | if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) | |
634 | set_iopl_mask(next->iopl); | |
635 | ||
1da177e4 | 636 | /* |
b3cf2576 | 637 | * Now maybe handle debug registers and/or IO bitmaps |
1da177e4 | 638 | */ |
cf99abac AA | 639 | if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || |
640 | task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) | |
641 | __switch_to_xtra(prev_p, next_p, tss); | |
ffaa8bd6 | 642 | |
9226d125 ZA | 643 | /* |
644 | * Leave lazy mode, flushing any hypercalls made here. | |
645 | * This must be done before restoring TLS segments so | |
646 | * the GDT and LDT are properly updated, and must be | |
647 | * done before math_state_restore, so the TS bit is up | |
648 | * to date. | |
649 | */ | |
650 | arch_leave_lazy_cpu_mode(); | |
651 | ||
acc20761 CE | 652 | /* If the task has used fpu the last 5 timeslices, just do a full |
653 | * restore of the math state immediately to avoid the trap; the | |
654 | * chances of needing FPU soon are obviously high now | |
655 | */ | |
656 | if (next_p->fpu_counter > 5) | |
657 | math_state_restore(); | |
658 | ||
9226d125 ZA | 659 | /* |
660 | * Restore %gs if needed (which is common) | |
661 | */ | |
662 | if (prev->gs | next->gs) | |
663 | loadsegment(gs, next->gs); | |
664 | ||
7c3576d2 | 665 | x86_write_percpu(current_task, next_p); |
9226d125 | 666 | |
1da177e4 LT | 667 | return prev_p; |
668 | } | |
669 | ||
670 | asmlinkage int sys_fork(struct pt_regs regs) | |
671 | { | |
65ea5b03 | 672 | return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL); |
1da177e4 LT | 673 | } |
674 | ||
675 | asmlinkage int sys_clone(struct pt_regs regs) | |
676 | { | |
677 | unsigned long clone_flags; | |
678 | unsigned long newsp; | |
679 | int __user *parent_tidptr, *child_tidptr; | |
680 | ||
65ea5b03 PA | 681 | clone_flags = regs.bx; |
682 | newsp = regs.cx; | |
683 | parent_tidptr = (int __user *)regs.dx; | |
684 | child_tidptr = (int __user *)regs.di; | |
1da177e4 | 685 | if (!newsp) |
65ea5b03 | 686 | newsp = regs.sp; |
1da177e4 LT | 687 | return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr); |
688 | } | |
689 | ||
690 | /* | |
691 | * This is trivial, and on the face of it looks like it | |
692 | * could equally well be done in user mode. | |
693 | * | |
694 | * Not so, for quite unobvious reasons - register pressure. | |
695 | * In user mode vfork() cannot have a stack frame, and if | |
696 | * done by calling the "clone()" system call directly, you | |
697 | * do not have enough call-clobbered registers to hold all | |
698 | * the information you need. | |
699 | */ | |
700 | asmlinkage int sys_vfork(struct pt_regs regs) | |
701 | { | |
65ea5b03 | 702 | return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL); |
1da177e4 LT | 703 | } |
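
To make the register-pressure argument above concrete, this is the user-space pattern sys_vfork() exists to support (a hypothetical sketch, not part of this file): the child borrows the parent's address space and stack, so it may only execve() or _exit() before the parent resumes.

```c
/* Hypothetical user-space sketch of the vfork()+execve() pattern. */
#include <sys/types.h>
#include <unistd.h>

static void run(const char *path, char *const argv[], char *const envp[])
{
	pid_t pid = vfork();

	if (pid == 0) {			/* child: parent is suspended here */
		execve(path, argv, envp);
		_exit(127);		/* only reached if execve() failed */
	}
	/* parent resumes once the child has called execve() or _exit() */
}
```
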
704 | ||
705 | /* | |
706 | * sys_execve() executes a new program. | |
707 | */ | |
708 | asmlinkage int sys_execve(struct pt_regs regs) | |
709 | { | |
710 | int error; | |
711 | char * filename; | |
712 | ||
65ea5b03 | 713 | filename = getname((char __user *) regs.bx); |
1da177e4 LT | 714 | error = PTR_ERR(filename); |
715 | if (IS_ERR(filename)) | |
716 | goto out; | |
717 | error = do_execve(filename, | |
65ea5b03 PA | 718 | (char __user * __user *) regs.cx, |
719 | (char __user * __user *) regs.dx, | |
1da177e4 LT | 720 | &regs); |
721 | if (error == 0) { | |
1da177e4 LT | 722 | /* Make sure we don't return using sysenter.. */ |
723 | set_thread_flag(TIF_IRET); | |
724 | } | |
725 | putname(filename); | |
726 | out: | |
727 | return error; | |
728 | } | |
729 | ||
730 | #define top_esp (THREAD_SIZE - sizeof(unsigned long)) | |
731 | #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) | |
732 | ||
733 | unsigned long get_wchan(struct task_struct *p) | |
734 | { | |
65ea5b03 | 735 | unsigned long bp, sp, ip; |
1da177e4 LT | 736 | unsigned long stack_page; |
737 | int count = 0; | |
738 | if (!p || p == current || p->state == TASK_RUNNING) | |
739 | return 0; | |
65e0fdff | 740 | stack_page = (unsigned long)task_stack_page(p); |
faca6227 | 741 | sp = p->thread.sp; |
65ea5b03 | 742 | if (!stack_page || sp < stack_page || sp > top_esp+stack_page) |
1da177e4 | 743 | return 0; |
65ea5b03 PA | 744 | /* include/asm-i386/system.h:switch_to() pushes bp last. */ |
745 | bp = *(unsigned long *) sp; | |
1da177e4 | 746 | do { |
65ea5b03 | 747 | if (bp < stack_page || bp > top_ebp+stack_page) |
1da177e4 | 748 | return 0; |
65ea5b03 PA | 749 | ip = *(unsigned long *) (bp+4); |
750 | if (!in_sched_functions(ip)) | |
751 | return ip; | |
752 | bp = *(unsigned long *) bp; | |
1da177e4 LT | 753 | } while (count++ < 16); |
754 | return 0; | |
755 | } | |
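
get_wchan() walks the saved frame pointers of a sleeping task to find the first return address outside the scheduler; it is what backs /proc/<pid>/wchan and the ps "wchan" field. A hypothetical user-space reader (not part of this file):

```c
/* Hypothetical reader of /proc/<pid>/wchan, which get_wchan() backs. */
#include <stdio.h>

static void print_wchan(int pid)
{
	char path[64], sym[128];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/wchan", pid);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fscanf(f, "%127s", sym) == 1)
		printf("pid %d is sleeping in %s()\n", pid, sym);
	fclose(f);
}
```
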
756 | ||
1da177e4 LT | 757 | unsigned long arch_align_stack(unsigned long sp) |
758 | { | |
c16b63e0 | 759 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) |
1da177e4 LT | 760 | sp -= get_random_int() % 8192; |
761 | return sp & ~0xf; | |
762 | } | |
c1d171a0 JK | 763 | |
764 | unsigned long arch_randomize_brk(struct mm_struct *mm) | |
765 | { | |
766 | unsigned long range_end = mm->brk + 0x02000000; | |
767 | return randomize_range(mm->brk, range_end, 0) ? : mm->brk; | |
768 | } |