/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

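/*
 * Work to do on the way back out to userspace: reschedule if needed,
 * deliver pending signals, and run any notify-resume callbacks.
 */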
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

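/*
 * Set up a newly forked thread: userspace children get a copy of the
 * parent's registers and resume through fork_handler(); kernel threads
 * take their entry point and argument from sp/arg and start in
 * new_thread_handler().
 */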
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

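/*
 * Idle the virtual CPU by sleeping in the host process for up to one
 * second; a pending interrupt (delivered to the host process as a
 * signal) ends the sleep early.
 */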
void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

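/*
 * Nonzero if sp is not on the current task's kernel stack, i.e. it
 * belongs to a userspace context.
 */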
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

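/*
 * /proc/sysemu: reading shows the current SYSEMU mode, writing a digit
 * '0'-'2' selects it, capped at the level the host was detected to
 * support (sysemu_supported).
 */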
static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

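/*
 * Single-stepping state of a task: 0 when it is not being
 * single-stepped (PT_DTRACE clear), 1 when thread.singlestep_syscall
 * is set, 2 otherwise.
 */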
int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

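/*
 * Scan the sleeping task's kernel stack, skipping scheduler frames,
 * and report the first return address outside the scheduler; that is
 * where the task is waiting.
 */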
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}