/* linux/arch/sparc64/kernel/sys_sparc.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/sparc
 * platform.
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/utsname.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/ipc.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/module.h>

#include <asm/uaccess.h>
#include <asm/utrap.h>
#include <asm/perfctr.h>
#include <asm/unistd.h>

#include "entry.h"
#include "systbls.h"

/* #define DEBUG_UNIMP_SYSCALL */

asmlinkage unsigned long sys_getpagesize(void)
{
        return PAGE_SIZE;
}

#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

/* Does addr --> addr+len fall within 4GB of the VA-space hole or
 * overflow past the end of the 64-bit address space?
 */
static inline int invalid_64bit_range(unsigned long addr, unsigned long len)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (unlikely(len >= va_exclude_start))
                return 1;

        if (unlikely((addr + len) < addr))
                return 1;

        if (unlikely((addr >= va_exclude_start && addr < va_exclude_end) ||
                     ((addr + len) >= va_exclude_start &&
                      (addr + len) < va_exclude_end)))
                return 1;

        return 0;
}
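
/* Note on the exclusion window: the VA_EXCLUDE_* bounds pad the hardware
 * VA hole (0x0000080000000000 .. 0xfffff80000000000) by 4GB on each side.
 * A range is rejected when its start or its end falls inside that padded
 * window, when addr + len wraps past the top of the address space, or when
 * len alone already reaches the excluded region and so could never fit
 * below the hole.
 */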

/* Does start,end straddle the VA-space hole? */
static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end)
{
        unsigned long va_exclude_start, va_exclude_end;

        va_exclude_start = VA_EXCLUDE_START;
        va_exclude_end   = VA_EXCLUDE_END;

        if (likely(start < va_exclude_start && end < va_exclude_start))
                return 0;

        if (likely(start >= va_exclude_end && end >= va_exclude_end))
                return 0;

        return 1;
}

/* These functions differ from the default implementations in
 * mm/mmap.c in two ways:
 *
 * 1) For file backed MAP_SHARED mmap()'s we D-cache color align,
 *    for fixed such mappings we just validate what the user gave us.
 * 2) For 64-bit tasks we avoid mapping anything within 4GB of
 *    the spitfire/niagara VA-hole.
 */

static inline unsigned long COLOUR_ALIGN(unsigned long addr,
                                         unsigned long pgoff)
{
        unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        return base + off;
}

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}
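
/* D-cache colour alignment places a shared mapping so that its virtual
 * address matches the colour of its file offset modulo SHMLBA, preventing
 * illegal aliases in the virtually-indexed D-cache.  COLOUR_ALIGN rounds
 * addr up to an SHMLBA boundary and then adds the colour bits of the page
 * offset; COLOUR_ALIGN_DOWN performs the corresponding downward adjustment
 * used by the top-down allocator.
 */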

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct * vma;
        unsigned long task_size = TASK_SIZE;
        unsigned long start_addr;
        int do_color_align;

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (test_thread_flag(TIF_32BIT))
                task_size = STACK_TOP32;
        if (unlikely(len > task_size || len >= VA_EXCLUDE_START))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }

        task_size -= len;

full_search:
        if (do_color_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (addr < VA_EXCLUDE_START &&
                    (addr + len) >= VA_EXCLUDE_START) {
                        addr = VA_EXCLUDE_END;
                        vma = find_vma(mm, VA_EXCLUDE_END);
                }
                if (unlikely(task_size < addr)) {
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (likely(!vma || addr + len <= vma->vm_start)) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                addr = vma->vm_end;
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
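
/* The bottom-up walk above starts at mm->free_area_cache, or back at
 * TASK_UNMAPPED_BASE when the request is small enough that a previously
 * skipped hole (mm->cached_hole_size) might fit it.  A mapping that would
 * touch the padded VA hole is pushed past it in a single jump, and the
 * scan restarts once from TASK_UNMAPPED_BASE before giving up with
 * -ENOMEM.  The end of a successful search is cached as a hint for the
 * next allocation.
 */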

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                               const unsigned long len, const unsigned long pgoff,
                               const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long task_size = STACK_TOP32;
        unsigned long addr = addr0;
        int do_color_align;

        /* This should only ever run for 32-bit processes.  */
        BUG_ON(!test_thread_flag(TIF_32BIT));

        if (flags & MAP_FIXED) {
                /* We do not accept a shared mapping if it would violate
                 * cache aliasing constraints.
                 */
                if ((flags & MAP_SHARED) &&
                    ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
                        return -EINVAL;
                return addr;
        }

        if (unlikely(len > task_size))
                return -ENOMEM;

        do_color_align = 0;
        if (filp || (flags & MAP_SHARED))
                do_color_align = 1;

        /* requesting a specific address */
        if (addr) {
                if (do_color_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (task_size - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_color_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff);

                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (likely(addr > len)) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
                }
        }

        if (unlikely(mm->mmap_base < len))
                goto bottomup;

        addr = mm->mmap_base-len;
        if (do_color_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (likely(!vma || addr+len <= vma->vm_start)) {
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);
                }

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start-len;
                if (do_color_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (likely(len < vma->vm_start));

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}

/* Try to align the mapping as much as possible. */
unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, unsigned long len, unsigned long pgoff, unsigned long flags)
{
        unsigned long align_goal, addr = -ENOMEM;
        unsigned long (*get_area)(struct file *, unsigned long,
                                  unsigned long, unsigned long, unsigned long);

        get_area = current->mm->get_unmapped_area;

        if (flags & MAP_FIXED) {
                /* Ok, don't mess with it. */
                return get_area(NULL, orig_addr, len, pgoff, flags);
        }
        flags &= ~MAP_SHARED;

        align_goal = PAGE_SIZE;
        if (len >= (4UL * 1024 * 1024))
                align_goal = (4UL * 1024 * 1024);
        else if (len >= (512UL * 1024))
                align_goal = (512UL * 1024);
        else if (len >= (64UL * 1024))
                align_goal = (64UL * 1024);

        do {
                addr = get_area(NULL, orig_addr, len + (align_goal - PAGE_SIZE), pgoff, flags);
                if (!(addr & ~PAGE_MASK)) {
                        addr = (addr + (align_goal - 1UL)) & ~(align_goal - 1UL);
                        break;
                }

                if (align_goal == (4UL * 1024 * 1024))
                        align_goal = (512UL * 1024);
                else if (align_goal == (512UL * 1024))
                        align_goal = (64UL * 1024);
                else
                        align_goal = PAGE_SIZE;
        } while ((addr & ~PAGE_MASK) && align_goal > PAGE_SIZE);

        /* Mapping is smaller than 64K or larger areas could not
         * be obtained.
         */
        if (addr & ~PAGE_MASK)
                addr = get_area(NULL, orig_addr, len, pgoff, flags);

        return addr;
}
EXPORT_SYMBOL(get_fb_unmapped_area);
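
/* The loop above over-asks by (align_goal - PAGE_SIZE) bytes and then rounds
 * the returned address up to align_goal, so that, for example, a 4MB or
 * larger framebuffer mapping ends up 4MB aligned when the over-sized request
 * succeeds.  On failure the goal is relaxed step by step
 * (4MB -> 512K -> 64K -> PAGE_SIZE) before falling back to a plain,
 * page-aligned search.
 */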

/* Essentially the same as PowerPC... */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        if (current->flags & PF_RANDOMIZE) {
                random_factor = get_random_int();
                if (test_thread_flag(TIF_32BIT))
                        random_factor &= ((1 * 1024 * 1024) - 1);
                else
                        random_factor = ((random_factor << PAGE_SHIFT) &
                                         0xffffffffUL);
        }

        /*
         * Fall back to the standard layout if the personality
         * bit is set, or if the expected stack growth is unlimited:
         */
        if (!test_thread_flag(TIF_32BIT) ||
            (current->personality & ADDR_COMPAT_LAYOUT) ||
            current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
            sysctl_legacy_va_layout) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                /* We know it's 32-bit */
                unsigned long task_size = STACK_TOP32;
                unsigned long gap;

                gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
                if (gap < 128 * 1024 * 1024)
                        gap = 128 * 1024 * 1024;
                if (gap > (task_size / 6 * 5))
                        gap = (task_size / 6 * 5);

                mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}
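
/* In the top-down case the mmap base sits just below the stack: the gap
 * reserved for stack growth is the stack rlimit, clamped to at least 128MB
 * and at most 5/6 of the 32-bit task size, and the base is lowered further
 * by the per-process random factor when address-space randomization is
 * enabled.
 */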

/*
 * sys_pipe() is the normal C calling standard for creating
 * a pipe. It's not the way unix traditionally does this, though.
 */
SYSCALL_DEFINE1(sparc_pipe_real, struct pt_regs *, regs)
{
        int fd[2];
        int error;

        error = do_pipe_flags(fd, 0);
        if (error)
                goto out;
        regs->u_regs[UREG_I1] = fd[1];
        error = fd[0];
out:
        return error;
}
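
/* The SPARC pipe(2) ABI returns both descriptors in registers: fd[0] comes
 * back as the ordinary return value, while fd[1] is written into the saved
 * copy of the caller's second result register (u_regs[UREG_I1]) so it is
 * visible after the trap returns.
 */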

/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second,
                unsigned long, third, void __user *, ptr, long, fifth)
{
        long err;

        /* No need for backward compatibility.  We can start fresh... */
        if (call <= SEMCTL) {
                switch (call) {
                case SEMOP:
                        err = sys_semtimedop(first, ptr,
                                             (unsigned)second, NULL);
                        goto out;
                case SEMTIMEDOP:
                        err = sys_semtimedop(first, ptr, (unsigned)second,
                                (const struct timespec __user *)
                                             (unsigned long) fifth);
                        goto out;
                case SEMGET:
                        err = sys_semget(first, (int)second, (int)third);
                        goto out;
                case SEMCTL: {
                        err = sys_semctl(first, second,
                                         (int)third | IPC_64,
                                         (union semun) ptr);
                        goto out;
                }
                default:
                        err = -ENOSYS;
                        goto out;
                };
        }
        if (call <= MSGCTL) {
                switch (call) {
                case MSGSND:
                        err = sys_msgsnd(first, ptr, (size_t)second,
                                         (int)third);
                        goto out;
                case MSGRCV:
                        err = sys_msgrcv(first, ptr, (size_t)second, fifth,
                                         (int)third);
                        goto out;
                case MSGGET:
                        err = sys_msgget((key_t)first, (int)second);
                        goto out;
                case MSGCTL:
                        err = sys_msgctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                };
        }
        if (call <= SHMCTL) {
                switch (call) {
                case SHMAT: {
                        ulong raddr;
                        err = do_shmat(first, ptr, (int)second, &raddr);
                        if (!err) {
                                if (put_user(raddr,
                                             (ulong __user *) third))
                                        err = -EFAULT;
                        }
                        goto out;
                }
                case SHMDT:
                        err = sys_shmdt(ptr);
                        goto out;
                case SHMGET:
                        err = sys_shmget(first, (size_t)second, (int)third);
                        goto out;
                case SHMCTL:
                        err = sys_shmctl(first, (int)second | IPC_64, ptr);
                        goto out;
                default:
                        err = -ENOSYS;
                        goto out;
                };
        } else {
                err = -ENOSYS;
        }
out:
        return err;
}
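
/* The IPC call numbers are grouped so that a single range check routes each
 * request: semaphore operations (call <= SEMCTL), then message queues
 * (call <= MSGCTL), then shared memory (call <= SHMCTL).  A C library's
 * semop(id, ops, n), for instance, typically arrives here as
 * ipc(SEMOP, id, n, 0, ops, 0) and is forwarded to sys_semtimedop() with a
 * NULL timeout.
 */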

SYSCALL_DEFINE1(sparc64_newuname, struct new_utsname __user *, name)
{
        int ret = sys_newuname(name);

        if (current->personality == PER_LINUX32 && !ret) {
                ret = (copy_to_user(name->machine, "sparc\0\0", 8)
                       ? -EFAULT : 0);
        }
        return ret;
}

SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality)
{
        int ret;

        if (current->personality == PER_LINUX32 &&
            personality == PER_LINUX)
                personality = PER_LINUX32;
        ret = sys_personality(personality);
        if (ret == PER_LINUX32)
                ret = PER_LINUX;

        return ret;
}

int sparc_mmap_check(unsigned long addr, unsigned long len)
{
        if (test_thread_flag(TIF_32BIT)) {
                if (len >= STACK_TOP32)
                        return -EINVAL;

                if (addr > STACK_TOP32 - len)
                        return -EINVAL;
        } else {
                if (len >= VA_EXCLUDE_START)
                        return -EINVAL;

                if (invalid_64bit_range(addr, len))
                        return -EINVAL;
        }

        return 0;
}

/* Linux version of mmap */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags, unsigned long, fd,
                unsigned long, off)
{
        unsigned long retval = -EINVAL;

        if ((off + PAGE_ALIGN(len)) < off)
                goto out;
        if (off & ~PAGE_MASK)
                goto out;
        retval = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return retval;
}

SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len)
{
        long ret;

        if (invalid_64bit_range(addr, len))
                return -EINVAL;

        down_write(&current->mm->mmap_sem);
        ret = do_munmap(current->mm, addr, len);
        up_write(&current->mm->mmap_sem);
        return ret;
}

extern unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr);

SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        unsigned long ret = -EINVAL;

        if (test_thread_flag(TIF_32BIT))
                goto out;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

/* we come to here via sys_nis_syscall so it can setup the regs argument */
asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs)
{
        static int count;

        /* Don't make the system unusable if something gets stuck. */
        if (count++ > 5)
                return -ENOSYS;

        printk ("Unimplemented SPARC system call %ld\n",regs->u_regs[1]);
#ifdef DEBUG_UNIMP_SYSCALL
        show_regs (regs);
#endif

        return -ENOSYS;
}

/* #define DEBUG_SPARC_BREAKPOINT */

asmlinkage void sparc_breakpoint(struct pt_regs *regs)
{
        siginfo_t info;

        if (test_thread_flag(TIF_32BIT)) {
                regs->tpc &= 0xffffffff;
                regs->tnpc &= 0xffffffff;
        }
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Entering kernel PC=%lx, nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
        info.si_signo = SIGTRAP;
        info.si_errno = 0;
        info.si_code = TRAP_BRKPT;
        info.si_addr = (void __user *)regs->tpc;
        info.si_trapno = 0;
        force_sig_info(SIGTRAP, &info, current);
#ifdef DEBUG_SPARC_BREAKPOINT
        printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc);
#endif
}

extern void check_pending(int signum);

SYSCALL_DEFINE2(getdomainname, char __user *, name, int, len)
{
        int nlen, err;

        if (len < 0)
                return -EINVAL;

        down_read(&uts_sem);

        nlen = strlen(utsname()->domainname) + 1;
        err = -EINVAL;
        if (nlen > len)
                goto out;

        err = -EFAULT;
        if (!copy_to_user(name, utsname()->domainname, nlen))
                err = 0;

out:
        up_read(&uts_sem);
        return err;
}

SYSCALL_DEFINE5(utrap_install, utrap_entry_t, type,
                utrap_handler_t, new_p, utrap_handler_t, new_d,
                utrap_handler_t __user *, old_p,
                utrap_handler_t __user *, old_d)
{
        if (type < UT_INSTRUCTION_EXCEPTION || type > UT_TRAP_INSTRUCTION_31)
                return -EINVAL;
        if (new_p == (utrap_handler_t)(long)UTH_NOCHANGE) {
                if (old_p) {
                        if (!current_thread_info()->utraps) {
                                if (put_user(NULL, old_p))
                                        return -EFAULT;
                        } else {
                                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                                        return -EFAULT;
                        }
                }
                if (old_d) {
                        if (put_user(NULL, old_d))
                                return -EFAULT;
                }
                return 0;
        }
        if (!current_thread_info()->utraps) {
                current_thread_info()->utraps =
                        kzalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long), GFP_KERNEL);
                if (!current_thread_info()->utraps)
                        return -ENOMEM;
                current_thread_info()->utraps[0] = 1;
        } else {
                if ((utrap_handler_t)current_thread_info()->utraps[type] != new_p &&
                    current_thread_info()->utraps[0] > 1) {
                        unsigned long *p = current_thread_info()->utraps;

                        current_thread_info()->utraps =
                                kmalloc((UT_TRAP_INSTRUCTION_31+1)*sizeof(long),
                                        GFP_KERNEL);
                        if (!current_thread_info()->utraps) {
                                current_thread_info()->utraps = p;
                                return -ENOMEM;
                        }
                        p[0]--;
                        current_thread_info()->utraps[0] = 1;
                        memcpy(current_thread_info()->utraps+1, p+1,
                               UT_TRAP_INSTRUCTION_31*sizeof(long));
                }
        }
        if (old_p) {
                if (put_user((utrap_handler_t)(current_thread_info()->utraps[type]), old_p))
                        return -EFAULT;
        }
        if (old_d) {
                if (put_user(NULL, old_d))
                        return -EFAULT;
        }
        current_thread_info()->utraps[type] = (long)new_p;

        return 0;
}
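
/* The utraps array is allocated lazily: slot 0 is used as a reference count
 * (the array can end up shared after process/thread creation) and slots
 * 1..UT_TRAP_INSTRUCTION_31 hold the handlers.  Installing a different
 * handler while the array is shared (refcount > 1) first makes a private
 * copy, so one task's user traps never change underneath another.
 */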

asmlinkage long sparc_memory_ordering(unsigned long model,
                                      struct pt_regs *regs)
{
        if (model >= 3)
                return -EINVAL;
        regs->tstate = (regs->tstate & ~TSTATE_MM) | (model << 14);
        return 0;
}
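
/* The requested model is shifted into the MM field of the saved TSTATE
 * (starting at bit 14 here) so it takes effect when the trap returns to
 * userspace.  The SPARC V9 encodings for values 0, 1 and 2 are TSO, PSO
 * and RMO respectively.
 */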

SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act,
                struct sigaction __user *, oact, void __user *, restorer,
                size_t, sigsetsize)
{
        struct k_sigaction new_ka, old_ka;
        int ret;

        /* XXX: Don't preclude handling different sized sigset_t's.  */
        if (sigsetsize != sizeof(sigset_t))
                return -EINVAL;

        if (act) {
                new_ka.ka_restorer = restorer;
                if (copy_from_user(&new_ka.sa, act, sizeof(*act)))
                        return -EFAULT;
        }

        ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

        if (!ret && oact) {
                if (copy_to_user(oact, &old_ka.sa, sizeof(*oact)))
                        return -EFAULT;
        }

        return ret;
}

/* Invoked by rtrap code to update performance counters in
 * user space.
 */
asmlinkage void update_perfctrs(void)
{
        unsigned long pic, tmp;

        read_pic(pic);
        tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
        __put_user(tmp, current_thread_info()->user_cntd0);
        tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
        __put_user(tmp, current_thread_info()->user_cntd1);
        reset_pic();
}

SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0,
                unsigned long, arg1, unsigned long, arg2)
{
        int err = 0;

        switch(opcode) {
        case PERFCTR_ON:
                current_thread_info()->pcr_reg = arg2;
                current_thread_info()->user_cntd0 = (u64 __user *) arg0;
                current_thread_info()->user_cntd1 = (u64 __user *) arg1;
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                write_pcr(arg2);
                reset_pic();
                set_thread_flag(TIF_PERFCTR);
                break;

        case PERFCTR_OFF:
                err = -EINVAL;
                if (test_thread_flag(TIF_PERFCTR)) {
                        current_thread_info()->user_cntd0 =
                                current_thread_info()->user_cntd1 = NULL;
                        current_thread_info()->pcr_reg = 0;
                        write_pcr(0);
                        clear_thread_flag(TIF_PERFCTR);
                        err = 0;
                }
                break;

        case PERFCTR_READ: {
                unsigned long pic, tmp;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                read_pic(pic);
                tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic);
                err |= __put_user(tmp, current_thread_info()->user_cntd0);
                tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32));
                err |= __put_user(tmp, current_thread_info()->user_cntd1);
                reset_pic();
                break;
        }

        case PERFCTR_CLRPIC:
                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;

        case PERFCTR_SETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __get_user(current_thread_info()->pcr_reg, user_pcr);
                write_pcr(current_thread_info()->pcr_reg);
                current_thread_info()->kernel_cntd0 =
                        current_thread_info()->kernel_cntd1 = 0;
                reset_pic();
                break;
        }

        case PERFCTR_GETPCR: {
                u64 __user *user_pcr = (u64 __user *)arg0;

                if (!test_thread_flag(TIF_PERFCTR)) {
                        err = -EINVAL;
                        break;
                }
                err |= __put_user(current_thread_info()->pcr_reg, user_pcr);
                break;
        }

        default:
                err = -EINVAL;
                break;
        };
        return err;
}

/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
        long __res;
        register long __g1 __asm__ ("g1") = __NR_execve;
        register long __o0 __asm__ ("o0") = (long)(filename);
        register long __o1 __asm__ ("o1") = (long)(argv);
        register long __o2 __asm__ ("o2") = (long)(envp);
        asm volatile ("t 0x6d\n\t"
                      "sub %%g0, %%o0, %0\n\t"
                      "movcc %%xcc, %%o0, %0\n\t"
                      : "=r" (__res), "=&r" (__o0)
                      : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1)
                      : "cc");
        return __res;
}
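
/* The inline asm above issues the 64-bit Linux syscall trap (t 0x6d) with
 * the syscall number in %g1 and the arguments in %o0-%o2.  On SPARC a
 * failing syscall sets the carry flag and leaves the positive errno in %o0,
 * so the result is first negated (sub %g0, %o0) and then, if the carry is
 * clear (success), overwritten with the raw return value by movcc.
 */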