/*
 * linux/arch/m68k/kernel/sys_m68k.c
 *
 * This file contains various random system calls that
 * have a non-standard calling sequence on the Linux/m68k
 * platform.
 */
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
			     unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
			  unsigned long prot, unsigned long flags,
			  unsigned long fd, unsigned long pgoff)
{
	/*
	 * This is wrong for sun3 - there PAGE_SIZE is 8Kb,
	 * so we need to shift the argument down by 1; m68k mmap64(3)
	 * (in libc) expects the last argument of mmap2 in 4Kb units.
	 */
	return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
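
/*
 * Illustrative example (added commentary, not from the original source):
 * since pgoff is in 4Kb units, libc passes a file offset of 0x10000 bytes
 * as pgoff = 0x10000 >> 12 = 0x10, independent of the kernel's PAGE_SIZE.
 */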
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 */
asmlinkage int sys_ipc (uint call, int first, int second,
			int third, void __user *ptr, long fifth)
{
	int version, ret;

	version = call >> 16; /* hack for backward compatibility */
	call &= 0xffff;

	if (call <= SEMCTL)
		switch (call) {
		case SEMOP:
			return sys_semop (first, ptr, second);
		case SEMGET:
			return sys_semget (first, second, third);
		case SEMCTL: {
			union semun fourth;
			if (!ptr)
				return -EINVAL;
			if (get_user(fourth.__pad, (void __user *__user *) ptr))
				return -EFAULT;
			return sys_semctl (first, second, third, fourth);
			}
		default:
			return -ENOSYS;
		}

	if (call <= MSGCTL)
		switch (call) {
		case MSGSND:
			return sys_msgsnd (first, ptr, second, third);
		case MSGRCV:
			switch (version) {
			case 0: {
				struct ipc_kludge tmp;
				if (!ptr)
					return -EINVAL;
				if (copy_from_user (&tmp, ptr, sizeof (tmp)))
					return -EFAULT;
				return sys_msgrcv (first, tmp.msgp, second,
						   tmp.msgtyp, third);
				}
			default:
				return sys_msgrcv (first, ptr,
						   second, fifth, third);
			}
		case MSGGET:
			return sys_msgget ((key_t) first, second);
		case MSGCTL:
			return sys_msgctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	if (call <= SHMCTL)
		switch (call) {
		case SHMAT:
			switch (version) {
			default: {
				ulong raddr;
				ret = do_shmat (first, ptr, second, &raddr);
				if (ret)
					return ret;
				return put_user (raddr, (ulong __user *) third);
			}
			}
		case SHMDT:
			return sys_shmdt (ptr);
		case SHMGET:
			return sys_shmget (first, second, third);
		case SHMCTL:
			return sys_shmctl (first, second, ptr);
		default:
			return -ENOSYS;
		}

	return -EINVAL;
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr)						\
({									\
  unsigned long _mmusr, _paddr;						\
									\
  __asm__ __volatile__ (".chip 68040\n\t"				\
			"ptestr (%1)\n\t"				\
			"movec %%mmusr,%0\n\t"				\
			".chip 68k"					\
			: "=r" (_mmusr)					\
			: "a" (vaddr));					\
  _paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0;		\
  _paddr;								\
})
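
/*
 * Added commentary: "ptestr" asks the 68040 MMU to look up the user
 * translation for VADDR and latch the result in %mmusr; if the resident
 * bit (MMU_R_040) is clear, the page is unmapped and the macro yields 0.
 */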
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          /* This nop is needed for some broken versions of the 68040.  */
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ ("nop\n\t"
                                ".chip 68040\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      if ((paddr = virt_to_phys_040(addr))) {
        paddr += addr & ~(PAGE_MASK | 15);
        len = (len + (addr & 15) + 15) >> 4;
      } else {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_040(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
        len = (len + 15) >> 4;
      }
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * No need to page align here since it is done by
               * virt_to_phys_040().
               */
              addr += PAGE_SIZE;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_040(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_040(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ ("nop\n\t"
                                    ".chip 68040\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
#define virt_to_phys_060(vaddr)				\
({							\
  unsigned long paddr;					\
  __asm__ __volatile__ (".chip 68060\n\t"		\
			"plpar (%0)\n\t"		\
			".chip 68k"			\
			: "=a" (paddr)			\
			: "0" (vaddr));			\
  (paddr);						\
})
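
/*
 * Added commentary: on the 68060 the "plpar" instruction translates the
 * logical address held in the register to its physical address in place,
 * replacing the ptest/%mmusr sequence needed on the 68040.
 */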
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
  unsigned long paddr, i;

  /*
   * cpush %dc : flush DC, remains valid (with our %cacr setup)
   * cpush %ic : invalidate IC
   * cpush %bc : flush DC + invalidate IC
   */
  switch (scope)
    {
    case FLUSH_SCOPE_ALL:
      switch (cache)
        {
        case FLUSH_CACHE_DATA:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %dc\n\t"
                                ".chip 68k");
          break;
        case FLUSH_CACHE_INSN:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %ic\n\t"
                                ".chip 68k");
          break;
        default:
        case FLUSH_CACHE_BOTH:
          __asm__ __volatile__ (".chip 68060\n\t"
                                "cpusha %bc\n\t"
                                ".chip 68k");
          break;
        }
      break;
    case FLUSH_SCOPE_LINE:
      /* Find the physical address of the first mapped page in the
         address range.  */
      len += addr & 15;
      addr &= -16;
      if (!(paddr = virt_to_phys_060(addr))) {
        unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);

        if (len <= tmp)
          return 0;
        addr += tmp;
        len -= tmp;
        tmp = PAGE_SIZE;
        for (;;)
          {
            if ((paddr = virt_to_phys_060(addr)))
              break;
            if (len <= tmp)
              return 0;
            addr += tmp;
            len -= tmp;
          }
      }
      len = (len + 15) >> 4;
      i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
      while (len--)
        {
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushl %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
          if (!--i && len)
            {
              /*
               * We just want to jump to the first cache line
               * in the next page.
               */
              addr += PAGE_SIZE;
              addr &= PAGE_MASK;
              i = PAGE_SIZE / 16;
              /* Recompute physical address when crossing a page
                 boundary.  */
              for (;;)
                {
                  if ((paddr = virt_to_phys_060(addr)))
                    break;
                  if (len <= i)
                    return 0;
                  len -= i;
                  addr += PAGE_SIZE;
                }
            }
          else
            paddr += 16;
        }
      break;
    default:
    case FLUSH_SCOPE_PAGE:
      len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
      addr &= PAGE_MASK;        /* Workaround for bug in some
                                   revisions of the 68060 */
      for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
        {
          if (!(paddr = virt_to_phys_060(addr)))
            continue;
          switch (cache)
            {
            case FLUSH_CACHE_DATA:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%dc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            case FLUSH_CACHE_INSN:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%ic,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            default:
            case FLUSH_CACHE_BOTH:
              __asm__ __volatile__ (".chip 68060\n\t"
                                    "cpushp %%bc,(%0)\n\t"
                                    ".chip 68k"
                                    : : "a" (paddr));
              break;
            }
        }
      break;
    }
  return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache.  */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	lock_kernel();
	if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
	    cache & ~FLUSH_CACHE_BOTH)
		goto out;

	if (scope == FLUSH_SCOPE_ALL) {
		/* Only the superuser may explicitly flush the whole cache. */
		ret = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out;
	} else {
		/*
		 * Verify that the specified address region actually belongs
		 * to this process.
		 */
		vma = find_vma (current->mm, addr);
		ret = -EINVAL;
		/* Check for overflow.  */
		if (addr + len < addr)
			goto out;
		if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
			goto out;
	}
	if (CPU_IS_020_OR_030) {
		if (scope == FLUSH_SCOPE_LINE && len < 256) {
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 4;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x400;
			len >>= 2;
			while (len--) {
				__asm__ __volatile__ ("movec %1, %%caar\n\t"
						      "movec %0, %%cacr"
						      : /* no outputs */
						      : "r" (cacr), "r" (addr));
				addr += 4;
			}
		} else {
			/* Flush the whole cache, even if page granularity requested. */
			unsigned long cacr;
			__asm__ ("movec %%cacr, %0" : "=r" (cacr));
			if (cache & FLUSH_CACHE_INSN)
				cacr |= 8;
			if (cache & FLUSH_CACHE_DATA)
				cacr |= 0x800;
			__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
		}
		ret = 0;
		goto out;
	} else {
		/*
		 * 040 or 060: don't blindly trust 'scope', someone could
		 * try to flush a few megs of memory.
		 */
		if (len >= 3 * PAGE_SIZE && scope < FLUSH_SCOPE_PAGE)
			scope = FLUSH_SCOPE_PAGE;
		if (len >= 10 * PAGE_SIZE && scope < FLUSH_SCOPE_ALL)
			scope = FLUSH_SCOPE_ALL;
		if (CPU_IS_040) {
			ret = cache_flush_040 (addr, scope, cache, len);
		} else if (CPU_IS_060) {
			ret = cache_flush_060 (addr, scope, cache, len);
		}
	}
out:
	unlock_kernel();
	return ret;
}
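
/*
 * Illustrative userspace usage (sketch, added commentary): after writing
 * generated code into a buffer, a program makes the new instructions
 * visible to the CPU with e.g.
 *
 *	cacheflush((unsigned long) buf, FLUSH_SCOPE_LINE,
 *		   FLUSH_CACHE_BOTH, buflen);
 *
 * noting that the heuristic above may silently widen the scope for
 * large lengths.
 */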
asmlinkage int sys_getpagesize(void)
{
	return PAGE_SIZE;
}
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	register long __res asm ("%d0") = __NR_execve;
	register long __a asm ("%d1") = (long)(filename);
	register long __b asm ("%d2") = (long)(argv);
	register long __c asm ("%d3") = (long)(envp);
	asm volatile ("trap #0" : "+d" (__res)
			: "d" (__a), "d" (__b), "d" (__c));
	return __res;
}
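
/*
 * Added commentary: this open-codes the m68k syscall convention -- the
 * syscall number in %d0, arguments in %d1-%d3, and "trap #0" to enter
 * the kernel -- with %d0 ("+d" (__res)) doubling as the return value.
 */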
asmlinkage unsigned long sys_get_thread_area(void)
{
	return current_thread_info()->tp_value;
}

asmlinkage int sys_set_thread_area(unsigned long tp)
{
	current_thread_info()->tp_value = tp;
	return 0;
}
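
/*
 * Added commentary: tp_value holds the per-thread TLS pointer; m68k has
 * no spare register to dedicate to it, so userspace reads and writes it
 * through this syscall pair instead.
 */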
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
   D1 (newval).  */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
		      unsigned long __user * mem)
{
	/* This was borrowed from ARM's implementation.  */
	for (;;) {
		struct mm_struct *mm = current->mm;
		pgd_t *pgd;
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		unsigned long mem_value;

		down_read(&mm->mmap_sem);
		pgd = pgd_offset(mm, (unsigned long)mem);
		if (!pgd_present(*pgd))
			goto bad_access;
		pmd = pmd_offset(pgd, (unsigned long)mem);
		if (!pmd_present(*pmd))
			goto bad_access;
		pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
		if (!pte_present(*pte) || !pte_dirty(*pte)
		    || !pte_write(*pte)) {
			pte_unmap_unlock(pte, ptl);
			goto bad_access;
		}

		mem_value = *mem;
		if (mem_value == oldval)
			*mem = newval;

		pte_unmap_unlock(pte, ptl);
		up_read(&mm->mmap_sem);
		return mem_value;

	      bad_access:
		up_read(&mm->mmap_sem);
		/* This is not necessarily a bad access, we can get here if
		   a memory we're trying to write to should be copied-on-write.
		   Make the kernel do the necessary page stuff, then re-iterate.
		   Simulate a write access fault to do that.  */
		{
			/* The first argument of the function corresponds to
			   D1, which is the first field of struct pt_regs.  */
			struct pt_regs *fp = (struct pt_regs *)&newval;

			/* '3' is an RMW flag.  */
			if (do_page_fault(fp, (unsigned long)mem, 3))
				/* If the do_page_fault() failed, we don't
				   have anything meaningful to return.
				   There should be a SIGSEGV pending for
				   the process.  */
				return 0xdeadbeef;
		}
	}
}
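
/*
 * Illustrative semantics (added commentary): this is a syscall-based
 * compare-and-exchange for userspace, useful on m68k variants without a
 * usable CAS instruction. Callers treat it like cmpxchg: the old value
 * of *mem is returned, and the store took place iff that value equals
 * oldval.
 */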
asmlinkage int sys_atomic_barrier(void)
{
	/* no code needed for uniprocs */
	return 0;
}