void decompress_kernel(void)
{
- output_data = 0;
+ output_data = NULL;
output_ptr = PHYSADDR((unsigned long)&_text+PAGE_SIZE);
#ifdef CONFIG_29BIT
output_ptr |= P2SEG;
/* Should be defined by processor-specific code */
void arch_init_clk_ops(struct clk_ops **, int type);
+int __init arch_clk_init(void);
/* arch/sh/kernel/cpu/clock.c */
int clk_init(void);
#define IO_SPACE_LIMIT 0xffffffff
+extern unsigned long generic_io_base;
+
/*
* This function provides a method for the generic case where a board-specific
* ioport_map simply needs to return the port + some arbitrary port base.
*/
static inline void __set_io_port_base(unsigned long pbase)
{
- extern unsigned long generic_io_base;
-
generic_io_base = pbase;
}
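
The declaration of generic_io_base is moved out of the function so that board code and a board's ioport_map can share it: the board records its fixed port offset once at setup time, and the mapping routine adds it to every port number. A minimal sketch of that intended usage, where the board name, setup hook and base address are purely illustrative and not part of this patch:

	/* Illustrative board setup: record the fixed port offset once at boot. */
	static void __init myboard_setup(char **cmdline_p)
	{
		__set_io_port_base(0xa8000000);	/* made-up base address */
	}

	/* Illustrative ioport_map: each port is simply offset by generic_io_base. */
	static void __iomem *myboard_ioport_map(unsigned long port, unsigned int size)
	{
		return (void __iomem *)(port + generic_io_base);
	}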
#define irq_canonicalize(irq) (irq)
#define irq_demux(irq) sh_mv.mv_irq_demux(irq)
+void init_IRQ(void);
+asmlinkage int do_IRQ(unsigned int irq, struct pt_regs *regs);
+
#ifdef CONFIG_IRQSTACKS
extern void irq_ctx_init(int cpu);
extern void irq_ctx_exit(int cpu);
/* Forward decl */
struct sh_cpuinfo;
+struct seq_operations;
+
+extern struct pt_regs fake_swapper_regs;
/* arch/sh/kernel/setup.c */
const char *get_cpu_subtype(struct sh_cpuinfo *c);
+extern const struct seq_operations cpuinfo_op;
#ifdef CONFIG_VSYSCALL
int vsyscall_init(void);
#ifdef __KERNEL__
#include <linux/compiler.h>
+#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/types.h>
#include <asm/cache.h>
#define current_cpu_data cpu_data[smp_processor_id()]
#define raw_current_cpu_data cpu_data[raw_smp_processor_id()]
+asmlinkage void __init sh_cpu_init(void);
+
/*
* User space process size: 2GB.
*
#define INIT_MMAP \
{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
-extern struct pt_regs fake_swapper_regs;
-
#define INIT_THREAD { \
.sp = sizeof(init_stack) + \
(long) &init_stack, \
#ifndef _ASM_RTC_H
#define _ASM_RTC_H
+void time_init(void);
extern void (*board_time_init)(void);
extern void (*rtc_sh_get_time)(struct timespec *);
extern int (*rtc_sh_set_time)(const time_t);
#ifndef _SH_SETUP_H
#define _SH_SETUP_H
+#include <asm/mmzone.h>
+
#define COMMAND_LINE_SIZE 256
#ifdef __KERNEL__
--- /dev/null
+#ifndef __ASM_SH_SYSCALLS_H
+#define __ASM_SH_SYSCALLS_H
+
+#ifdef __KERNEL__
+
+struct old_utsname;
+
+asmlinkage int old_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ int fd, unsigned long off);
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+asmlinkage int sys_ipc(uint call, int first, int second,
+ int third, void __user *ptr, long fifth);
+asmlinkage int sys_uname(struct old_utsname __user *name);
+
+#ifdef CONFIG_SUPERH32
+# include "syscalls_32.h"
+#else
+# include "syscalls_64.h"
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_H */
--- /dev/null
+#ifndef __ASM_SH_SYSCALLS_32_H
+#define __ASM_SH_SYSCALLS_32_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+asmlinkage int sys_fork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long parent_tidptr,
+ unsigned long child_tidptr,
+ struct pt_regs __regs);
+asmlinkage int sys_vfork(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_execve(char __user *ufilename, char __user * __user *uargv,
+ char __user * __user *uenvp, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_sigsuspend(old_sigset_t mask, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_sigaction(int sig, const struct old_sigaction __user *act,
+ struct old_sigaction __user *oact);
+asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage int sys_pipe(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
+ size_t count, long dummy, loff_t pos);
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
+ size_t count, long dummy, loff_t pos);
+asmlinkage int sys_fadvise64_64_wrapper(int fd, u32 offset0, u32 offset1,
+ u32 len0, u32 len1, int advice);
+
+/* Misc syscall related bits */
+asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
+asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
+ unsigned long thread_info_flags);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_32_H */
--- /dev/null
+#ifndef __ASM_SH_SYSCALLS_64_H
+#define __ASM_SH_SYSCALLS_64_H
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+struct pt_regs;
+
+asmlinkage int sys_fork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs);
+asmlinkage int sys_clone(unsigned long clone_flags, unsigned long newsp,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs);
+asmlinkage int sys_vfork(unsigned long r2, unsigned long r3,
+ unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs);
+asmlinkage int sys_execve(char *ufilename, char **uargv,
+ char **uenvp, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs *pregs);
+
+/* Misc syscall related bits */
+asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs);
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_SH_SYSCALLS_64_H */
})
extern void die(const char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+void free_initmem(void);
+void free_initrd_mem(unsigned long start, unsigned long end);
extern void *set_exception_table_vec(unsigned int vec, void *handler);
#define arch_align_stack(x) (x)
struct mem_access {
- unsigned long (*from)(void *dst, const void *src, unsigned long cnt);
- unsigned long (*to)(void *dst, const void *src, unsigned long cnt);
+ unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
+ unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
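
With the annotation above, the two callbacks spell out that the unaligned-access fixup copies between kernel buffers and user memory. One plausible wiring of a user-space instance, sketched here under the assumption that the standard copy helpers are used directly (the instance name is illustrative):

	/* Sketch: route the fixup's loads and stores through the user-copy helpers. */
	static struct mem_access user_mem_access = {
		.from	= copy_from_user,
		.to	= copy_to_user,
	};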
#ifdef CONFIG_SUPERH32
int handle_unaligned_access(opcode_t instruction, struct pt_regs *regs,
struct mem_access *ma);
+asmlinkage void do_address_error(struct pt_regs *regs,
+ unsigned long writeaccess,
+ unsigned long address);
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+ unsigned long r6, unsigned long r7,
+ struct pt_regs __regs);
+
#endif /* __ASM_SH_SYSTEM_32_H */
{
}
-void __init __attribute__ ((weak))
+int __init __attribute__ ((weak))
arch_clk_init(void)
{
+ return 0;
}
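
Returning int from the weak default lets clk_init() fold any failure from the processor-specific hook into its own return value (see the ret |= arch_clk_init() change below). A CPU family with extra clocks just provides a strong definition; a hypothetical sketch, where sh_extra_clks is a made-up clock list:

	/* Hypothetical override: register a CPU-specific clock list and
	 * report any registration failure back to clk_init(). */
	int __init arch_clk_init(void)
	{
		int i, ret = 0;

		for (i = 0; i < ARRAY_SIZE(sh_extra_clks); i++)
			ret |= clk_register(sh_extra_clks[i]);

		return ret;
	}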
static int show_clocks(char *buf, char **start, off_t off,
ret |= clk_register(clk);
}
- arch_clk_init();
+ ret |= arch_clk_init();
/* Kick the child clocks.. */
propagate_rate(&master_clk);
volatile u8 *port_addr;
u8 *buf = dst;
- port_addr = (volatile u8 *)__ioport_map(port, 1);
+ port_addr = (volatile u8 __force *)__ioport_map(port, 1);
while (count--)
*buf++ = *port_addr;
}
volatile u16 *port_addr;
u16 *buf = dst;
- port_addr = (volatile u16 *)__ioport_map(port, 2);
+ port_addr = (volatile u16 __force *)__ioport_map(port, 2);
while (count--)
*buf++ = *port_addr;
volatile u32 *port_addr;
u32 *buf = dst;
- port_addr = (volatile u32 *)__ioport_map(port, 4);
+ port_addr = (volatile u32 __force *)__ioport_map(port, 4);
while (count--)
*buf++ = *port_addr;
#include <linux/string.h>
#include <asm/machvec.h>
#include <asm/sections.h>
+#include <asm/setup.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/ubc.h>
#include <asm/fpu.h>
+#include <asm/syscalls.h>
static int hlt_counter;
int ubc_usercnt = 0;
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
+#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
/*
* does not yet catch signals sent when the child dies.
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
struct user * dummy = NULL;
+ unsigned long __user *datap = (unsigned long __user *)data;
int ret;
switch (request) {
tmp = !!tsk_used_math(child);
else
tmp = 0;
- ret = put_user(tmp, (unsigned long __user *)data);
+ ret = put_user(tmp, datap);
break;
}
}
ret = 0;
- if (put_user(tmp, (unsigned long *) data)) {
+ if (put_user(tmp, datap)) {
ret = -EFAULT;
break;
}
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
+#include <asm/syscalls.h>
#include <asm/fpu.h>
/* This mask defines the bits of the SR which the user is not allowed to
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/crash_dump.h>
+#include <linux/mmzone.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
#include <asm/fpu.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->regs[15];
sigset_t set;
- stack_t st;
int r0;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
goto badframe;
- if (__copy_from_user(&st, &frame->uc.uc_stack, sizeof(st)))
+ if (do_sigaltstack(&frame->uc.uc_stack, NULL,
+ regs->regs[15]) == -EFAULT)
goto badframe;
- /* It is more difficult to avoid calling this function than to
- call it and ignore errors. */
- do_sigaltstack((const stack_t __user *)&st, NULL, (unsigned long)frame);
return r0;
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
- err |= __put_user(0, &frame->uc.uc_link);
+ err |= __put_user(NULL, &frame->uc.uc_link);
err |= __put_user((void *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(regs->regs[15]),
#include <linux/fs.h>
#include <linux/ipc.h>
#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
union semun fourth;
if (!ptr)
return -EINVAL;
- if (get_user(fourth.__pad, (void * __user *) ptr))
+ if (get_user(fourth.__pad, (void __user * __user *) ptr))
return -EFAULT;
return sys_semctl (first, second, third, fourth);
}
return -EINVAL;
}
-asmlinkage int sys_uname(struct old_utsname * name)
+asmlinkage int sys_uname(struct old_utsname __user *name)
{
int err;
if (!name)
return -EFAULT;
down_read(&uts_sem);
- err = copy_to_user(name, utsname(), sizeof (*name));
+ err = copy_to_user(name, utsname(), sizeof(*name));
up_read(&uts_sem);
return err?-EFAULT:0;
}
#include <asm/cacheflush.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
+#include <asm/syscalls.h>
/*
* sys_pipe() is the normal C calling standard for creating
return error;
}
-asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char * buf,
+asmlinkage ssize_t sys_pread_wrapper(unsigned int fd, char __user *buf,
size_t count, long dummy, loff_t pos)
{
return sys_pread64(fd, buf, count, pos);
}
-asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char * buf,
+asmlinkage ssize_t sys_pwrite_wrapper(unsigned int fd, const char __user *buf,
size_t count, long dummy, loff_t pos)
{
return sys_pwrite64(fd, buf, count, pos);
#include <linux/timex.h>
#include <linux/sched.h>
#include <linux/clockchips.h>
+#include <linux/mc146818rtc.h> /* for rtc_lock */
#include <linux/smp.h>
#include <asm/clock.h>
#include <asm/rtc.h>
return 0;
}
-struct sys_timer_ops cmt_timer_ops = {
+static struct sys_timer_ops cmt_timer_ops = {
.init = cmt_timer_init,
.start = cmt_timer_start,
.stop = cmt_timer_stop,
int ret, index, count;
unsigned long *rm, *rn;
unsigned char *src, *dst;
+ unsigned char __user *srcu, *dstu;
index = (instruction>>8)&15; /* 0x0F00 */
rn = &regs->regs[index];
case 0: /* mov.[bwl] to/from memory via r0+rn */
if (instruction & 8) {
/* from memory */
- src = (unsigned char*) *rm;
- src += regs->regs[0];
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += regs->regs[0];
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
- if (ma->from(dst, src, count))
+ if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
} else {
/* to memory */
- src = (unsigned char*) rm;
+ src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
- dst = (unsigned char*) *rn;
- dst += regs->regs[0];
+ dstu = (unsigned char __user *)*rn;
+ dstu += regs->regs[0];
- if (ma->to(dst, src, count))
+ if (ma->to(dstu, src, count))
goto fetch_fault;
}
ret = 0;
case 1: /* mov.l Rm,@(disp,Rn) */
src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
- dst += (instruction&0x000F)<<2;
+ dstu = (unsigned char __user *)*rn;
+ dstu += (instruction&0x000F)<<2;
- if (ma->to(dst, src, 4))
+ if (ma->to(dstu, src, 4))
goto fetch_fault;
ret = 0;
break;
if (instruction & 4)
*rn -= count;
src = (unsigned char*) rm;
- dst = (unsigned char*) *rn;
+ dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
src += 4-count;
#endif
- if (ma->to(dst, src, count))
+ if (ma->to(dstu, src, count))
goto fetch_fault;
ret = 0;
break;
case 5: /* mov.l @(disp,Rm),Rn */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<2;
- dst = (unsigned char*) rn;
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 2;
+ dst = (unsigned char *)rn;
+ *(unsigned long *)dst = 0;
- if (ma->from(dst, src, 4))
+ if (ma->from(dst, srcu, 4))
goto fetch_fault;
ret = 0;
break;
case 6: /* mov.[bwl] from memory, possibly with post-increment */
- src = (unsigned char*) *rm;
+ srcu = (unsigned char __user *)*rm;
if (instruction & 4)
*rm += count;
dst = (unsigned char*) rn;
#if !defined(__LITTLE_ENDIAN__)
dst += 4-count;
#endif
- if (ma->from(dst, src, count))
+ if (ma->from(dst, srcu, count))
goto fetch_fault;
sign_extend(count, dst);
ret = 0;
case 8:
switch ((instruction&0xFF00)>>8) {
case 0x81: /* mov.w R0,@(disp,Rn) */
- src = (unsigned char*) &regs->regs[0];
+ src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
src += 2;
#endif
- dst = (unsigned char*) *rm; /* called Rn in the spec */
- dst += (instruction&0x000F)<<1;
+ dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
+ dstu += (instruction & 0x000F) << 1;
- if (ma->to(dst, src, 2))
+ if (ma->to(dstu, src, 2))
goto fetch_fault;
ret = 0;
break;
case 0x85: /* mov.w @(disp,Rm),R0 */
- src = (unsigned char*) *rm;
- src += (instruction&0x000F)<<1;
- dst = (unsigned char*) &regs->regs[0];
- *(unsigned long*)dst = 0;
+ srcu = (unsigned char __user *)*rm;
+ srcu += (instruction & 0x000F) << 1;
+ dst = (unsigned char *) &regs->regs[0];
+ *(unsigned long *)dst = 0;
#if !defined(__LITTLE_ENDIAN__)
dst += 2;
#endif
- if (ma->from(dst, src, 2))
+ if (ma->from(dst, srcu, 2))
goto fetch_fault;
sign_extend(2, dst);
ret = 0;
struct mem_access *ma)
{
opcode_t instruction;
- void *addr = (void *)(regs->pc + instruction_size(old_instruction));
+ void __user *addr = (void __user *)(regs->pc +
+ instruction_size(old_instruction));
if (copy_from_user(&instruction, addr, sizeof(instruction))) {
/* the instruction-fetch faulted */
}
set_fs(USER_DS);
- if (copy_from_user(&instruction, (void *)(regs->pc),
+ if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
die("unaligned program counter", regs, error_code);
set_fs(KERNEL_DS);
- if (copy_from_user(&instruction, (void *)(regs->pc),
+ if (copy_from_user(&instruction, (void __user *)(regs->pc),
sizeof(instruction))) {
/* Argh. Fault on the instruction itself.
This should never happen non-SMP
*/
#include <linux/types.h>
+#include <asm/div64.h>
extern uint64_t __xdiv64_32(u64 n, u32 d);
*/
dma_cache_sync(dev, ret, size, DMA_BIDIRECTIONAL);
- ret_nocache = ioremap_nocache(virt_to_phys(ret), size);
+ ret_nocache = (void __force *)ioremap_nocache(virt_to_phys(ret), size);
if (!ret_nocache) {
free_pages((unsigned long)ret, order);
return NULL;
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/page.h>
+#include <asm/uaccess.h>
void copy_page(void *to, void *from)
{
#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
/*
* Nothing too terribly exciting here ..
case PORT_IRDA: return "irda";
}
- return 0;
+ return NULL;
}
static void sci_release_port(struct uart_port *port)