.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
- tstb %curptr@(TASK_NEEDRESCHED)
- jne ret_from_exception | deliver signals,
- | reschedule etc..
- RESTORE_ALL
+ | deliver signals, reschedule etc..
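	| (ret_from_exception itself tests the pending-work flags and
	| simply restores when there is nothing to do)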
+ jra ret_from_exception
|
| mem_write --- write to user or supervisor address space
.Lnotkern:
SAVE_ALL_INT
GET_CURRENT(%d0)
- tstb %curptr@(TASK_NEEDRESCHED)
- jne ret_from_exception | deliver signals,
- | reschedule etc..
- RESTORE_ALL
+ | deliver signals, reschedule etc..
+ jra ret_from_exception
|
| _060_real_chk():
DEFINE(TASK_STATE, offsetof(struct task_struct, state));
DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
- DEFINE(TASK_WORK, offsetof(struct task_struct, thread.work));
- DEFINE(TASK_NEEDRESCHED, offsetof(struct task_struct, thread.work.need_resched));
- DEFINE(TASK_SYSCALL_TRACE, offsetof(struct task_struct, thread.work.syscall_trace));
- DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, thread.work.sigpending));
- DEFINE(TASK_NOTIFY_RESUME, offsetof(struct task_struct, thread.work.notify_resume));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
#include <asm/asm-offsets.h>
-.globl system_call, buserr, trap
-.globl resume, ret_from_exception
-.globl ret_from_signal
+.globl system_call, buserr, trap, resume
.globl inthandler, sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
movel %sp,%sp@- | stack frame pointer argument
bsrl buserr_c
addql #4,%sp
- jra ret_from_exception
+ jra .Lret_from_exception
ENTRY(trap)
SAVE_ALL_INT
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
- jra ret_from_exception
+ jra .Lret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
movel %d1,%sp@-
jsr schedule_tail
addql #4,%sp
- jra ret_from_exception
+ jra .Lret_from_exception
-badsys:
- movel #-ENOSYS,%sp@(PT_D0)
- jra ret_from_exception
-
-do_trace:
+do_trace_entry:
movel #-ENOSYS,%sp@(PT_D0) | needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
- movel %sp@(PT_ORIG_D0),%d1
- movel #-ENOSYS,%d0
- cmpl #NR_syscalls,%d1
- jcc 1f
- jbsr @(sys_call_table,%d1:l:4)@(0)
-1: movel %d0,%sp@(PT_D0) | save the return value
- subql #4,%sp | dummy return address
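	| pick up the (possibly tracer-modified) syscall number again from
	| the saved frame and revalidate it before dispatching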
+ movel %sp@(PT_ORIG_D0),%d0
+ cmpl #NR_syscalls,%d0
+ jcs syscall
+badsys:
+ movel #-ENOSYS,%sp@(PT_D0)
+ jra ret_from_syscall
+
+do_trace_exit:
+ subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
+ RESTORE_SWITCH_STACK
+ addql #4,%sp
+ jra .Lret_from_exception
-ret_from_signal:
+ENTRY(ret_from_signal)
RESTORE_SWITCH_STACK
addql #4,%sp
/* on 68040 complete pending writebacks if any */
addql #4,%sp
1:
#endif
- jra ret_from_exception
+ jra .Lret_from_exception
ENTRY(system_call)
SAVE_ALL_SYS
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
- tstb %curptr@(TASK_SYSCALL_TRACE)
- jne do_trace
+ | syscall trace?
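	| thread_info.flags is a 32-bit big-endian word, so the byte at
	| TINFO_FLAGS+2 carries flag bits 15-8; a negative byte therefore
	| means bit 15 (TIF_SYSCALL_TRACE) is set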
+ tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
+ jmi do_trace_entry
cmpl #NR_syscalls,%d0
jcc badsys
+syscall:
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_D0) | save the return value
-
+ret_from_syscall:
|oriw #0x0700,%sr
- movel %curptr@(TASK_WORK),%d0
+ movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
- tstw %d0
- jeq do_signal_return
- tstb %d0
- jne do_delayed_trace
-
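	| dispatch on the flag word loaded at ret_from_syscall: the first
	| shift moves TIF_SYSCALL_TRACE (bit 15) into the carry and
	| TIF_DELAYED_TRACE (bit 14) into the sign bit, the second brings
	| TIF_SIGPENDING (bit 6) into the sign bit; anything still pending
	| after that can only be TIF_NEED_RESCHED, handled below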
+ lslw #1,%d0
+ jcs do_trace_exit
+ jmi do_delayed_trace
+ lslw #8,%d0
+ jmi do_signal_return
pea resume_userspace
- jmp schedule
+ jra schedule
+
-ret_from_exception:
+ENTRY(ret_from_exception)
+.Lret_from_exception:
btst #5,%sp@(PT_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
andw #ALLOWINT,%sr
resume_userspace:
- movel %curptr@(TASK_WORK),%d0
- lsrl #8,%d0
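	| only the low byte of the flag word matters on this path:
	| TINFO_FLAGS+3 holds bits 7-0, i.e. TIF_NEED_RESCHED and
	| TIF_SIGPENDING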
+ moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
- tstb %d0
- jeq do_signal_return
-
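	| shift TIF_SIGPENDING (bit 6) into the sign position; if it is
	| clear the only remaining work is TIF_NEED_RESCHED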
+ lslb #1,%d0
+ jmi do_signal_return
pea resume_userspace
- jmp schedule
+ jra schedule
+
do_signal_return:
|andw #ALLOWINT,%sr
/* check if we need to do software interrupts */
tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
- jeq ret_from_exception
+ jeq .Lret_from_exception
pea ret_from_exception
jra do_softirq
{
unsigned long tmp = get_reg(child, PT_SR) & ~(TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
- child->thread.work.delayed_trace = 0;
+ clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
/*
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
- child->thread.work.syscall_trace = 0;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
goto out_eio;
if (request == PTRACE_SYSCALL)
- child->thread.work.syscall_trace = ~0;
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
else
- child->thread.work.syscall_trace = 0;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
child->exit_code = data;
singlestep_disable(child);
wake_up_process(child);
if (!valid_signal(data))
goto out_eio;
- child->thread.work.syscall_trace = 0;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
tmp = get_reg(child, PT_SR) | (TRACE_BITS << 16);
put_reg(child, PT_SR, tmp);
- child->thread.work.delayed_trace = 1;
+ set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
child->exit_code = data;
/* give it a chance to run. */
asmlinkage void syscall_trace(void)
{
- if (!current->thread.work.delayed_trace &&
- !current->thread.work.syscall_trace)
- return;
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
#endif
#define TASK_UNMAPPED_ALIGN(addr, off) PAGE_ALIGN(addr)
-struct task_work {
- unsigned char sigpending;
- unsigned char notify_resume; /* request for notification on
- userspace execution resumption */
- char need_resched;
- unsigned char delayed_trace; /* single step a syscall */
- unsigned char syscall_trace; /* count of syscall interceptors */
- unsigned char memdie; /* task was selected to be killed */
- unsigned char pad[2];
-};
-
struct thread_struct {
unsigned long ksp; /* kernel stack pointer */
unsigned long usp; /* user stack pointer */
unsigned long fp[8*3];
unsigned long fpcntl[3]; /* fp control regs */
unsigned char fpstate[FPSTATESIZE]; /* floating point state */
- struct task_work work;
struct thread_info info;
};
struct thread_info {
struct task_struct *task; /* main task structure */
+ unsigned long flags;
struct exec_domain *exec_domain; /* execution domain */
int preempt_count; /* 0 => preemptable, <0 => BUG */
__u32 cpu; /* should always be 0 on m68k */
struct restart_block restart_block;
-
- __u8 supervisor_stack[0];
};
#define PREEMPT_ACTIVE 0x4000000
#define end_of_stack(p) ((unsigned long *)(p)->thread_info + 1)
-#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
-#define TIF_DELAYED_TRACE 1 /* single step a syscall */
-#define TIF_NOTIFY_RESUME 2 /* resumption notification requested */
-#define TIF_SIGPENDING 3 /* signal pending */
-#define TIF_NEED_RESCHED 4 /* rescheduling necessary */
-#define TIF_MEMDIE 5
-
-extern int thread_flag_fixme(void);
-
-/*
- * flag set/clear/test wrappers
- * - pass TIF_xxxx constants to these functions
+/* entry.S relies on these definitions!
+ * bits 0-7 are tested at every exception exit
+ * bits 8-15 are also tested at syscall exit
*/
-
-#define __set_tsk_thread_flag(tsk, flag, val) ({ \
- switch (flag) { \
- case TIF_SIGPENDING: \
- tsk->thread.work.sigpending = val; \
- break; \
- case TIF_NEED_RESCHED: \
- tsk->thread.work.need_resched = val; \
- break; \
- case TIF_SYSCALL_TRACE: \
- tsk->thread.work.syscall_trace = val; \
- break; \
- case TIF_MEMDIE: \
- tsk->thread.work.memdie = val; \
- break; \
- default: \
- thread_flag_fixme(); \
- } \
-})
-
-#define __get_tsk_thread_flag(tsk, flag) ({ \
- int ___res; \
- switch (flag) { \
- case TIF_SIGPENDING: \
- ___res = tsk->thread.work.sigpending; \
- break; \
- case TIF_NEED_RESCHED: \
- ___res = tsk->thread.work.need_resched; \
- break; \
- case TIF_SYSCALL_TRACE: \
- ___res = tsk->thread.work.syscall_trace;\
- break; \
- case TIF_MEMDIE: \
- ___res = tsk->thread.work.memdie;\
- break; \
- default: \
- ___res = thread_flag_fixme(); \
- } \
- ___res; \
-})
-
-#define __get_set_tsk_thread_flag(tsk, flag, val) ({ \
- int __res = __get_tsk_thread_flag(tsk, flag); \
- __set_tsk_thread_flag(tsk, flag, val); \
- __res; \
-})
-
-#define set_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, ~0)
-#define clear_tsk_thread_flag(tsk, flag) __set_tsk_thread_flag(tsk, flag, 0)
-#define test_and_set_tsk_thread_flag(tsk, flag) __get_set_tsk_thread_flag(tsk, flag, ~0)
-#define test_tsk_thread_flag(tsk, flag) __get_tsk_thread_flag(tsk, flag)
-
-#define set_thread_flag(flag) set_tsk_thread_flag(current, flag)
-#define clear_thread_flag(flag) clear_tsk_thread_flag(current, flag)
-#define test_thread_flag(flag) test_tsk_thread_flag(current, flag)
-
-#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
-#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
+#define TIF_SIGPENDING 6 /* signal pending */
+#define TIF_NEED_RESCHED 7 /* rescheduling necessary */
+#define TIF_DELAYED_TRACE 14 /* single step a syscall */
+#define TIF_SYSCALL_TRACE 15 /* syscall trace active */
+#define TIF_MEMDIE 16
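/* on the big-endian m68k layout TIF_SIGPENDING and TIF_NEED_RESCHED fall
 * into the byte at flags+3 and TIF_DELAYED_TRACE/TIF_SYSCALL_TRACE into
 * the byte at flags+2, which is what the TINFO_FLAGS+3/+2 accesses in
 * entry.S rely on; TIF_MEMDIE lies above both bytes and is never tested
 * on the exit paths
 */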
#endif /* _ASM_M68K_THREAD_INFO_H */
* - pass TIF_xxxx constants to these functions
*/
-static inline void set_thread_flag(int flag)
-{
-	set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline void clear_thread_flag(int flag)
-{
-	clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_set_thread_flag(int flag)
-{
-	return test_and_set_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_and_clear_thread_flag(int flag)
-{
-	return test_and_clear_bit(flag,&current_thread_info()->flags);
-}
-
-static inline int test_thread_flag(int flag)
-{
-	return test_bit(flag,&current_thread_info()->flags);
-}
-
static inline void set_ti_thread_flag(struct thread_info *ti, int flag)
{
set_bit(flag,&ti->flags);
return test_bit(flag,&ti->flags);
}
-static inline void set_need_resched(void)
-{
- set_thread_flag(TIF_NEED_RESCHED);
-}
-
-static inline void clear_need_resched(void)
-{
- clear_thread_flag(TIF_NEED_RESCHED);
-}
+#define set_thread_flag(flag) \
+ set_ti_thread_flag(current_thread_info(), flag)
+#define clear_thread_flag(flag) \
+ clear_ti_thread_flag(current_thread_info(), flag)
+#define test_and_set_thread_flag(flag) \
+ test_and_set_ti_thread_flag(current_thread_info(), flag)
+#define test_and_clear_thread_flag(flag) \
+ test_and_clear_ti_thread_flag(current_thread_info(), flag)
+#define test_thread_flag(flag) \
+ test_ti_thread_flag(current_thread_info(), flag)
+
+#define set_need_resched() set_thread_flag(TIF_NEED_RESCHED)
+#define clear_need_resched() clear_thread_flag(TIF_NEED_RESCHED)
#endif