{
struct pt_regs *childregs;
- childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)p->thread_info)) - 1;
+ childregs = ((struct pt_regs *)(THREAD_SIZE + (unsigned long)task_stack_page(p))) - 1;
*childregs = *regs;
if (user_mode(regs))
childregs->sp = usp;
else
- childregs->sp = (unsigned long)p->thread_info + THREAD_SIZE;
+ childregs->sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
childregs->r12 = 0; /* Set return value for child */
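/*
 * For reference, the accessors these hunks switch to.  A minimal
 * sketch, assuming the post-rename <linux/sched.h> where the
 * task_struct field is now called "stack":
 *
 *	#define task_thread_info(task)	((struct thread_info *)(task)->stack)
 *	#define task_stack_page(task)	((task)->stack)
 *
 * In copy_thread() above, the child's pt_regs frame is carved out of
 * the top of its kernel stack: stack base plus THREAD_SIZE, minus one
 * struct pt_regs.
 */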
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
- stack_page = (unsigned long)p->thread_info;
+ stack_page = (unsigned long)task_stack_page(p);
BUG_ON(!stack_page);
/*
static struct pt_regs *get_user_regs(struct task_struct *tsk)
{
- return (struct pt_regs *)((unsigned long) tsk->thread_info +
+ return (struct pt_regs *)((unsigned long)task_stack_page(tsk) +
THREAD_SIZE - sizeof(struct pt_regs));
}
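/*
 * get_user_regs() leans on the same fixed layout as copy_thread()
 * above: the saved user-mode pt_regs sits at the very top of the
 * THREAD_SIZE kernel stack, i.e. THREAD_SIZE - sizeof(struct pt_regs)
 * above task_stack_page(tsk).
 */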
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
- DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
DEFINE(TASK_SIGPENDING, offsetof(struct task_struct, pending));
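/*
 * These DEFINE()s generate asm-offsets for assembly code; the common
 * form of the macro (the exact spelling varies per architecture) is
 * roughly:
 *
 *	#define DEFINE(sym, val) \
 *		asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 *
 * Each line becomes a marker that the build post-processes into a
 * #define in asm-offsets.h.  TASK_THREAD_INFO keeps its old name but
 * now measures the offset of the renamed "stack" field.
 */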
regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
TASK_COMM_LEN, current->comm, current->pid,
- current_thread_info(), current, current->thread_info);
+ current_thread_info(), current, task_thread_info(current));
/*
* When in-kernel, we also print out the stack and code at the
* time of the fault..
DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
- DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, thread_info));
+ DEFINE(TASK_THREAD_INFO, offsetof(struct task_struct, stack));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
write_tc_gpr_sp(__KSTK_TOS(idle));
/* global pointer */
- write_tc_gpr_gp((unsigned long)idle->thread_info);
+ write_tc_gpr_gp((unsigned long)task_thread_info(idle));
smtc_status |= SMTC_MTC_ACTIVE;
write_tc_c0_tchalt(0);
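/*
 * MIPS kernel mode keeps the current thread_info pointer in the gp
 * register ($28), so bringing a TC up for the idle task means seeding
 * its saved sp with the top of the idle task's kernel stack
 * (__KSTK_TOS) and its saved gp with task_thread_info(idle).
 */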
*/
static inline void stack_overflow_check(struct pt_regs *regs)
{
- u64 curbase = (u64) current->thread_info;
+ u64 curbase = (u64)task_stack_page(current);
static unsigned long warned = -60*HZ;
if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
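/*
 * thread_info sits at the bottom of the stack page, so an rsp that
 * has sunk near curbase (the elided remainder of this condition
 * checks the exact margin) is about to trample it.  Initializing
 * "warned" to -60*HZ makes the first hit report immediately, after
 * which the check rate-limits to one warning per minute.
 */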
#define TS_USEDFPU 0x0001 /* FPU was used by this task this quantum (SMP) */
#define TS_POLLING 0x0002 /* True if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* __KERNEL__ */
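/*
 * TS_POLLING is what lets the scheduler skip a reschedule IPI.  In
 * this era's kernel/sched.c, resched_task() ends with roughly:
 *
 *	smp_mb();
 *	if (!tsk_is_polling(p))
 *		smp_send_reschedule(cpu);
 *
 * because an idle CPU polling on TIF_NEED_RESCHED will notice the
 * flag without being interrupted.
 */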
#define TS_POLLING 1 /* true if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* _ASM_IA64_THREAD_INFO_H */
if (cpu_has_dsp) \
__save_dsp(prev); \
next->thread.emulated_fp = 0; \
- (last) = resume(prev, next, next->thread_info); \
+ (last) = resume(prev, next, task_thread_info(next)); \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
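/*
 * resume() is the MIPS asm context-switch primitive; its third
 * argument is loaded into $28 on the way out, so the incoming task's
 * thread_info becomes the new kernel-mode gp (matching the SMTC hunk
 * above).
 */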
static inline int __is_compat_task(struct task_struct *t)
{
- return test_ti_thread_flag(t->thread_info, TIF_32BIT);
+ return test_ti_thread_flag(task_thread_info(t), TIF_32BIT);
}
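/*
 * TIF_* flags live in thread_info rather than task_struct, which is
 * why the compat check goes through task_thread_info() instead of a
 * task_struct field.
 */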
static inline int is_compat_task(void)
#define TS_COMPAT 0x0002 /* 32bit syscall active */
#define TS_POLLING 0x0004 /* true if in idle loop and not sleeping */
-#define tsk_is_polling(t) ((t)->thread_info->status & TS_POLLING)
+#define tsk_is_polling(t) (task_thread_info(t)->status & TS_POLLING)
#endif /* __KERNEL__ */
debug_mutex_lock_common(lock, &waiter);
mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- debug_mutex_add_waiter(lock, &waiter, task->thread_info);
+ debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
/* add waiting tasks to the end of the waitqueue (FIFO): */
list_add_tail(&waiter.list, &lock->wait_list);
*/
if (unlikely(state == TASK_INTERRUPTIBLE &&
signal_pending(task))) {
- mutex_remove_waiter(lock, &waiter, task->thread_info);
+ mutex_remove_waiter(lock, &waiter, task_thread_info(task));
mutex_release(&lock->dep_map, 1, _RET_IP_);
spin_unlock_mutex(&lock->wait_lock, flags);
}
/* got the lock - rejoice! */
- mutex_remove_waiter(lock, &waiter, task->thread_info);
- debug_mutex_set_owner(lock, task->thread_info);
+ mutex_remove_waiter(lock, &waiter, task_thread_info(task));
+ debug_mutex_set_owner(lock, task_thread_info(task));
/* set it to 0 if there are no waiters left: */
if (likely(list_empty(&lock->wait_list)))
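/*
 * CONFIG_DEBUG_MUTEXES identifies both waiters and the lock owner by
 * thread_info pointer, so every call site in the slowpath must go
 * through task_thread_info(task) once the field is renamed; only the
 * accessor changes, the FIFO wait-list logic itself is untouched.
 */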