struct pt_regs *regs;
per_struct *per_info;
- regs = __KSTK_PTREGS(task);
+ regs = task_pt_regs(task);
per_info = (per_struct *) &task->thread.per_info;
per_info->control_regs.bits.em_instruction_fetch =
per_info->single_step | per_info->instruction_fetch;
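The conversions below all follow one pattern: the open-coded __KSTK_PTREGS() stack arithmetic becomes a call to the task_pt_regs() helper defined near the end of this patch, which points at the user register frame saved at the top of the task's kernel stack. A minimal sketch of that helper written out as a function, illustrative only (the name example_task_pt_regs is made up here; thread_info, THREAD_SIZE and struct pt_regs are the identifiers used by the macros further down):

	/* Illustrative sketch, not part of the patch: the same arithmetic as
	 * the task_pt_regs() macro introduced below, written as a function. */
	static inline struct pt_regs *example_task_pt_regs(struct task_struct *tsk)
	{
		/* The saved user registers occupy the last sizeof(struct pt_regs)
		 * bytes of the THREAD_SIZE-sized kernel stack; stepping one
		 * pt_regs back from the end of the stack yields their address. */
		return ((struct pt_regs *)((void *) tsk->thread_info + THREAD_SIZE)) - 1;
	}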
/*
* psw and gprs are stored on the stack
*/
- tmp = *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr);
+ tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
if (addr == (addr_t) &dummy->regs.psw.mask)
/* Remove per bit from user psw. */
tmp &= ~PSW_MASK_PER;
/*
* orig_gpr2 is stored on the kernel stack
*/
- tmp = (addr_t) __KSTK_PTREGS(child)->orig_gpr2;
+ tmp = (addr_t) task_pt_regs(child)->orig_gpr2;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
high order bit but older gdb's rely on it */
data |= PSW_ADDR_AMODE;
#endif
- *(addr_t *)((addr_t) &__KSTK_PTREGS(child)->psw + addr) = data;
+ *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
	/*
* orig_gpr2 is stored on the kernel stack
*/
- __KSTK_PTREGS(child)->orig_gpr2 = data;
+ task_pt_regs(child)->orig_gpr2 = data;
} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
/*
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Fake a 31 bit psw mask. */
- tmp = (__u32)(__KSTK_PTREGS(child)->psw.mask >> 32);
+ tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Fake a 31 bit psw address. */
- tmp = (__u32) __KSTK_PTREGS(child)->psw.addr |
+ tmp = (__u32) task_pt_regs(child)->psw.addr |
PSW32_ADDR_AMODE31;
} else {
/* gpr 0-15 */
- tmp = *(__u32 *)((addr_t) &__KSTK_PTREGS(child)->psw +
+ tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
addr*2 + 4);
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
- tmp = *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4);
+ tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
/* Invalid psw mask. */
return -EINVAL;
- __KSTK_PTREGS(child)->psw.mask =
+ task_pt_regs(child)->psw.mask =
PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Build a 64 bit psw address from 31 bit address. */
- __KSTK_PTREGS(child)->psw.addr =
+ task_pt_regs(child)->psw.addr =
(__u64) tmp & PSW32_ADDR_INSN;
} else {
/* gpr 0-15 */
- *(__u32*)((addr_t) &__KSTK_PTREGS(child)->psw
+ *(__u32*)((addr_t) &task_pt_regs(child)->psw
+ addr*2 + 4) = tmp;
}
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
/*
* orig_gpr2 is stored on the kernel stack
*/
- *(__u32*)((addr_t) &__KSTK_PTREGS(child)->orig_gpr2 + 4) = tmp;
+ *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
/*
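In the 31-bit (compat) accessors above, the addr*2 + 4 arithmetic is the subtle part: the debugger addresses the psw and gprs as 4-byte slots, while the 64-bit pt_regs stores 8-byte fields, so doubling the offset reaches the matching 64-bit slot and the extra 4 selects its low-order word on big-endian s390. A sketch of the read side, mirroring the gpr branch above (illustrative only; example_peek_compat_gpr is a made-up name, everything else comes from the hunks above):

	static __u32 example_peek_compat_gpr(struct task_struct *child, addr_t addr)
	{
		/* addr is the 31-bit ptrace offset of a gpr slot (4-byte units,
		 * starting right after the 8-byte compat psw); doubling it lands
		 * on the matching 8-byte field in the 64-bit frame, and +4 picks
		 * its low-order word, the second word on big-endian s390. */
		return *(__u32 *)((addr_t) &task_pt_regs(child)->psw + addr*2 + 4);
	}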
extern void show_trace(struct task_struct *task, unsigned long *sp);
unsigned long get_wchan(struct task_struct *p);
-#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
- ((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)))
-#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
-#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
+#define task_pt_regs(tsk) ((struct pt_regs *) \
+ ((void *)(tsk)->thread_info + THREAD_SIZE) - 1)
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->psw.addr)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->gprs[15])
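The replacement definition is equivalent to the one it removes: casting the end of the stack region to struct pt_regs * and subtracting one element steps back exactly sizeof(struct pt_regs) bytes, the same quantity the old macro subtracted explicitly. An illustrative check, not part of the patch (example_check_task_pt_regs is a made-up name; WARN_ON is the standard kernel macro):

	static void example_check_task_pt_regs(struct task_struct *tsk)
	{
		/* Old form: subtract sizeof(struct pt_regs) from the stack end
		 * by hand.  New form: cast the stack end to struct pt_regs *
		 * and step back one element.  Both name the same address. */
		struct pt_regs *old = (struct pt_regs *)
			((unsigned long) tsk->thread_info + THREAD_SIZE
			 - sizeof(struct pt_regs));

		WARN_ON(old != task_pt_regs(tsk));
	}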
/*
* Give up the time slice of the virtual PU.