* If arch_uprobe->insn doesn't use rip-relative addressing, return
* immediately. Otherwise, rewrite the instruction so that it accesses
* its memory operand indirectly through a scratch register. Set
- * def->fixups accordingly. (The contents of the scratch register
+ * defparam->fixups accordingly. (The contents of the scratch register
* will be saved before we single-step the modified instruction,
* and restored afterward).
*
*/
if (reg != 6 && reg2 != 6) {
reg2 = 6;
- auprobe->def.fixups |= UPROBE_FIX_RIP_SI;
+ auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI;
} else if (reg != 7 && reg2 != 7) {
reg2 = 7;
- auprobe->def.fixups |= UPROBE_FIX_RIP_DI;
+ auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI;
/* TODO (paranoia): force maskmovq to not use di */
} else {
reg2 = 3;
- auprobe->def.fixups |= UPROBE_FIX_RIP_BX;
+ auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX;
}
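/*
 * Illustration, not part of this patch: once a scratch register is
 * chosen, the ModRM byte is rewritten from rip-relative to
 * register-indirect while keeping the disp32, e.g. with
 * UPROBE_FIX_RIP_SI:
 *
 *	89 05 78 56 34 12	mov %eax,0x12345678(%rip)
 * becomes
 *	89 86 78 56 34 12	mov %eax,0x12345678(%rsi)
 *
 * A minimal sketch of that byte computation, assuming the rewritten
 * operand uses the "mod=10, base register + disp32" encoding
 * (example_modrm is a hypothetical helper, not kernel code):
 */
static unsigned char example_modrm(unsigned char reg, unsigned char reg2)
{
	/* mod = 10 (disp32 + base), reg field unchanged, rm = scratch register */
	return 0x80 | (reg << 3) | (reg2 & 0x7);
}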
/*
* Point cursor at the modrm byte. The next 4 bytes are the
* displacement. Beyond the displacement, for some instructions,
* is the immediate operand.
*/
static inline unsigned long *
scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- if (auprobe->def.fixups & UPROBE_FIX_RIP_SI)
+ if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI)
return &regs->si;
- if (auprobe->def.fixups & UPROBE_FIX_RIP_DI)
+ if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI)
return &regs->di;
return &regs->bx;
}
/*
* If we're emulating a rip-relative instruction, save the contents
* of the scratch register and store the target address in that register.
*/
static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
{
- if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) {
+ if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
struct uprobe_task *utask = current->utask;
unsigned long *sr = scratch_reg(auprobe, regs);
utask->autask.saved_scratch_register = *sr;
- *sr = utask->vaddr + auprobe->def.ilen;
+ *sr = utask->vaddr + auprobe->defparam.ilen;
}
}
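/*
 * Worked example, not part of this patch: take a 6-byte rip-relative
 * instruction probed at vaddr 0x400000 with a disp32 of 0x12345678.
 * At the original site it would access (0x400000 + 6) + 0x12345678.
 * riprel_pre_xol() loads the scratch register with vaddr + ilen =
 * 0x400006, so the rewritten disp32(%scratch) form computes the same
 * effective address even though the copy runs from the XOL slot
 * (example_riprel_target is a hypothetical illustration only):
 */
static unsigned long example_riprel_target(void)
{
	unsigned long vaddr = 0x400000, ilen = 6, disp32 = 0x12345678;
	unsigned long scratch = vaddr + ilen;	/* value riprel_pre_xol() loads */

	return scratch + disp32;	/* same address the original insn would access */
}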
static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
- if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) {
+ if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) {
struct uprobe_task *utask = current->utask;
unsigned long *sr = scratch_reg(auprobe, regs);

*sr = utask->autask.saved_scratch_register;
}
}

static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;

riprel_post_xol(auprobe, regs);
- if (auprobe->def.fixups & UPROBE_FIX_IP) {
+ if (auprobe->defparam.fixups & UPROBE_FIX_IP) {
long correction = utask->vaddr - utask->xol_vaddr;
regs->ip += correction;
- } else if (auprobe->def.fixups & UPROBE_FIX_CALL) {
- regs->sp += sizeof_long();
- if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen))
+ } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) {
+ regs->sp += sizeof_long(); /* Pop incorrect return address */
+ if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen))
return -ERESTART;
}
/* popf; tell the caller to not touch TF */
- if (auprobe->def.fixups & UPROBE_FIX_SETF)
+ if (auprobe->defparam.fixups & UPROBE_FIX_SETF)
utask->autask.saved_tf = true;
return 0;
}
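/*
 * Worked example, not part of this patch: UPROBE_FIX_IP rebases the
 * post-single-step ip from the XOL slot back to the probed address.
 * The addresses below are made up for illustration (example_fix_ip is
 * a hypothetical helper, not kernel code):
 */
static unsigned long example_fix_ip(void)
{
	unsigned long vaddr = 0x400100;		/* probed instruction */
	unsigned long xol_vaddr = 0x7f0000;	/* XOL slot the copy ran from */
	unsigned long ilen = 5;			/* instruction length */
	unsigned long ip = xol_vaddr + ilen;	/* ip right after the single-step */
	long correction = vaddr - xol_vaddr;

	return ip + correction;			/* == vaddr + ilen == 0x400105 */
}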
/*
* Figure out which fixups default_post_xol_op() will need to perform,
- * and annotate def->fixups accordingly. To start with, ->fixups is
- * either zero or it reflects rip-related fixups.
+ * and annotate defparam->fixups accordingly.
*/
switch (OPCODE1(&insn)) {
case 0x9d: /* popf */
- auprobe->def.fixups |= UPROBE_FIX_SETF;
+ auprobe->defparam.fixups |= UPROBE_FIX_SETF;
break;
case 0xc3: /* ret or lret -- ip is correct */
case 0xcb:
riprel_analyze(auprobe, &insn);
}
- auprobe->def.ilen = insn.length;
- auprobe->def.fixups |= fix_ip_or_call;
+ auprobe->defparam.ilen = insn.length;
+ auprobe->defparam.fixups |= fix_ip_or_call;
auprobe->ops = &default_xol_ops;
return 0;
}
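/*
 * Minimal sketch, not part of this patch, of how the pieces above fit
 * together for one probe hit once ->ops points at default_xol_ops.
 * example_xol_cycle is hypothetical; only riprel_pre_xol() and
 * default_post_xol_op() are real, and in the kernel they are invoked
 * through the ->pre_xol/->post_xol hooks rather than called directly:
 */
static int example_xol_cycle(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	riprel_pre_xol(auprobe, regs);	/* save scratch reg, point it at vaddr + ilen */
	/* ... the copied instruction is single-stepped in the XOL slot ... */
	return default_post_xol_op(auprobe, regs);	/* restore reg, apply FIX_* fixups */
}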