commit f3c5c1bfd4308 (make ip_tables reentrant) introduced a race in
handling the stackptr restore at the end of ipt_do_table().

We should do the restore before the call to xt_info_rdunlock_bh();
otherwise we allow cpu preemption, and another cpu can overwrite the
stackptr of the original one.
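In other words, the tail of ipt_do_table() should end up looking roughly
like this (condensed sketch, not the literal file contents):

	} while (!acpar.hotdrop);

	pr_debug("Exiting %s; resetting sp from %u to %u\n",
		 __func__, *stackptr, origptr);
	/*
	 * Restore the per-cpu jump stack pointer while the xt_info read
	 * lock is still held and BHs are still disabled.  With the old
	 * order (unlock first, restore second) preemption is possible
	 * between the two statements, and the late store can overwrite a
	 * stackptr that is already in use again.
	 */
	*stackptr = origptr;
	xt_info_rdunlock_bh();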
A second fix is to change the underflow test to check against the
origptr value instead of 0; otherwise we allow a jump from a different
hook, since a reentrant invocation enters with a non-zero *stackptr and
testing against 0 would let it pop jump stack frames pushed by the
outer invocation.
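Roughly, the RETURN handling then becomes (condensed sketch, pr_debug
calls and surrounding context elided):

	if (v != IPT_RETURN) {
		verdict = (unsigned)(-v) - 1;
		break;
	}
	/*
	 * Underflow: we are back at the depth we entered with, so take
	 * this hook's underflow entry.  Comparing against 0 instead
	 * would let a nested invocation keep popping frames that belong
	 * to a different hook's traversal.
	 */
	if (*stackptr <= origptr) {
		e = get_entry(table_base, private->underflow[hook]);
	} else {
		e = jumpstack[--*stackptr];
		e = ipt_next_entry(e);
	}
	continue;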
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Jan Engelhardt <jengelh@medozas.de>
Signed-off-by: Patrick McHardy <kaber@trash.net>
 					verdict = (unsigned)(-v) - 1;
 					break;
 				}
-				if (*stackptr == 0) {
+				if (*stackptr <= origptr) {
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 					pr_debug("Underflow (this is normal) "
 			/* Verdict */
 			break;
 	} while (!acpar.hotdrop);
-	xt_info_rdunlock_bh();
 	pr_debug("Exiting %s; resetting sp from %u to %u\n",
 		 __func__, *stackptr, origptr);
 	*stackptr = origptr;
+	xt_info_rdunlock_bh();
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;
 #else
 					verdict = (unsigned)(-v) - 1;
 					break;
 				}
-				if (*stackptr == 0)
+				if (*stackptr <= origptr)
 					e = get_entry(table_base,
 					    private->underflow[hook]);
 				else
 			break;
 	} while (!acpar.hotdrop);
-	xt_info_rdunlock_bh();
 	*stackptr = origptr;
+	xt_info_rdunlock_bh();
 #ifdef DEBUG_ALLOW_ALL
 	return NF_ACCEPT;