#define DBGUNDO(x...) do { } while (0)
#endif
-static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
- bool unmark_loss)
+static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
{
struct tcp_sock *tp = tcp_sk(sk);
else
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
- if (undo_ssthresh && tp->prior_ssthresh > tp->snd_ssthresh) {
+ if (tp->prior_ssthresh > tp->snd_ssthresh) {
tp->snd_ssthresh = tp->prior_ssthresh;
TCP_ECN_withdraw_cwr(tp);
}
tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
}
tp->snd_cwnd_stamp = tcp_time_stamp;
-
- if (undo_ssthresh)
- tp->undo_marker = 0;
+ tp->undo_marker = 0;
}
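
For readability, here is approximately how the whole helper reads once this hunk is applied; the lines the diff elides are reconstructed from the surrounding tcp_input.c of this era, so treat this as a sketch rather than the authoritative source:

static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (unmark_loss) {
		struct sk_buff *skb;

		/* Forget the LOST marks set during the aborted recovery. */
		tcp_for_write_queue(skb, sk) {
			if (skb == tcp_send_head(sk))
				break;
			TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
		}
		tp->lost_out = 0;
		tcp_clear_all_retrans_hints(tp);
	}

	if (tp->prior_ssthresh) {
		const struct inet_connection_sock *icsk = inet_csk(sk);

		/* Prefer the congestion control module's own undo. */
		if (icsk->icsk_ca_ops->undo_cwnd)
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

		if (tp->prior_ssthresh > tp->snd_ssthresh) {
			tp->snd_ssthresh = tp->prior_ssthresh;
			TCP_ECN_withdraw_cwr(tp);
		}
	} else {
		tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->undo_marker = 0;
}

With the undo_ssthresh parameter gone, every undo now restores ssthresh and clears undo_marker; the one caller that previously passed undo_ssthresh == false (the old Hoe heuristic below) is reworked instead.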
static inline bool tcp_may_undo(const struct tcp_sock *tp)
* or our original transmission succeeded.
*/
DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
- tcp_undo_cwnd_reduction(sk, true, false);
+ tcp_undo_cwnd_reduction(sk, false);
if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
mib_idx = LINUX_MIB_TCPLOSSUNDO;
else
if (tp->undo_marker && !tp->undo_retrans) {
DBGUNDO(sk, "D-SACK");
- tcp_undo_cwnd_reduction(sk, true, false);
+ tcp_undo_cwnd_reduction(sk, false);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
}
}
struct tcp_sock *tp = tcp_sk(sk);
if (frto_undo || tcp_may_undo(tp)) {
- tcp_undo_cwnd_reduction(sk, true, true);
+ tcp_undo_cwnd_reduction(sk, true);
DBGUNDO(sk, "partial loss");
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
}
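
Both the D-SACK undo above and the partial undo below lean on two small helpers. For reference, they read roughly as follows in this era of tcp_input.c (again a sketch): tcp_packet_delayed() uses the echoed TCP timestamp to infer that the original transmission, not a retransmit, filled the hole.

/* True if the timestamp echoed by the peer predates our first
 * retransmission, i.e. the original (delayed) packet was ACKed.
 */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
{
	return !tp->retrans_stamp ||
	       (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
		before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
}

static inline bool tcp_may_undo(const struct tcp_sock *tp)
{
	return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp));
}

Note that the rewritten partial-undo test below checks tp->undo_marker && tcp_packet_delayed(tp) directly rather than tcp_may_undo(), dropping the !tp->undo_retrans shortcut: seeing every retransmit D-SACKed is no longer enough on its own to undo a partial recovery.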
/* Undo during fast recovery after partial ACK. */
-static bool tcp_try_undo_partial(struct sock *sk, int acked)
+static bool tcp_try_undo_partial(struct sock *sk, const int acked,
+ const int prior_unsacked)
{
struct tcp_sock *tp = tcp_sk(sk);
- /* Partial ACK arrived. Force Hoe's retransmit. */
- bool failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
- if (tcp_may_undo(tp)) {
+ if (tp->undo_marker && tcp_packet_delayed(tp)) {
		/* Plain luck! Hole is filled with a delayed
		 * packet, rather than with a retransmit.
		 */
+ tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
+
+ /* We are getting evidence that the reordering degree is higher
+ * than we realized. If there are no retransmits out then we
+ * can undo. Otherwise we clock out new packets but do not
+ * mark more packets lost or retransmit more.
+ */
+ if (tp->retrans_out) {
+ tcp_cwnd_reduction(sk, prior_unsacked, 0);
+ return true;
+ }
+
if (!tcp_any_retrans_done(sk))
tp->retrans_stamp = 0;
- tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
-
- DBGUNDO(sk, "Hoe");
- tcp_undo_cwnd_reduction(sk, false, false);
+ DBGUNDO(sk, "partial recovery");
+ tcp_undo_cwnd_reduction(sk, true);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
-
- /* So... Do not make Hoe's retransmit yet.
- * If the first packet was delayed, the rest
- * ones are most probably delayed as well.
- */
- failed = false;
+ tcp_try_keep_open(sk);
+ return true;
}
- return failed;
+ return false;
}
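
When retransmissions are still in flight, the rewritten function keeps the proportional rate reduction (PRR) engine running rather than undoing. For context, tcp_cwnd_reduction() in this era looks roughly like the sketch below (reconstructed, not part of this patch); with fast_rexmit == 0 it grants only enough cwnd to clock out new packets, without forcing a retransmit:

static void tcp_cwnd_reduction(struct sock *sk, const int prior_unsacked,
			       int fast_rexmit)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int sndcnt = 0;
	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
	int newly_acked_sacked = prior_unsacked -
				 (tp->packets_out - tp->sacked_out);

	tp->prr_delivered += newly_acked_sacked;
	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
		/* Reduce cwnd in proportion to the data delivered. */
		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
			       tp->prior_cwnd - 1;
		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
	} else {
		/* Below ssthresh: slow-start back up, bounded by delta. */
		sndcnt = min_t(int, delta,
			       max_t(int, tp->prr_delivered - tp->prr_out,
				     newly_acked_sacked) + 1);
	}

	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
}

Once tp->retrans_out later drops to zero while undo_marker is still set, a subsequent partial ACK can take the full-undo branch above.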
/* Process an event, which can update packets-in-flight not trivially.
if (!(flag & FLAG_SND_UNA_ADVANCED)) {
if (tcp_is_reno(tp) && is_dupack)
tcp_add_reno_sack(sk);
- } else
- do_lost = tcp_try_undo_partial(sk, acked);
+ } else {
+ if (tcp_try_undo_partial(sk, acked, prior_unsacked))
+ return;
+ /* Partial ACK arrived. Force fast retransmit. */
+ do_lost = tcp_is_reno(tp) ||
+ tcp_fackets_out(tp) > tp->reordering;
+ }
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack);
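
For orientation on the new prior_unsacked argument: it is snapshotted by the caller before the incoming ACK is processed, so that tcp_cwnd_reduction() can derive how much data this ACK newly delivered. A sketch of the caller side, under the same assumptions as the sketches above:

/* In tcp_ack(), sampled before SACK/ACK processing updates the counters: */
const int prior_unsacked = tp->packets_out - tp->sacked_out;

/* ... process the ACK, then let the recovery state machine react: */
tcp_fastretrans_alert(sk, acked, prior_unsacked, is_dupack, flag);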