From: Ilpo Järvinen
Date: Fri, 12 Oct 2007 00:36:13 +0000 (-0700)
Subject: [TCP]: Limit processing lost_retrans loop to work-to-do cases
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=b08d6cb22c777c8c91c16d8e3b8aafc93c98cbd9;p=GitHub%2Fmoto-9609%2Fandroid_kernel_motorola_exynos9610.git

[TCP]: Limit processing lost_retrans loop to work-to-do cases

This addition of lost_retrans_low to tcp_sock might be unnecessary;
it's not clear how often the lost_retrans worker is executed when
there wasn't work to do.

Signed-off-by: Ilpo Järvinen
Signed-off-by: David S. Miller
---

diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 9ff456e8d6c7..c5b94c1a5ee2 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -348,6 +348,8 @@ struct tcp_sock {
 	int	lost_cnt_hint;
 	int	retransmit_cnt_hint;
 
+	u32	lost_retrans_low;	/* Sent seq after any rxmit (lowest) */
+
 	u16	advmss;		/* Advertised MSS */
 	u16	prior_ssthresh; /* ssthresh saved at recovery start */
 	u32	lost_out;	/* Lost packets */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index d5e0fcc22a3b..0a42e9340346 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1112,7 +1112,8 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
  * less than what is now known to be received by the other end (derived from
- * SACK blocks by the caller).
+ * SACK blocks by the caller). Also calculate the lowest snd_nxt among the
+ * remaining retransmitted skbs to avoid some costly processing per ACKs.
  */
 static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 {
@@ -1120,6 +1121,7 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 	struct sk_buff *skb;
 	int flag = 0;
 	int cnt = 0;
+	u32 new_low_seq = 0;
 
 	tcp_for_write_queue(skb, sk) {
 		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1151,9 +1153,15 @@ static int tcp_mark_lost_retrans(struct sock *sk, u32 received_upto)
 				NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
 			}
 		} else {
+			if (!new_low_seq || before(ack_seq, new_low_seq))
+				new_low_seq = ack_seq;
 			cnt += tcp_skb_pcount(skb);
 		}
 	}
+
+	if (tp->retrans_out)
+		tp->lost_retrans_low = new_low_seq;
+
 	return flag;
 }
 
@@ -1481,8 +1489,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 	}
 
-	if (tp->retrans_out && highest_sack_end_seq &&
-	    after(highest_sack_end_seq, tp->high_seq) &&
+	if (tp->retrans_out &&
+	    after(highest_sack_end_seq, tp->lost_retrans_low) &&
 	    icsk->icsk_ca_state == TCP_CA_Recovery)
 		flag |= tcp_mark_lost_retrans(sk, highest_sack_end_seq);
 
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 53296753b0bd..324b4207254a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1914,6 +1914,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 			printk(KERN_DEBUG "retrans_out leaked.\n");
 	}
 #endif
+	if (!tp->retrans_out)
+		tp->lost_retrans_low = tp->snd_nxt;
 	TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS;
 	tp->retrans_out += tcp_skb_pcount(skb);
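
The heart of this change is the per-ACK gate in tcp_sacktag_write_queue(): tcp_mark_lost_retrans() is only worth calling while retransmissions are outstanding and the newest SACKed sequence has passed tp->lost_retrans_low, the lowest snd_nxt recorded when the remaining retransmissions were sent. A minimal sketch of that gate follows; the helper name lost_retrans_has_work() is hypothetical and not part of the patch, while the fields and the after() helper are those used in the diff above.

static inline int lost_retrans_has_work(const struct tcp_sock *tp,
					u32 highest_sack_end_seq)
{
	/* Walk the write queue only if retransmissions are still out and
	 * the highest SACKed sequence reaches beyond the lowest snd_nxt
	 * recorded when the remaining retransmissions were sent.
	 */
	return tp->retrans_out &&
	       after(highest_sack_end_seq, tp->lost_retrans_low);
}

At the real call site this check is additionally restricted to icsk->icsk_ca_state == TCP_CA_Recovery, and tcp_retransmit_skb() seeds lost_retrans_low from snd_nxt when the first retransmission goes out (retrans_out still zero), so the gate starts out conservative and is tightened by tcp_mark_lost_retrans() itself as it recomputes the low mark.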