tcp: tcp_prequeue() cleanup
author Eric Dumazet <dada1@cosmosbay.com>
Thu, 7 May 2009 07:08:38 +0000 (07:08 +0000)
committer David S. Miller <davem@davemloft.net>
Thu, 7 May 2009 21:52:26 +0000 (14:52 -0700)
Small cleanup patch to reduce line lengths, before a change in
tcp_prequeue().
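
In outline, the cleanup inverts the initial test and returns early, so the
queueing path loses one level of indentation. A minimal sketch of the
before/after shape (body elided, not the full function):

	/* before: whole body nested under one condition */
	if (!sysctl_tcp_low_latency && tp->ucopy.task) {
		/* queue skb, account memory, wake the reading task ... */
		return 1;
	}
	return 0;

	/* after: guard clause first, body un-indented */
	if (sysctl_tcp_low_latency || !tp->ucopy.task)
		return 0;
	/* queue skb, account memory, wake the reading task ... */
	return 1;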

Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/tcp.h

index b55b4891029e9cb0b35439b02bc531b91bba1acc..ac37228b700137d84d6c7b69d152214a3dde782a 100644
@@ -890,30 +890,31 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
-       if (!sysctl_tcp_low_latency && tp->ucopy.task) {
-               __skb_queue_tail(&tp->ucopy.prequeue, skb);
-               tp->ucopy.memory += skb->truesize;
-               if (tp->ucopy.memory > sk->sk_rcvbuf) {
-                       struct sk_buff *skb1;
-
-                       BUG_ON(sock_owned_by_user(sk));
-
-                       while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
-                               sk_backlog_rcv(sk, skb1);
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
-                       }
-
-                       tp->ucopy.memory = 0;
-               } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
-                       wake_up_interruptible(sk->sk_sleep);
-                       if (!inet_csk_ack_scheduled(sk))
-                               inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
-                                                         (3 * TCP_RTO_MIN) / 4,
-                                                         TCP_RTO_MAX);
+       if (sysctl_tcp_low_latency || !tp->ucopy.task)
+               return 0;
+
+       __skb_queue_tail(&tp->ucopy.prequeue, skb);
+       tp->ucopy.memory += skb->truesize;
+       if (tp->ucopy.memory > sk->sk_rcvbuf) {
+               struct sk_buff *skb1;
+
+               BUG_ON(sock_owned_by_user(sk));
+
+               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+                       sk_backlog_rcv(sk, skb1);
+                       NET_INC_STATS_BH(sock_net(sk),
+                                        LINUX_MIB_TCPPREQUEUEDROPPED);
                }
-               return 1;
+
+               tp->ucopy.memory = 0;
+       } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
+               wake_up_interruptible(sk->sk_sleep);
+               if (!inet_csk_ack_scheduled(sk))
+                       inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
+                                                 (3 * TCP_RTO_MIN) / 4,
+                                                 TCP_RTO_MAX);
        }
-       return 0;
+       return 1;
 }
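
For context, the return value contract is unchanged: 1 means the skb was
prequeued for the reading task, 0 means the caller must process it now.
A rough, illustrative sketch of how a caller such as tcp_v4_rcv() uses it
in kernels of this era (not part of this patch, details may differ):

	/* softirq path, socket not currently owned by the user task */
	if (!tcp_prequeue(sk, skb))
		ret = tcp_v4_do_rcv(sk, skb);	/* process the segment now */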