tcp: tsq: avoid one atomic in tcp_wfree()
author Eric Dumazet <edumazet@google.com>
Sat, 3 Dec 2016 19:14:53 +0000 (11:14 -0800)
committer David S. Miller <davem@davemloft.net>
Mon, 5 Dec 2016 18:32:23 +0000 (13:32 -0500)
Under high load, tcp_wfree() performs an atomic operation to schedule
a tasklet over and over, even when the tasklet is already pending.

We need to schedule it only when our per-cpu list was empty: a
non-empty list means whoever queued the first socket already
scheduled the tasklet.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/tcp_output.c

index fa23b688a6f319b842d9d2bb06178e81c684ecb5..0db63efe5b8b98c6f7c1809440c2e071ce2febd2 100644
@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
 
        for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
                struct tsq_tasklet *tsq;
+               bool empty;
 
                if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
                        goto out;
@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
                /* queue this socket to tasklet queue */
                local_irq_save(flags);
                tsq = this_cpu_ptr(&tsq_tasklet);
+               empty = list_empty(&tsq->head);
                list_add(&tp->tsq_node, &tsq->head);
-               tasklet_schedule(&tsq->tasklet);
+               if (empty)
+                       tasklet_schedule(&tsq->tasklet);
                local_irq_restore(flags);
                return;
        }
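
The same "wake only on the empty-to-non-empty transition" idea applies
outside the kernel. Below is a minimal userspace C sketch, assuming
nothing beyond POSIX threads: a mutex stands in for local_irq_save(),
a condition variable for tasklet_schedule(), and all names (enqueue,
worker, queue_head) are made up for illustration, not taken from the
patch. Producers signal the consumer only when the list was empty;
a non-empty list means a wakeup is already pending, so the redundant
signal (the analogue of the atomic inside tasklet_schedule()) is
skipped.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
};

static struct node *queue_head;	/* plays the role of tsq->head */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t queue_cond = PTHREAD_COND_INITIALIZER;
static long wakeups;		/* counts the "tasklet_schedule" analogues */

static void enqueue(void)
{
	struct node *n = malloc(sizeof(*n));
	bool empty;

	pthread_mutex_lock(&queue_lock);	/* local_irq_save() analogue */
	empty = (queue_head == NULL);
	n->next = queue_head;
	queue_head = n;
	if (empty) {		/* wake the consumer only if the list was empty */
		wakeups++;
		pthread_cond_signal(&queue_cond);
	}
	pthread_mutex_unlock(&queue_lock);
}

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&queue_lock);
	for (;;) {
		while (!queue_head)
			pthread_cond_wait(&queue_cond, &queue_lock);
		/* drain the whole list in one pass, as the tasklet does */
		while (queue_head) {
			struct node *n = queue_head;

			queue_head = n->next;
			free(n);
		}
	}
	return NULL;
}

int main(void)
{
	pthread_t t;
	long i;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < 100000; i++)
		enqueue();
	pthread_mutex_lock(&queue_lock);
	printf("enqueues: 100000, wakeups: %ld\n", wakeups);
	pthread_mutex_unlock(&queue_lock);
	return 0;
}

On a loaded run the wakeup count typically comes out well below the
enqueue count, which is the saving the patch targets: under load, most
tcp_wfree() calls find the per-cpu list already non-empty and skip
tasklet_schedule() entirely.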