net: sk_wmem_alloc has initial value of one, not zero
authorEric Dumazet <eric.dumazet@gmail.com>
Tue, 16 Jun 2009 10:12:03 +0000 (10:12 +0000)
committerDavid S. Miller <davem@davemloft.net>
Wed, 17 Jun 2009 11:31:25 +0000 (04:31 -0700)
commit 2b85a34e911bf483c27cfdd124aeb1605145dc80
(net: No more expensive sock_hold()/sock_put() on each tx)
changed initial sk_wmem_alloc value.

Some protocols check sk_wmem_alloc value to determine if a timer
must delay socket deallocation. We must take care of the sk_wmem_alloc
value being one instead of zero when no write allocations are pending.

This issue was reported by Ingo Molnar, and David Miller provided the full diagnostic.

This patch introduces three helpers to get read/write allocations,
and a followup patch will use these helpers to report correct
write allocations to userspace.

Reported-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/appletalk/ddp.c
net/ax25/af_ax25.c
net/econet/af_econet.c
net/netrom/af_netrom.c
net/rose/af_rose.c
net/x25/af_x25.c

index 010e14a93c9256a0d92e35232e2292e746e8f86c..570c7a12b54eef4b1da00cc1c3883609ad5c4225 100644 (file)
@@ -1206,6 +1206,39 @@ static inline int skb_copy_to_page(struct sock *sk, char __user *from,
        return 0;
 }
 
+/**
+ * sk_wmem_alloc_get - returns write allocations
+ * @sk: socket
+ *
+ * Returns sk_wmem_alloc minus initial offset of one
+ */
+static inline int sk_wmem_alloc_get(const struct sock *sk)
+{
+       return atomic_read(&sk->sk_wmem_alloc) - 1;
+}
+
+/**
+ * sk_rmem_alloc_get - returns read allocations
+ * @sk: socket
+ *
+ * Returns sk_rmem_alloc
+ */
+static inline int sk_rmem_alloc_get(const struct sock *sk)
+{
+       return atomic_read(&sk->sk_rmem_alloc);
+}
+
+/**
+ * sk_has_allocations - check if allocations are outstanding
+ * @sk: socket
+ *
+ * Returns true if socket has write or read allocations
+ */
+static inline int sk_has_allocations(const struct sock *sk)
+{
+       return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
+}
+
 /*
  *     Queue a received datagram if it will fit. Stream and sequenced
  *     protocols can't normally use this as they need to fit buffers in
index b603cbacdc58eccf171c84599a9c2bfd426f74f4..f7a53b219ef07937ec3e42a183d2d51d87b5fbf2 100644 (file)
@@ -162,8 +162,7 @@ static void atalk_destroy_timer(unsigned long data)
 {
        struct sock *sk = (struct sock *)data;
 
-       if (atomic_read(&sk->sk_wmem_alloc) ||
-           atomic_read(&sk->sk_rmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
                add_timer(&sk->sk_timer);
        } else
@@ -175,8 +174,7 @@ static inline void atalk_destroy_socket(struct sock *sk)
        atalk_remove_socket(sk);
        skb_queue_purge(&sk->sk_receive_queue);
 
-       if (atomic_read(&sk->sk_wmem_alloc) ||
-           atomic_read(&sk->sk_rmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                setup_timer(&sk->sk_timer, atalk_destroy_timer,
                                (unsigned long)sk);
                sk->sk_timer.expires    = jiffies + SOCK_DESTROY_TIME;
index fd9d06f291dc31f4bb0d108f85d3eff51ead0e3f..61b35b955490f6729a66fd5b073058e3b056b4fc 100644 (file)
@@ -330,8 +330,7 @@ void ax25_destroy_socket(ax25_cb *ax25)
        }
 
        if (ax25->sk != NULL) {
-               if (atomic_read(&ax25->sk->sk_wmem_alloc) ||
-                   atomic_read(&ax25->sk->sk_rmem_alloc)) {
+               if (sk_has_allocations(ax25->sk)) {
                        /* Defer: outstanding buffers */
                        setup_timer(&ax25->dtimer, ax25_destroy_timer,
                                        (unsigned long)ax25);
index 8121bf0029e305c6d13267c9aeee364647fcea97..2e1f836d424064ffe346321e053c136f86682545 100644 (file)
@@ -540,8 +540,7 @@ static void econet_destroy_timer(unsigned long data)
 {
        struct sock *sk=(struct sock *)data;
 
-       if (!atomic_read(&sk->sk_wmem_alloc) &&
-           !atomic_read(&sk->sk_rmem_alloc)) {
+       if (!sk_has_allocations(sk)) {
                sk_free(sk);
                return;
        }
@@ -579,8 +578,7 @@ static int econet_release(struct socket *sock)
 
        skb_queue_purge(&sk->sk_receive_queue);
 
-       if (atomic_read(&sk->sk_rmem_alloc) ||
-           atomic_read(&sk->sk_wmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                sk->sk_timer.data     = (unsigned long)sk;
                sk->sk_timer.expires  = jiffies + HZ;
                sk->sk_timer.function = econet_destroy_timer;
index 3be0e016ab7df1bd34576816c1ea97232c36c7e8..cd911904cbe1f7e044b4ea57ac58fd2f276f8e32 100644 (file)
@@ -286,8 +286,7 @@ void nr_destroy_socket(struct sock *sk)
                kfree_skb(skb);
        }
 
-       if (atomic_read(&sk->sk_wmem_alloc) ||
-           atomic_read(&sk->sk_rmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
                sk->sk_timer.function = nr_destroy_timer;
                sk->sk_timer.expires  = jiffies + 2 * HZ;
index 877a7f65f707fa53688a4d7b349b58bcc84fcf2f..4dd9a7d18945142a1b2c065370a9c215e6f7f92b 100644 (file)
@@ -356,8 +356,7 @@ void rose_destroy_socket(struct sock *sk)
                kfree_skb(skb);
        }
 
-       if (atomic_read(&sk->sk_wmem_alloc) ||
-           atomic_read(&sk->sk_rmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
                setup_timer(&sk->sk_timer, rose_destroy_timer,
                                (unsigned long)sk);
index c51f3095739c5952eac35b8610d622309d09e1cd..8cd2390b0d45ef1410a038bc183f5962573a4291 100644 (file)
@@ -372,8 +372,7 @@ static void __x25_destroy_socket(struct sock *sk)
                kfree_skb(skb);
        }
 
-       if (atomic_read(&sk->sk_wmem_alloc) ||
-           atomic_read(&sk->sk_rmem_alloc)) {
+       if (sk_has_allocations(sk)) {
                /* Defer: outstanding buffers */
                sk->sk_timer.expires  = jiffies + 10 * HZ;
                sk->sk_timer.function = x25_destroy_timer;