[IPV4/IPV6]: Replace spin_lock_irq with spin_lock_bh
author Herbert Xu <herbert@gondor.apana.org.au>
Sun, 19 Jun 2005 05:56:18 +0000 (22:56 -0700)
committer David S. Miller <davem@davemloft.net>
Sun, 19 Jun 2005 05:56:18 +0000 (22:56 -0700)
In light of my recent patch to net/ipv4/udp.c that replaced the
spin_lock_irq calls on the receive queue lock with spin_lock_bh,
here is a similar patch for all other occurrences of spin_lock_irq
on receive/error queue locks in IPv4 and IPv6.

In these stacks, the receive and error queue locks can only be taken
from user (process) or softirq context, never from hard IRQ context.
Therefore it is safe to disable BH only.
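
For illustration only (this sketch is not part of the patch, and the
queue/function names in it are invented): the locking pattern that
makes BH-only protection sufficient.  Only the spinlock and
sk_buff_head primitives are real kernel APIs.

#include <linux/spinlock.h>
#include <linux/skbuff.h>

/* Hypothetical queue standing in for sk->sk_receive_queue; assume
 * skb_queue_head_init(&example_queue) was called at init time. */
static struct sk_buff_head example_queue;

/* Process-context side (e.g. recvmsg or an ioctl handler).  The only
 * other user of example_queue.lock runs in softirq context, so taking
 * the lock with BHs disabled is enough -- no hard IRQ handler can try
 * to grab it and deadlock against us. */
static int example_queue_first_len(void)
{
        struct sk_buff *skb;
        int amount = 0;

        spin_lock_bh(&example_queue.lock);
        skb = skb_peek(&example_queue);
        if (skb != NULL)
                amount = skb->len;
        spin_unlock_bh(&example_queue.lock);

        return amount;
}

/* Softirq-context side (e.g. the protocol receive path).  We are
 * already running in BH context, so a plain spin_lock() on the same
 * lock is sufficient here. */
static void example_queue_add(struct sk_buff *skb)
{
        spin_lock(&example_queue.lock);
        __skb_queue_tail(&example_queue, skb);
        spin_unlock(&example_queue.lock);
}

If a hard IRQ handler also took example_queue.lock, the process-context
side would need spin_lock_irq()/spin_lock_irqsave() instead; the
receive and error queue locks have no such user, which is what this
patch relies on.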

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/ipv4/ip_sockglue.c
net/ipv4/raw.c
net/ipv6/datagram.c
net/ipv6/raw.c
net/ipv6/udp.c

diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 47012b93cad2e58c5b28dbdcb899e68006085e79..f8b172f89811d2b6fa2d9a762e1a20d47679b36b 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -360,14 +360,14 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len)
        err = copied;
 
        /* Reset and regenerate socket error */
-       spin_lock_irq(&sk->sk_error_queue.lock);
+       spin_lock_bh(&sk->sk_error_queue.lock);
        sk->sk_err = 0;
        if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
                sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-               spin_unlock_irq(&sk->sk_error_queue.lock);
+               spin_unlock_bh(&sk->sk_error_queue.lock);
                sk->sk_error_report(sk);
        } else
-               spin_unlock_irq(&sk->sk_error_queue.lock);
+               spin_unlock_bh(&sk->sk_error_queue.lock);
 
 out_free_skb:  
        kfree_skb(skb);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 5b1ec586bae631eed81a51eb0472e1c48a1613f2..8c1512021ee86417649f4e2131602ffc340e18cd 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -691,11 +691,11 @@ static int raw_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        struct sk_buff *skb;
                        int amount = 0;
 
-                       spin_lock_irq(&sk->sk_receive_queue.lock);
+                       spin_lock_bh(&sk->sk_receive_queue.lock);
                        skb = skb_peek(&sk->sk_receive_queue);
                        if (skb != NULL)
                                amount = skb->len;
-                       spin_unlock_irq(&sk->sk_receive_queue.lock);
+                       spin_unlock_bh(&sk->sk_receive_queue.lock);
                        return put_user(amount, (int __user *)arg);
                }
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 65b9375df57d7d95d04ea6a3da309898a751f0f3..5229365cd8b4849371e498489fe0fe65427c6aa8 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -353,14 +353,14 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
        err = copied;
 
        /* Reset and regenerate socket error */
-       spin_lock_irq(&sk->sk_error_queue.lock);
+       spin_lock_bh(&sk->sk_error_queue.lock);
        sk->sk_err = 0;
        if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
                sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
-               spin_unlock_irq(&sk->sk_error_queue.lock);
+               spin_unlock_bh(&sk->sk_error_queue.lock);
                sk->sk_error_report(sk);
        } else {
-               spin_unlock_irq(&sk->sk_error_queue.lock);
+               spin_unlock_bh(&sk->sk_error_queue.lock);
        }
 
 out_free_skb:  
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 617645bc5ed6ae0fb0ad060d284f24dd6b081ccd..e2b848ec98513ac9ababcc9f3d588c0fd93efc84 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -434,12 +434,12 @@ csum_copy_err:
        /* Clear queue. */
        if (flags&MSG_PEEK) {
                int clear = 0;
-               spin_lock_irq(&sk->sk_receive_queue.lock);
+               spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        clear = 1;
                }
-               spin_unlock_irq(&sk->sk_receive_queue.lock);
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
                if (clear)
                        kfree_skb(skb);
        }
@@ -971,11 +971,11 @@ static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
                        struct sk_buff *skb;
                        int amount = 0;
 
-                       spin_lock_irq(&sk->sk_receive_queue.lock);
+                       spin_lock_bh(&sk->sk_receive_queue.lock);
                        skb = skb_peek(&sk->sk_receive_queue);
                        if (skb != NULL)
                                amount = skb->tail - skb->h.raw;
-                       spin_unlock_irq(&sk->sk_receive_queue.lock);
+                       spin_unlock_bh(&sk->sk_receive_queue.lock);
                        return put_user(amount, (int __user *)arg);
                }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index e251d0ba4f39362b0207d3304ecbd0f748ea98c3..eff050ac7049601e6cc040c14210ac678b16e788 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -300,12 +300,12 @@ csum_copy_err:
        /* Clear queue. */
        if (flags&MSG_PEEK) {
                int clear = 0;
-               spin_lock_irq(&sk->sk_receive_queue.lock);
+               spin_lock_bh(&sk->sk_receive_queue.lock);
                if (skb == skb_peek(&sk->sk_receive_queue)) {
                        __skb_unlink(skb, &sk->sk_receive_queue);
                        clear = 1;
                }
-               spin_unlock_irq(&sk->sk_receive_queue.lock);
+               spin_unlock_bh(&sk->sk_receive_queue.lock);
                if (clear)
                        kfree_skb(skb);
        }