rds: Don't disable BH in BH context
authorYing Xue <ying.xue@windriver.com>
Sun, 19 Aug 2012 21:44:08 +0000 (21:44 +0000)
committerDavid S. Miller <davem@davemloft.net>
Thu, 23 Aug 2012 05:52:04 +0000 (22:52 -0700)
Since we are already in BH context when *_write_space(),
*_data_ready() and *_state_change() are called, it's
unnecessary to disable BH again.
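
For illustration only (not part of the patch): a minimal sketch of the
callback shape these hunks change, using hypothetical names
(example_conn, example_data_ready, orig_data_ready). Because the
callback is already invoked from softirq (BH) context, the plain
read_lock()/read_unlock() pair on sk->sk_callback_lock is sufficient;
the _bh variants would only redundantly disable and re-enable bottom
halves.

#include <net/sock.h>

/* Hypothetical per-connection state; the original callback is saved
 * when the socket callbacks are overridden at setup time.
 */
struct example_conn {
	void (*orig_data_ready)(struct sock *sk, int bytes);
};

/* Installed as sk->sk_data_ready, so it runs in softirq (BH) context. */
static void example_data_ready(struct sock *sk, int bytes)
{
	void (*ready)(struct sock *sk, int bytes);
	struct example_conn *conn;

	read_lock(&sk->sk_callback_lock);	/* BH is already disabled here */
	conn = sk->sk_user_data;
	if (!conn) {				/* teardown race: no private state left */
		ready = sk->sk_data_ready;
		goto out;
	}
	ready = conn->orig_data_ready;		/* chain to the saved callback */
	/* ... per-connection receive processing under the lock ... */
out:
	read_unlock(&sk->sk_callback_lock);
	ready(sk, bytes);
}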

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/rds/tcp_recv.c
net/rds/tcp_send.c

index af95c8e058fc0d45096234aa8aeb7c8da6fda175..a65ee78db0c54e1062186b9b132fe46fac380c91 100644 (file)
@@ -43,7 +43,7 @@ void rds_tcp_state_change(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                state_change = sk->sk_state_change;
@@ -68,7 +68,7 @@ void rds_tcp_state_change(struct sock *sk)
                        break;
        }
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        state_change(sk);
 }
 
index 72981375f47cc90cba0aaac80952bec3b0636bb4..7787537e9c2e95fffbcc8f89d594fc6de6ad52fe 100644 (file)
@@ -114,7 +114,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("listen data ready sk %p\n", sk);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        ready = sk->sk_user_data;
        if (!ready) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -131,7 +131,7 @@ void rds_tcp_listen_data_ready(struct sock *sk, int bytes)
                queue_work(rds_wq, &rds_tcp_listen_work);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 6243258f840f0e79dace450b97e3914e1badbacc..4fac4f2bb9dccd11f304aa6dd94f8852e91a5b53 100644 (file)
@@ -322,7 +322,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
 
        rdsdebug("data ready sk %p bytes %d\n", sk, bytes);
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) { /* check for teardown race */
                ready = sk->sk_data_ready;
@@ -336,7 +336,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
        if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
                queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
        ready(sk, bytes);
 }
 
index 1b4fd68f0c7c4db1f97859f407cded3a1e4f8144..81cf5a4c5e40c3c50b98c6694edd265a37e13b51 100644 (file)
@@ -174,7 +174,7 @@ void rds_tcp_write_space(struct sock *sk)
        struct rds_connection *conn;
        struct rds_tcp_connection *tc;
 
-       read_lock_bh(&sk->sk_callback_lock);
+       read_lock(&sk->sk_callback_lock);
        conn = sk->sk_user_data;
        if (!conn) {
                write_space = sk->sk_write_space;
@@ -194,7 +194,7 @@ void rds_tcp_write_space(struct sock *sk)
                queue_delayed_work(rds_wq, &conn->c_send_w, 0);
 
 out:
-       read_unlock_bh(&sk->sk_callback_lock);
+       read_unlock(&sk->sk_callback_lock);
 
        /*
         * write_space is only called when data leaves tcp's send queue if