[PATCH] lockdep: annotate nfs/nfsd in-kernel sockets
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Thu, 7 Dec 2006 04:35:24 +0000 (20:35 -0800)
committer Linus Torvalds <torvalds@woody.osdl.org>
Thu, 7 Dec 2006 16:39:30 +0000 (08:39 -0800)
Stick NFS sockets in their own class to avoid some lockdep warnings.  NFS
sockets are never exposed to user-space, and will hence not trigger certain
code paths that would otherwise pose deadlock scenarios.

[akpm@osdl.org: cleanups]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Steven Dickson <SteveD@redhat.com>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Trond Myklebust <trond.myklebust@fys.uio.no>
Acked-by: Neil Brown <neilb@suse.de>
Cc: "David S. Miller" <davem@davemloft.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
[ Fixed patch corruption by quilt, pointed out by Peter Zijlstra ]
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/net/sock.h
kernel/lockdep.c
net/core/sock.c
net/sunrpc/svcsock.c
net/sunrpc/xprtsock.c

diff --git a/include/net/sock.h b/include/net/sock.h
index 730899ce5162ed1824cbbedca12848bb7da0422c..03684e702d13464cbc0a48b95354dc540341f504 100644
@@ -746,6 +746,25 @@ static inline int sk_stream_wmem_schedule(struct sock *sk, int size)
  */
 #define sock_owned_by_user(sk) ((sk)->sk_lock.owner)
 
+/*
+ * Macro so as to not evaluate some arguments when
+ * lockdep is not enabled.
+ *
+ * Mark both the sk_lock and the sk_lock.slock as a
+ * per-address-family lock class.
+ */
+#define sock_lock_init_class_and_name(sk, sname, skey, name, key)      \
+do {                                                                   \
+       sk->sk_lock.owner = NULL;                                       \
+       init_waitqueue_head(&sk->sk_lock.wq);                           \
+       spin_lock_init(&(sk)->sk_lock.slock);                           \
+       debug_check_no_locks_freed((void *)&(sk)->sk_lock,              \
+                       sizeof((sk)->sk_lock));                         \
+       lockdep_set_class_and_name(&(sk)->sk_lock.slock,                \
+                       (skey), (sname));                               \
+       lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);     \
+} while (0)
+
 extern void FASTCALL(lock_sock_nested(struct sock *sk, int subclass));
 
 static inline void lock_sock(struct sock *sk)
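The helper above is intended to be used the same way the NFS/NFSD hunks further down use it: create the socket with sock_create_kern(), then immediately move its locks into a private lockdep class before the socket is used or locked by anyone. The sketch below illustrates that pattern for a hypothetical in-kernel socket owner; the "MYPROTO" class names and the my_* identifiers are illustrative only and are not part of this patch.

	#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* Static keys: lockdep identifies a class by the key's address,
	 * so these give the sockets a class of their own. */
	static struct lock_class_key my_key;
	static struct lock_class_key my_slock_key;

	static inline void my_reclassify_socket(struct socket *sock)
	{
		struct sock *sk = sock->sk;

		/* The socket must not be locked by anyone yet. */
		BUG_ON(sk->sk_lock.owner != NULL);
		sock_lock_init_class_and_name(sk, "slock-AF_INET-MYPROTO",
				&my_slock_key, "sk_lock-AF_INET-MYPROTO",
				&my_key);
	}
	#else
	static inline void my_reclassify_socket(struct socket *sock)
	{
	}
	#endif

Because lockdep keys are distinguished by address, the static keys above are what separate this class from the generic per-address-family classes that sock_lock_init() assigns; the string arguments only provide the names printed in lockdep reports.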
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c9fefdb1a7db45df9658dd32d963e7f28bbdfdf4..e33f6207f5b3e5f407770eaca914f5b8c6ed6f62 100644
@@ -2645,6 +2645,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
        }
        local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(struct task_struct *curr)
 {
diff --git a/net/core/sock.c b/net/core/sock.c
index 4a432da441e948c49b6829f083d2b4963dbd1586..0ed5b4f0bc407aef5b376149dcc2d49aa3c49937 100644
@@ -810,24 +810,11 @@ lenout:
  */
 static void inline sock_lock_init(struct sock *sk)
 {
-       spin_lock_init(&sk->sk_lock.slock);
-       sk->sk_lock.owner = NULL;
-       init_waitqueue_head(&sk->sk_lock.wq);
-       /*
-        * Make sure we are not reinitializing a held lock:
-        */
-       debug_check_no_locks_freed((void *)&sk->sk_lock, sizeof(sk->sk_lock));
-
-       /*
-        * Mark both the sk_lock and the sk_lock.slock as a
-        * per-address-family lock class:
-        */
-       lockdep_set_class_and_name(&sk->sk_lock.slock,
-                                  af_family_slock_keys + sk->sk_family,
-                                  af_family_slock_key_strings[sk->sk_family]);
-       lockdep_init_map(&sk->sk_lock.dep_map,
-                        af_family_key_strings[sk->sk_family],
-                        af_family_keys + sk->sk_family, 0);
+       sock_lock_init_class_and_name(sk,
+                       af_family_slock_key_strings[sk->sk_family],
+                       af_family_slock_keys + sk->sk_family,
+                       af_family_key_strings[sk->sk_family],
+                       af_family_keys + sk->sk_family);
 }
 
 /**
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 1c68956824e3098e28d1cfd48062092009df251a..99f54fb6d66903ae9b9887c76e4138838f60a43a 100644
@@ -85,6 +85,35 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req);
  */
 static int svc_conn_age_period = 6*60;
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key svc_key[2];
+static struct lock_class_key svc_slock_key[2];
+
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       BUG_ON(sk->sk_lock.owner != NULL);
+       switch (sk->sk_family) {
+       case AF_INET:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
+                   &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+               break;
+
+       case AF_INET6:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
+                   &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+               break;
+
+       default:
+               BUG();
+       }
+}
+#else
+static inline void svc_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /*
  * Queue up an idle server thread.  Must have pool->sp_lock held.
  * Note: this is really a stack rather than a queue, so that we only
@@ -1557,6 +1586,8 @@ svc_create_socket(struct svc_serv *serv, int protocol, struct sockaddr_in *sin)
        if ((error = sock_create_kern(PF_INET, type, protocol, &sock)) < 0)
                return error;
 
+       svc_reclassify_socket(sock);
+
        if (type == SOCK_STREAM)
                sock->sk->sk_reuse = 1; /* allow address reuse */
        error = kernel_bind(sock, (struct sockaddr *) sin,
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index cfe3c15be94895cb606b0184ab0855af5eefb11b..2fc4a3123261b1b9c37ff3ef122da2d10e817129 100644
@@ -1058,6 +1058,35 @@ static int xs_bindresvport(struct rpc_xprt *xprt, struct socket *sock)
        return err;
 }
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key xs_key[2];
+static struct lock_class_key xs_slock_key[2];
+
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+       struct sock *sk = sock->sk;
+       BUG_ON(sk->sk_lock.owner != NULL);
+       switch (sk->sk_family) {
+       case AF_INET:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET-NFS",
+                       &xs_slock_key[0], "sk_lock-AF_INET-NFS", &xs_key[0]);
+               break;
+
+       case AF_INET6:
+               sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFS",
+                       &xs_slock_key[1], "sk_lock-AF_INET6-NFS", &xs_key[1]);
+               break;
+
+       default:
+               BUG();
+       }
+}
+#else
+static inline void xs_reclassify_socket(struct socket *sock)
+{
+}
+#endif
+
 /**
  * xs_udp_connect_worker - set up a UDP socket
  * @work: RPC transport to connect
@@ -1081,6 +1110,7 @@ static void xs_udp_connect_worker(struct work_struct *work)
                dprintk("RPC:      can't create UDP transport socket (%d).\n", -err);
                goto out;
        }
+       xs_reclassify_socket(sock);
 
        if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                sock_release(sock);
@@ -1165,6 +1195,7 @@ static void xs_tcp_connect_worker(struct work_struct *work)
                        dprintk("RPC:      can't create TCP transport socket (%d).\n", -err);
                        goto out;
                }
+               xs_reclassify_socket(sock);
 
                if (xprt->resvport && xs_bindresvport(xprt, sock) < 0) {
                        sock_release(sock);