vfs: split do_lookup()
author Miklos Szeredi <mszeredi@suse.cz>
Mon, 21 May 2012 15:30:05 +0000 (17:30 +0200)
committer Al Viro <viro@zeniv.linux.org.uk>
Fri, 1 Jun 2012 16:11:56 +0000 (12:11 -0400)
Split do_lookup() into two functions:

  lookup_fast() - does cached lookup without i_mutex
  lookup_slow() - does lookup with i_mutex

Both follow managed dentries.

The new functions are needed by atomic_open.
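
For illustration only (not part of the patch): a minimal user-space sketch of
the fast/slow fall-back convention this split introduces, where the fast helper
returns 0 on success, 1 when the caller must retry via the locked slow path,
and a negative value on error. The helper names and the pthread mutex below are
made up for the example; the kernel's real fast path relies on RCU-walk and
dcache lookups rather than a plain unlocked read.

/*
 * Illustrative sketch of the return-value convention:
 *   0   - fast path succeeded
 *   1   - caller must fall back to the slow path
 *   <0  - error
 * The mutex stands in for i_mutex; a genuinely concurrent fast path
 * would need RCU or atomics, as the kernel's RCU-walk does.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t dir_lock = PTHREAD_MUTEX_INITIALIZER;
static char cached_name[64];
static long cached_result;

static int lookup_fast_sketch(const char *name, long *result)
{
	if (strcmp(cached_name, name) != 0)
		return 1;		/* cache miss: ask for the slow path */
	*result = cached_result;
	return 0;			/* cache hit, no lock taken */
}

static int lookup_slow_sketch(const char *name, long *result)
{
	pthread_mutex_lock(&dir_lock);	/* i_mutex stand-in */
	/* The expensive lookup would go here; we fake it and refill the cache. */
	snprintf(cached_name, sizeof(cached_name), "%s", name);
	cached_result = (long)strlen(name);
	*result = cached_result;
	pthread_mutex_unlock(&dir_lock);
	return 0;
}

static int lookup_sketch(const char *name, long *result)
{
	int err = lookup_fast_sketch(name, result);

	if (err <= 0)
		return err;		/* hit, or hard error */
	return lookup_slow_sketch(name, result);
}

int main(void)
{
	long r;

	if (lookup_sketch("example", &r) == 0)
		printf("slow lookup: %ld\n", r);
	if (lookup_sketch("example", &r) == 0)
		printf("fast lookup: %ld\n", r);
	return 0;
}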

Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
fs/namei.c

index 93ac9323b1f7c560779766401157815bab5e7546..7f4ab820811a0992a34332542146fc8cf718d06c 100644
@@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  small and for now I'd prefer to have fast path as straight as possible.
  *  It _is_ time-critical.
  */
-static int do_lookup(struct nameidata *nd, struct qstr *name,
-                       struct path *path, struct inode **inode)
+static int lookup_fast(struct nameidata *nd, struct qstr *name,
+                      struct path *path, struct inode **inode)
 {
        struct vfsmount *mnt = nd->path.mnt;
        struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@ unlazy:
                        goto need_lookup;
                }
        }
-done:
+
        path->mnt = mnt;
        path->dentry = dentry;
        err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@ done:
        return 0;
 
 need_lookup:
+       return 1;
+}
+
+/* Fast lookup failed, do it the slow way */
+static int lookup_slow(struct nameidata *nd, struct qstr *name,
+                      struct path *path)
+{
+       struct dentry *dentry, *parent;
+       int err;
+
+       parent = nd->path.dentry;
        BUG_ON(nd->inode != parent->d_inode);
 
        mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@ need_lookup:
        mutex_unlock(&parent->d_inode->i_mutex);
        if (IS_ERR(dentry))
                return PTR_ERR(dentry);
-       goto done;
+       path->mnt = nd->path.mnt;
+       path->dentry = dentry;
+       err = follow_managed(path, nd->flags);
+       if (unlikely(err < 0)) {
+               path_put_conditional(path, nd);
+               return err;
+       }
+       if (err)
+               nd->flags |= LOOKUP_JUMPED;
+       return 0;
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
         */
        if (unlikely(type != LAST_NORM))
                return handle_dots(nd, type);
-       err = do_lookup(nd, name, path, &inode);
+       err = lookup_fast(nd, name, path, &inode);
        if (unlikely(err)) {
-               terminate_walk(nd);
-               return err;
-       }
-       if (!inode) {
-               path_to_nameidata(path, nd);
-               terminate_walk(nd);
-               return -ENOENT;
+               if (err < 0)
+                       goto out_err;
+
+               err = lookup_slow(nd, name, path);
+               if (err < 0)
+                       goto out_err;
+
+               inode = path->dentry->d_inode;
        }
+       err = -ENOENT;
+       if (!inode)
+               goto out_path_put;
+
        if (should_follow_link(inode, follow)) {
                if (nd->flags & LOOKUP_RCU) {
                        if (unlikely(unlazy_walk(nd, path->dentry))) {
-                               terminate_walk(nd);
-                               return -ECHILD;
+                               err = -ECHILD;
+                               goto out_err;
                        }
                }
                BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
        path_to_nameidata(path, nd);
        nd->inode = inode;
        return 0;
+
+out_path_put:
+       path_to_nameidata(path, nd);
+out_err:
+       terminate_walk(nd);
+       return err;
 }
 
 /*