dcache_lock no longer protects anything. Remove it.
Signed-off-by: Nick Piggin <npiggin@kernel.dk>
char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
locking rules:
-		dcache_lock	rename_lock	->d_lock	may block
-d_revalidate:	no		no		no		yes
-d_hash		no		no		no		no
-d_compare:	no		yes		no		no
-d_delete:	yes		no		yes		no
-d_release:	no		no		no		yes
-d_iput:	no		no		no		yes
-d_dname:	no		no		no		no
+		rename_lock	->d_lock	may block
+d_revalidate:	no		no		yes
+d_hash		no		no		no
+d_compare:	yes		no		no
+d_delete:	no		yes		no
+d_release:	no		no		yes
+d_iput:	no		no		yes
+d_dname:	no		no		no
--------------------------- inode_operations ---------------------------
prototypes:
doesn't acquire the dcache_lock for this and relies on RCU to ensure
that the dentry has not been *freed*.
+dcache_lock no longer exists; dentry locking is explained in fs/dcache.c
Dcache locking details
======================
Dcache is a complex data structure with the hash table entries also
linked together in other lists. In the 2.4 kernel, dcache_lock protected
-all the lists. We applied RCU only on hash chain walking. The rest of
-the lists are still protected by dcache_lock. Some of the important
-changes are :
+all the lists. RCU dentry hash walking works like this:
1. Deletion from the hash chain is done using the hlist_del_rcu() macro,
   which leaves the next pointer of the deleted dentry intact; this
   allows us to walk the chain safely, lock-free, while a deletion is
-  happening.
+  happening. This is a standard hlist_rcu iteration.
2. Insertion of a dentry into the hash table is done using
   hlist_add_head_rcu(), which takes care of ordering the writes - the
   writes to the dentry must be visible before the dentry is inserted.
   This works in conjunction with hlist_for_each_rcu(),
   which has since been replaced by hlist_for_each_entry_rcu(), while
   walking the hash chain. The only requirement is that all
   initialization to the dentry must be done before
- hlist_add_head_rcu() since we don't have dcache_lock protection
- while traversing the hash chain. This isn't different from the
- existing code.
-
-3. The dentry looked up without holding dcache_lock by cannot be
- returned for walking if it is unhashed. It then may have a NULL
- d_inode or other bogosity since RCU doesn't protect the other
- fields in the dentry. We therefore use a flag DCACHE_UNHASHED to
- indicate unhashed dentries and use this in conjunction with a
- per-dentry lock (d_lock). Once looked up without the dcache_lock,
- we acquire the per-dentry lock (d_lock) and check if the dentry is
- unhashed. If so, the look-up is failed. If not, the reference count
- of the dentry is increased and the dentry is returned.
+ hlist_add_head_rcu() since we don't have lock protection
+ while traversing the hash chain.
+
+3. The dentry looked up without holding locks cannot be returned for
+ walking if it is unhashed. It then may have a NULL d_inode or other
+ bogosity since RCU doesn't protect the other fields in the dentry. We
+ therefore use a flag DCACHE_UNHASHED to indicate unhashed dentries
+ and use this in conjunction with a per-dentry lock (d_lock). Once
+ looked up without locks, we acquire the per-dentry lock (d_lock) and
+   check if the dentry is unhashed. If so, the look-up fails. If not,
+   the reference count of the dentry is increased and the dentry is
+   returned (see the sketch following this list).
4. Once a dentry is looked up, it must be ensured during the path walk
   for that component it doesn't go away. In pre-2.5.10 code, this was
   done holding a reference to the dentry. dcache_rcu does the same.
   In some sense, dcache_rcu path walking looks like the pre-2.5.10
version.
-5. All dentry hash chain updates must take the dcache_lock as well as
- the per-dentry lock in that order. dput() does this to ensure that
- a dentry that has just been looked up in another CPU doesn't get
- deleted before dget() can be done on it.
+5. All dentry hash chain updates must take the per-dentry lock (see
+   fs/dcache.c). Since lookups take the same lock, this serializes
+   against dput(): a dentry that has just been looked up on another
+   CPU cannot be deleted before dget() can take a reference.
6. There are several ways to do reference counting of RCU protected
objects. One such example is in ipv4 route cache where deferred
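To make the lookup side concrete, the sequence described in points 1-3
looks roughly like this. This is a minimal sketch modelled on
__d_lookup() in fs/dcache.c; the d_hash() bucket helper, the elided name
comparison, and the exact reference-count handling are illustrative
rather than verbatim kernel code:

	struct hlist_head *head = d_hash(parent, hash);	/* bucket helper */
	struct hlist_node *node;
	struct dentry *dentry, *found = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(dentry, node, head, d_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent || d_unhashed(dentry)) {
			/* unhashed (DCACHE_UNHASHED): fail this candidate */
			spin_unlock(&dentry->d_lock);
			continue;
		}
		/* ... compare the name under d_lock ... */
		dentry->d_count++;	/* reference taken under d_lock */
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
	}
	rcu_read_unlock();

The reference is taken while holding ->d_lock, which is what makes the
unhashed check race-free against a concurrent __d_drop() or dput().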
->d_parent changes are not protected by BKL anymore. Read access is safe
if at least one of the following is true:
* filesystem has no cross-directory rename()
- * dcache_lock is held
* we know that parent had been locked (e.g. we are looking at
->d_parent of ->lookup() argument).
* we are called from ->rename().
.d_hash() calling convention and locking rules are significantly
changed. Read updated documentation in Documentation/filesystems/vfs.txt (and
look at examples of other filesystems) for guidance.
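For orientation, the prototypes those rules refer to ended up looking
roughly like this in the 2.6.38-era dentry_operations. Treat this as a
sketch from memory of the surrounding vfs-scale series, and verify
against vfs.txt in the tree you are actually porting to:

	int (*d_hash)(const struct dentry *, const struct inode *,
			struct qstr *);
	int (*d_compare)(const struct dentry *, const struct inode *,
			const struct dentry *, const struct inode *,
			unsigned int, const char *, const struct qstr *);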
+
+---
+[mandatory]
dcache_lock is gone, replaced by fine-grained locks. See fs/dcache.c
+for details of what locks to replace dcache_lock with in order to protect
+particular things. Most of the time, a filesystem only needs ->d_lock, which
+protects *all* the dcache state of a given dentry.
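The common conversions follow one of two patterns, sketched below with a
hypothetical do_something() as a stand-in; the hunks that follow show the
same patterns in real filesystems:

	struct dentry *child;

	/* Per-dentry state: ->d_lock alone is enough. */
	spin_lock(&dentry->d_lock);
	if (!d_unhashed(dentry) && dentry->d_inode)
		do_something(dentry);
	spin_unlock(&dentry->d_lock);

	/* Walking children: parent's ->d_lock, each child nested inside. */
	spin_lock(&parent->d_lock);
	list_for_each_entry(child, &parent->d_subdirs, d_u.d_child) {
		spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
		/* child's dcache state is stable here */
		spin_unlock(&child->d_lock);
	}
	spin_unlock(&parent->d_lock);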
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry)) && dentry->d_inode) {
dget_locked_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(dir->d_inode, dentry);
- /* XXX: what is dcache_lock protecting here? Other
+ /* XXX: what was dcache_lock protecting here? Other
* filesystems (IB, configfs) release dcache_lock
* before unlink */
- spin_unlock(&dcache_lock);
dput(dentry);
} else {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
}
shrink_dcache_parent(dir);
goto bail;
}
- spin_lock(&dcache_lock);
spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
dget_locked_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- spin_unlock(&dcache_lock);
simple_unlink(parent->d_inode, tmp);
- } else {
+ } else
spin_unlock(&tmp->d_lock);
- spin_unlock(&dcache_lock);
- }
ret = 0;
bail:
goto bail;
}
- spin_lock(&dcache_lock);
spin_lock(&tmp->d_lock);
if (!(d_unhashed(tmp) && tmp->d_inode)) {
dget_locked_dlock(tmp);
__d_drop(tmp);
spin_unlock(&tmp->d_lock);
- spin_unlock(&dcache_lock);
simple_unlink(parent->d_inode, tmp);
} else {
spin_unlock(&tmp->d_lock);
- spin_unlock(&dcache_lock);
}
ret = 0;
d = first;
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- spin_lock(&dcache_lock);
if (!IS_ROOT(d) && d_unhashed(d))
len += UNHASHED_OBSCURE_STRING_SIZE; /* Obscure " (deleted)" string */
len += d->d_name.len + 1; /* Plus slash */
d = d->d_parent;
}
- spin_unlock(&dcache_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
struct list_head *next;
struct dentry *dentry;
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
next = next->next;
}
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
}
/*
}
/* If a pointer is invalid, we search the dentry. */
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
dent = NULL;
out_unlock:
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
return dent;
}
{
struct list_head *list;
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
list_for_each(list, &dentry->d_subdirs) {
struct dentry *de = list_entry(list, struct dentry, d_u.d_child);
if (usbfs_positive(de)) {
spin_unlock(&de->d_lock);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return 0;
}
spin_unlock(&de->d_lock);
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return 1;
}
{
struct dentry *dentry;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
/* Directory should have only one entry. */
BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return dentry;
}
void *data = dentry->d_fsdata;
struct list_head *head, *next;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
head = &inode->i_dentry;
next = head->next;
next = next->next;
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
#include <linux/auto_fs4.h>
#include <linux/auto_dev-ioctl.h>
#include <linux/mutex.h>
+#include <linux/spinlock.h>
#include <linux/list.h>
/* This is the range of ioctl() numbers we claim as ours */
current->pid, __func__, ##args); \
} while (0)
+extern spinlock_t autofs4_lock;
+
/* Unified info structure. This is pointed to by both the dentry and
inode structures. Each file in the filesystem has an instance of this
structure. It holds a reference to the dentry, so dentries are never
if (prev == NULL)
return dget(prev);
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
relock:
p = prev;
spin_lock(&p->d_lock);
if (p == root) {
spin_unlock(&p->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
dput(prev);
return NULL;
}
dget_dlock(ret);
spin_unlock(&ret->d_lock);
spin_unlock(&p->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
dput(prev);
ino->flags |= AUTOFS_INF_EXPIRING;
init_completion(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&expired->d_parent->d_lock);
spin_lock_nested(&expired->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&expired->d_parent->d_subdirs, &expired->d_u.d_child);
spin_unlock(&expired->d_lock);
spin_unlock(&expired->d_parent->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return expired;
}
#include "autofs_i.h"
+DEFINE_SPINLOCK(autofs4_lock);
+
static int autofs4_dir_symlink(struct inode *,struct dentry *,const char *);
static int autofs4_dir_unlink(struct inode *,struct dentry *);
static int autofs4_dir_rmdir(struct inode *,struct dentry *);
* autofs file system so just let the libfs routines handle
* it.
*/
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&dentry->d_lock);
if (!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return -ENOENT;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
out:
return dcache_dir_open(inode, file);
/* We trigger a mount for almost all flags */
lookup_type = autofs4_need_mount(nd->flags);
spin_lock(&sbi->fs_lock);
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&dentry->d_lock);
if (!(lookup_type || ino->flags & AUTOFS_INF_PENDING)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
spin_unlock(&sbi->fs_lock);
goto follow;
}
if (ino->flags & AUTOFS_INF_PENDING ||
(!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs))) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
spin_unlock(&sbi->fs_lock);
status = try_to_fill_dentry(dentry, nd->flags);
goto follow;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
spin_unlock(&sbi->fs_lock);
follow:
/*
return 0;
/* Check for a non-mountpoint directory with no contents */
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&dentry->d_lock);
if (S_ISDIR(dentry->d_inode->i_mode) &&
!d_mountpoint(dentry) && list_empty(&dentry->d_subdirs)) {
DPRINTK("dentry=%p %.*s, emptydir",
dentry, dentry->d_name.len, dentry->d_name.name);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
/* The daemon never causes a mount to trigger */
if (oz_mode)
return status;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return 1;
}
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->active_list;
list_for_each(p, head) {
dget_dlock(active);
spin_unlock(&active->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return active;
}
next:
spin_unlock(&active->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return NULL;
}
const unsigned char *str = name->name;
struct list_head *p, *head;
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
head = &sbi->expiring_list;
list_for_each(p, head) {
dget_dlock(expiring);
spin_unlock(&expiring->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return expiring;
}
next:
spin_unlock(&expiring->d_lock);
}
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return NULL;
}
dir->i_mtime = CURRENT_TIME;
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
autofs4_add_expiring(dentry);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return 0;
}
if (!autofs4_oz_mode(sbi))
return -EACCES;
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
spin_lock(&sbi->lookup_lock);
spin_lock(&dentry->d_lock);
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
spin_unlock(&sbi->lookup_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
return -ENOTEMPTY;
}
__autofs4_add_expiring(dentry);
spin_unlock(&sbi->lookup_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
if (atomic_dec_and_test(&ino->count)) {
p_ino = autofs4_dentry_ino(dentry->d_parent);
rename_retry:
buf = *name;
len = 0;
+
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- spin_lock(&dcache_lock);
+ spin_lock(&autofs4_lock);
for (tmp = dentry ; tmp != root ; tmp = tmp->d_parent)
len += tmp->d_name.len + 1;
if (!len || --len > NAME_MAX) {
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
p -= tmp->d_name.len;
strncpy(p, tmp->d_name.name, tmp->d_name.len);
}
- spin_unlock(&dcache_lock);
+ spin_unlock(&autofs4_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
dout("__dcache_readdir %p at %llu (last %p)\n", dir, filp->f_pos,
last);
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
/* start at beginning? */
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, filp->f_pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
filp->f_pos++;
- /* make sure a dentry wasn't dropped while we didn't have dcache_lock */
+ /* make sure a dentry wasn't dropped while we didn't have parent lock */
if (!ceph_i_test(dir, CEPH_I_COMPLETE)) {
dout(" lost I_COMPLETE on %p; falling back to mds\n", dir);
err = -EAGAIN;
goto out;
}
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
p = p->prev; /* advance to next dentry */
goto more;
out_unlock:
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
out:
if (last)
dput(last);
di->offset = ceph_inode(inode)->i_max_offset++;
spin_unlock(&inode->i_lock);
- spin_lock(&dcache_lock);
spin_lock(&dir->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&dn->d_u.d_child, &dir->d_subdirs);
dn->d_u.d_child.prev, dn->d_u.d_child.next);
spin_unlock(&dn->d_lock);
spin_unlock(&dir->d_lock);
- spin_unlock(&dcache_lock);
}
/*
goto retry_lookup;
} else {
/* reorder parent's d_subdirs */
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
list_move(&dn->d_u.d_child, &parent->d_subdirs);
spin_unlock(&dn->d_lock);
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
}
di = dn->d_fsdata;
{
struct dentry *dentry;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
if (!d_unhashed(dentry) || IS_ROOT(dentry)) {
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return true;
}
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return false;
}
struct list_head *child;
struct dentry *de;
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
list_for_each(child, &parent->d_subdirs)
{
coda_flag_inode(de->d_inode, flag);
}
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
return;
}
{
struct config_item * item = NULL;
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (!d_unhashed(dentry)) {
struct configfs_dirent * sd = dentry->d_fsdata;
item = config_item_get(sd->s_element);
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return item;
}
struct dentry * dentry = sd->s_dentry;
if (dentry) {
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry) && dentry->d_inode)) {
dget_locked_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
simple_unlink(parent->d_inode, dentry);
- } else {
+ } else
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
- }
}
}
* - d_alias, d_inode
*
* Ordering:
- * dcache_lock
- * dcache_inode_lock
- * dentry->d_lock
- * dcache_lru_lock
- * dcache_hash_lock
+ * dcache_inode_lock
+ * dentry->d_lock
+ * dcache_lru_lock
+ * dcache_hash_lock
*
* If there is an ancestor relationship:
* dentry->d_parent->...->d_parent->d_lock
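*
* For example, to lock a parent/child pair, take the parent's d_lock
* first, then the child's via
* spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED).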
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
-__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);
EXPORT_SYMBOL(dcache_inode_lock);
-EXPORT_SYMBOL(dcache_lock);
static struct kmem_cache *dentry_cache __read_mostly;
}
/*
- * no dcache_lock, please.
+ * no locks, please.
*/
static void d_free(struct dentry *dentry)
{
static void dentry_iput(struct dentry * dentry)
__releases(dentry->d_lock)
__releases(dcache_inode_lock)
- __releases(dcache_lock)
{
struct inode *inode = dentry->d_inode;
if (inode) {
list_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
if (dentry->d_op && dentry->d_op->d_iput)
} else {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
}
*
* If this is the root of the dentry tree, return NULL.
*
- * dcache_lock and d_lock and d_parent->d_lock must be held by caller, and
- * are dropped by d_kill.
+ * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
+ * d_kill.
*/
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
__releases(dentry->d_lock)
__releases(parent->d_lock)
__releases(dcache_inode_lock)
- __releases(dcache_lock)
{
dentry->d_parent = NULL;
list_del(&dentry->d_u.d_child);
void d_drop(struct dentry *dentry)
{
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_drop);
else
parent = dentry->d_parent;
if (dentry->d_count == 1) {
- if (!spin_trylock(&dcache_lock)) {
- /*
- * Something of a livelock possibility we could avoid
- * by taking dcache_lock and trying again, but we
- * want to reduce dcache_lock anyway so this will
- * get improved.
- */
-drop1:
- spin_unlock(&dentry->d_lock);
- goto repeat;
- }
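	/*
	 * Lock order is dcache_inode_lock, then d_lock (see the ordering
	 * comment above); we already hold d_lock here, so trylock the
	 * other locks and back off to retry on failure rather than
	 * deadlock.
	 */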
if (!spin_trylock(&dcache_inode_lock)) {
drop2:
- spin_unlock(&dcache_lock);
- goto drop1;
+ spin_unlock(&dentry->d_lock);
+ goto repeat;
}
if (parent && !spin_trylock(&parent->d_lock)) {
spin_unlock(&dcache_inode_lock);
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
return;
}
if (parent)
spin_unlock(&parent->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return;
unhash_it:
/*
* If it's already been dropped, return OK.
*/
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (d_unhashed(dentry)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return 0;
}
/*
*/
if (!list_empty(&dentry->d_subdirs)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
shrink_dcache_parent(dentry);
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
}
if (dentry->d_count > 1) {
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return -EBUSY;
}
}
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return 0;
}
EXPORT_SYMBOL(d_invalidate);
-/* This must be called with dcache_lock and d_lock held */
+/* This must be called with d_lock held */
static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
{
dentry->d_count++;
return dentry;
}
-/* This should be called _only_ with dcache_lock held */
+/* Takes and drops d_lock itself; must not be called with d_lock held */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
struct dentry *de = NULL;
if (!list_empty(&inode->i_dentry)) {
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
de = __d_find_alias(inode, 0);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
return de;
}
{
struct dentry *dentry;
restart:
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
dput(dentry);
goto restart;
}
spin_unlock(&dentry->d_lock);
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
__releases(dentry->d_lock)
__releases(parent->d_lock)
__releases(dcache_inode_lock)
- __releases(dcache_lock)
{
__d_drop(dentry);
dentry = d_kill(dentry, parent);
/*
- * Prune ancestors. Locking is simpler than in dput(),
- * because dcache_lock needs to be taken anyway.
+ * Prune ancestors.
*/
while (dentry) {
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
again:
spin_lock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return;
}
spin_unlock(&dcache_lru_lock);
prune_one_dentry(dentry, parent);
- /* dcache_lock, dcache_inode_lock and dentry->d_lock dropped */
- spin_lock(&dcache_lock);
+ /* dcache_inode_lock and dentry->d_lock dropped */
spin_lock(&dcache_inode_lock);
spin_lock(&dcache_lru_lock);
}
LIST_HEAD(tmp);
int cnt = *count;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
relock:
spin_lock(&dcache_lru_lock);
list_splice(&referenced, &sb->s_dentry_lru);
spin_unlock(&dcache_lru_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
/**
if (unused == 0 || count == 0)
return;
- spin_lock(&dcache_lock);
if (count >= unused)
prune_ratio = 1;
else
if (down_read_trylock(&sb->s_umount)) {
if ((sb->s_root != NULL) &&
(!list_empty(&sb->s_dentry_lru))) {
- spin_unlock(&dcache_lock);
__shrink_dcache_sb(sb, &w_count,
DCACHE_REFERENCED);
pruned -= w_count;
- spin_lock(&dcache_lock);
}
up_read(&sb->s_umount);
}
if (p)
__put_super(p);
spin_unlock(&sb_lock);
- spin_unlock(&dcache_lock);
}
/**
{
LIST_HEAD(tmp);
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
spin_lock(&dcache_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
}
spin_unlock(&dcache_lru_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
BUG_ON(!IS_ROOT(dentry));
/* detach this root from the system */
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
dentry_lru_del(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
for (;;) {
/* descend to the first leaf in the current subtree */
/* this is a branch with children - detach all of them
* from the system in one go */
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
list_for_each_entry(loop, &dentry->d_subdirs,
d_u.d_child) {
spin_unlock(&loop->d_lock);
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
/* move to the first child */
dentry = list_entry(dentry->d_subdirs.next,
/*
* destroy the dentries attached to a superblock on unmounting
- * - we don't need to use dentry->d_lock, and only need dcache_lock when
- * removing the dentry from the system lists and hashes because:
+ * - we don't need to use dentry->d_lock because:
* - the superblock is detached from all mountings and open files, so the
* dentry trees will not be rearranged by the VFS
* - s_umount is write-locked, so the memory pressure shrinker will ignore
this_parent = parent;
seq = read_seqbegin(&rename_lock);
- spin_lock(&dcache_lock);
if (d_mountpoint(parent))
goto positive;
spin_lock(&this_parent->d_lock);
if (this_parent != child->d_parent ||
read_seqretry(&rename_lock, seq)) {
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
rcu_read_unlock();
goto rename_retry;
}
goto resume;
}
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
return 0; /* No mount points found in tree */
positive:
- spin_unlock(&dcache_lock);
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
return 1;
this_parent = parent;
seq = read_seqbegin(&rename_lock);
- spin_lock(&dcache_lock);
spin_lock(&this_parent->d_lock);
repeat:
next = this_parent->d_subdirs.next;
if (this_parent != child->d_parent ||
read_seqretry(&rename_lock, seq)) {
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
rcu_read_unlock();
goto rename_retry;
}
}
out:
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
return found;
INIT_LIST_HEAD(&dentry->d_u.d_child);
if (parent) {
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dentry->d_parent = dget_dlock(parent);
list_add(&dentry->d_u.d_child, &parent->d_subdirs);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
}
this_cpu_inc(nr_dentry);
}
EXPORT_SYMBOL(d_alloc_name);
-/* the caller must hold dcache_lock */
+/* the caller must hold dcache_inode_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
spin_lock(&dentry->d_lock);
void d_instantiate(struct dentry *entry, struct inode * inode)
{
BUG_ON(!list_empty(&entry->d_alias));
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
__d_instantiate(entry, inode);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
BUG_ON(!list_empty(&entry->d_alias));
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
result = __d_instantiate_unique(entry, inode);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
if (!result) {
security_d_instantiate(entry, inode);
}
tmp->d_parent = tmp; /* make sure dput doesn't croak */
- spin_lock(&dcache_lock);
+
spin_lock(&dcache_inode_lock);
res = __d_find_alias(inode, 0);
if (res) {
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
dput(tmp);
goto out_iput;
}
spin_unlock(&tmp->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return tmp;
out_iput:
struct dentry *new = NULL;
if (inode && S_ISDIR(inode->i_mode)) {
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
new = __d_find_alias(inode, 1);
if (new) {
BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
security_d_instantiate(new, inode);
d_move(new, dentry);
iput(inode);
} else {
- /* already taking dcache_lock, so d_add() by hand */
+ /* already taking dcache_inode_lock, so d_add() by hand */
__d_instantiate(dentry, inode);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
security_d_instantiate(dentry, inode);
d_rehash(dentry);
}
* Negative dentry: instantiate it unless the inode is a directory and
* already has a dentry.
*/
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
__d_instantiate(found, inode);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
return found;
}
new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dget_locked(new);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
d_move(new, found);
iput(inode);
{
struct dentry *child;
- spin_lock(&dcache_lock);
spin_lock(&dparent->d_lock);
list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
if (dentry == child) {
__dget_locked_dlock(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&dparent->d_lock);
- spin_unlock(&dcache_lock);
return 1;
}
}
spin_unlock(&dparent->d_lock);
- spin_unlock(&dcache_lock);
return 0;
}
/*
* Are we the only user?
*/
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
fsnotify_nameremove(dentry, isdir);
}
void d_rehash(struct dentry * entry)
{
- spin_lock(&dcache_lock);
spin_lock(&entry->d_lock);
spin_lock(&dcache_hash_lock);
_d_rehash(entry);
spin_unlock(&dcache_hash_lock);
spin_unlock(&entry->d_lock);
- spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_rehash);
BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
* The hash value has to match the hash queue that the dentry is on.
*/
/*
- * d_move_locked - move a dentry
+ * d_move - move a dentry
* @dentry: entry to move
* @target: new dentry
*
* Update the dcache to reflect the move of a file name. Negative
* dcache entries should not be moved in this way.
*/
-static void d_move_locked(struct dentry * dentry, struct dentry * target)
+void d_move(struct dentry * dentry, struct dentry * target)
{
if (!dentry->d_inode)
printk(KERN_WARNING "VFS: moving negative dcache entry\n");
spin_unlock(&dentry->d_lock);
write_sequnlock(&rename_lock);
}
-
-/**
- * d_move - move a dentry
- * @dentry: entry to move
- * @target: new dentry
- *
- * Update the dcache to reflect the move of a file name. Negative
- * dcache entries should not be moved in this way.
- */
-
-void d_move(struct dentry * dentry, struct dentry * target)
-{
- spin_lock(&dcache_lock);
- d_move_locked(dentry, target);
- spin_unlock(&dcache_lock);
-}
EXPORT_SYMBOL(d_move);
/**
* This helper attempts to cope with remotely renamed directories
*
* It assumes that the caller is already holding
- * dentry->d_parent->d_inode->i_mutex and the dcache_lock
+ * dentry->d_parent->d_inode->i_mutex and the dcache_inode_lock
*
* Note: If ever the locking in lock_rename() changes, then please
* remember to update this too...
*/
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
- __releases(dcache_lock)
__releases(dcache_inode_lock)
{
struct mutex *m1 = NULL, *m2 = NULL;
goto out_err;
m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
- d_move_locked(alias, dentry);
+ d_move(alias, dentry);
ret = alias;
out_err:
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
if (m2)
mutex_unlock(m2);
if (m1)
BUG_ON(!d_unhashed(dentry));
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
if (!inode) {
spin_unlock(&dcache_hash_lock);
spin_unlock(&actual->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
out_nolock:
if (actual == dentry) {
security_d_instantiate(dentry, inode);
shouldnt_be_hashed:
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
BUG();
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
int error;
prepend(&res, &buflen, "\0", 1);
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
error = prepend_path(path, root, &res, &buflen);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
if (error)
return ERR_PTR(error);
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
get_fs_root(current->fs, &root);
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
tmp = root;
error = path_with_deleted(path, &tmp, &res, &buflen);
if (error)
res = ERR_PTR(error);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
path_put(&root);
return res;
}
return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
get_fs_root(current->fs, &root);
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
tmp = root;
error = path_with_deleted(path, &tmp, &res, &buflen);
if (!error && !path_equal(&tmp, &root))
error = prepend_unreachable(&res, &buflen);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
path_put(&root);
if (error)
res = ERR_PTR(error);
{
char *retval;
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
retval = __dentry_path(dentry, buf, buflen);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
return retval;
}
char *p = NULL;
char *retval;
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
if (d_unlinked(dentry)) {
p = buf + buflen;
}
retval = __dentry_path(dentry, buf, buflen);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
if (!IS_ERR(retval) && p)
*p = '/'; /* restore '/' overridden with '\0' */
return retval;
Elong:
- spin_unlock(&dcache_lock);
return ERR_PTR(-ENAMETOOLONG);
}
get_fs_root_and_pwd(current->fs, &root, &pwd);
error = -ENOENT;
- spin_lock(&dcache_lock);
write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
prepend(&cwd, &buflen, "\0", 1);
error = prepend_path(&pwd, &tmp, &cwd, &buflen);
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
if (error)
goto out;
}
} else {
write_sequnlock(&rename_lock);
- spin_unlock(&dcache_lock);
}
out:
rename_retry:
this_parent = root;
seq = read_seqbegin(&rename_lock);
- spin_lock(&dcache_lock);
spin_lock(&this_parent->d_lock);
repeat:
next = this_parent->d_subdirs.next;
if (this_parent != child->d_parent ||
read_seqretry(&rename_lock, seq)) {
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
rcu_read_unlock();
goto rename_retry;
}
goto resume;
}
spin_unlock(&this_parent->d_lock);
- spin_unlock(&dcache_lock);
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
}
if (acceptable(context, result))
return result;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
list_for_each_entry(dentry, &result->d_inode->i_dentry, d_alias) {
dget_locked(dentry);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
if (toput)
dput(toput);
if (dentry != result && acceptable(context, dentry)) {
dput(result);
return dentry;
}
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
toput = dentry;
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
if (toput)
dput(toput);
struct dentry *cursor = file->private_data;
loff_t n = file->f_pos - 2;
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
/* d_lock not required for cursor */
list_del(&cursor->d_u.d_child);
}
list_add_tail(&cursor->d_u.d_child, p);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
}
mutex_unlock(&dentry->d_inode->i_mutex);
i++;
/* fallthrough */
default:
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (filp->f_pos == 2)
list_move(q, &dentry->d_subdirs);
spin_unlock(&next->d_lock);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
if (filldir(dirent, next->d_name.name,
next->d_name.len, filp->f_pos,
next->d_inode->i_ino,
dt_type(next->d_inode)) < 0)
return 0;
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
/* next is still alive */
filp->f_pos++;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
return 0;
}
struct dentry *child;
int ret = 0;
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
list_for_each_entry(child, &dentry->d_subdirs, d_u.d_child) {
spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);
ret = 1;
out:
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
return ret;
}
return 1;
}
-/* no need for dcache_lock, as serialization is taken care in
- * namespace.c
+/*
+ * serialization is taken care of in namespace.c
*/
static int __follow_mount(struct path *path)
{
}
}
-/* no need for dcache_lock, as serialization is taken care in
- * namespace.c
- */
int follow_down(struct path *path)
{
struct vfsmount *mounted;
{
dget(dentry);
shrink_dcache_parent(dentry);
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (dentry->d_count == 2)
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
int vfs_rmdir(struct inode *dir, struct dentry *dentry)
}
/* If a pointer is invalid, we search the dentry. */
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
else
dent = NULL;
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
goto out;
}
next = next->next;
}
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
return NULL;
out:
struct list_head *next;
struct dentry *dentry;
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
next = next->next;
}
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
}
static inline void
struct list_head *next;
struct dentry *dentry;
- spin_lock(&dcache_lock);
spin_lock(&parent->d_lock);
next = parent->d_subdirs.next;
while (next != &parent->d_subdirs) {
next = next->next;
}
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
}
struct ncp_cache_head {
dfprintk(VFS, "NFS: unlink(%s/%ld, %s)\n", dir->i_sb->s_id,
dir->i_ino, dentry->d_name.name);
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (dentry->d_count > 1) {
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
/* Start asynchronous writeout of the inode */
write_inode_now(dentry->d_inode, 0);
error = nfs_sillyrename(dir, dentry);
need_rehash = 1;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
error = nfs_safe_remove(dentry);
if (!error || error == -ENOENT) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
* This again causes shrink_dcache_for_umount_subtree() to
* Oops, since the test for IS_ROOT() will fail.
*/
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
spin_lock(&sb->s_root->d_lock);
list_del_init(&sb->s_root->d_alias);
spin_unlock(&sb->s_root->d_lock);
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
return 0;
}
seq = read_seqbegin(&rename_lock);
rcu_read_lock();
- spin_lock(&dcache_lock);
while (!IS_ROOT(dentry) && dentry != droot) {
namelen = dentry->d_name.len;
buflen -= namelen + 1;
*--end = '/';
dentry = dentry->d_parent;
}
- spin_unlock(&dcache_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
memcpy(end, base, namelen);
return end;
Elong_unlock:
- spin_unlock(&dcache_lock);
rcu_read_unlock();
if (read_seqretry(&rename_lock, seq))
goto rename_retry;
/* determine if the children should tell inode about their events */
watched = fsnotify_inode_watches_children(inode);
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
/* run all of the dentries associated with this inode. Since this is a
* directory, there damn well better only be one item on this list */
spin_unlock(&alias->d_lock);
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
}
/* Notify this dentry's parent about a child's events. */
struct list_head *p;
struct dentry *dentry = NULL;
- spin_lock(&dcache_lock);
spin_lock(&dcache_inode_lock);
list_for_each(p, &inode->i_dentry) {
dentry = list_entry(p, struct dentry, d_alias);
}
spin_unlock(&dcache_inode_lock);
- spin_unlock(&dcache_lock);
return dentry;
}
#define DCACHE_GENOCIDE 0x0200
extern spinlock_t dcache_inode_lock;
-extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
static inline int dname_external(struct dentry *dentry)
* destroyed when it has references. dget() should never be
* called for dentries with zero reference counter. For these cases
* (preferably none, functions in dcache.c are sufficient for normal
- * needs and they take necessary precautions) you should hold dcache_lock
- * and call dget_locked() instead of dget().
+ * needs and they take necessary precautions) you should hold d_lock
+ * and call dget_dlock() instead of dget().
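+ *
+ * Illustrative usage (not a real call site):
+ *
+ *	spin_lock(&dentry->d_lock);
+ *	dget_dlock(dentry);
+ *	spin_unlock(&dentry->d_lock);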
*/
static inline struct dentry *dget_dlock(struct dentry *dentry)
{
#else
struct list_head s_files;
#endif
- /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */
+ /* s_dentry_lru, s_nr_dentry_unused protected by dcache.c lru locks */
struct list_head s_dentry_lru; /* unused dentry lru */
int s_nr_dentry_unused; /* # of dentry on lru */
{
ino_t res;
+ /*
+ * Don't strictly need d_lock here? If the parent ino could change
+ * then surely we'd have a deeper race in the caller?
+ */
spin_lock(&dentry->d_lock);
res = dentry->d_parent->d_inode->i_ino;
spin_unlock(&dentry->d_lock);
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
- * Called with dcache_lock held.
*/
static inline void fsnotify_d_instantiate(struct dentry *dentry,
struct inode *inode)
/*
* fsnotify_d_move - dentry has been moved
- * Called with dcache_lock and dentry->d_lock held.
*/
static inline void fsnotify_d_move(struct dentry *dentry)
{
{
struct dentry *parent;
- assert_spin_locked(&dcache_lock);
assert_spin_locked(&dentry->d_lock);
+ /*
+ * Serialisation of setting PARENT_WATCHED on the dentries is provided
+ * by d_lock. If inotify_inode_watched changes after we have taken
+ * d_lock, the following __fsnotify_update_child_dentry_flags call will
+ * find our entry, so it will spin until we complete here, and update
+ * us with the new state.
+ */
parent = dentry->d_parent;
if (parent->d_inode && fsnotify_inode_watches_children(parent->d_inode))
dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
/*
* fsnotify_d_instantiate - instantiate a dentry for inode
- * Called with dcache_lock held.
*/
static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
{
if (!inode)
return;
- assert_spin_locked(&dcache_lock);
-
spin_lock(&dentry->d_lock);
__fsnotify_update_dcache_flags(dentry);
spin_unlock(&dentry->d_lock);
* - require a directory
* - ending slashes ok even for nonexistent files
* - internal "there are more path components" flag
- * - locked when lookup done with dcache_lock held
* - dentry cache is untrusted; force a real lookup
*/
#define LOOKUP_FOLLOW 1
struct list_head *node;
BUG_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
node = dentry->d_subdirs.next;
while (node != &dentry->d_subdirs) {
dget_locked_dlock(d);
spin_unlock(&d->d_lock);
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
d_delete(d);
simple_unlink(dentry->d_inode, d);
dput(d);
- spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
} else
spin_unlock(&d->d_lock);
node = dentry->d_subdirs.next;
}
spin_unlock(&dentry->d_lock);
- spin_unlock(&dcache_lock);
}
/*
cgroup_clear_directory(dentry);
- spin_lock(&dcache_lock);
parent = dentry->d_parent;
spin_lock(&parent->d_lock);
spin_lock(&dentry->d_lock);
list_del_init(&dentry->d_u.d_child);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
- spin_unlock(&dcache_lock);
remove_dir(dentry);
}
* ->inode_lock (zap_pte_range->set_page_dirty)
* ->private_lock (zap_pte_range->__set_page_dirty_buffers)
*
- * ->task->proc_lock
- * ->dcache_lock (proc_pid_lookup)
- *
* (code doesn't rely on that order, so you could switch it around)
* ->tasklist_lock (memory_failure, collect_procs_ao)
* ->i_mmap_lock
{
struct list_head *node;
- spin_lock(&dcache_lock);
spin_lock(&de->d_lock);
node = de->d_subdirs.next;
while (node != &de->d_subdirs) {
dget_locked_dlock(d);
spin_unlock(&de->d_lock);
spin_unlock(&d->d_lock);
- spin_unlock(&dcache_lock);
d_delete(d);
simple_unlink(de->d_inode, d);
dput(d);
- spin_lock(&dcache_lock);
spin_lock(&de->d_lock);
} else
spin_unlock(&d->d_lock);
}
spin_unlock(&de->d_lock);
- spin_unlock(&dcache_lock);
}
#define BOOL_DIR_NAME "booleans"