/*
* Usage:
+ * dcache_inode_lock protects:
+ * - i_dentry, d_alias, d_inode
* dcache_hash_lock protects:
* - the dcache hash table, s_anon lists
* dcache_lru_lock protects:
* - the dcache LRU lists and counters
* d_lock protects:
* - d_unhashed()
* - d_parent and d_subdirs
* - children's d_child and d_parent
+ * - d_alias, d_inode
*
* Ordering:
* dcache_lock
- *   dentry->d_lock
- *     dcache_lru_lock
- *       dcache_hash_lock
+ *   dcache_inode_lock
+ *     dentry->d_lock
+ *       dcache_lru_lock
+ *         dcache_hash_lock
*
* If there is an ancestor relationship:
* dentry->d_parent->...->d_parent->d_lock
*/
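
/*
 * Illustration only, not part of the patch: a hypothetical helper showing the
 * nesting order documented above. Any path that needs several of these locks
 * is expected to take them outermost-first, in exactly this sequence, and to
 * release them in the reverse order. (The locks themselves are defined just
 * below.)
 */
static void example_nested_locking(struct dentry *dentry)
{
	spin_lock(&dcache_lock);		/* outermost */
	spin_lock(&dcache_inode_lock);		/* i_dentry / d_alias / d_inode */
	spin_lock(&dentry->d_lock);		/* per-dentry state */
	spin_lock(&dcache_lru_lock);		/* LRU lists and counters */
	spin_lock(&dcache_hash_lock);		/* innermost: hash chains */

	/* ... manipulate the dentry here ... */

	spin_unlock(&dcache_hash_lock);
	spin_unlock(&dcache_lru_lock);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dcache_inode_lock);
	spin_unlock(&dcache_lock);
}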
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_inode_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_hash_lock);
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
+EXPORT_SYMBOL(dcache_inode_lock);
EXPORT_SYMBOL(dcache_lock);
static struct kmem_cache *dentry_cache __read_mostly;
static void dentry_iput(struct dentry * dentry)
__releases(dentry->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
{
struct inode *inode = dentry->d_inode;
if (inode) {
dentry->d_inode = NULL;
list_del_init(&dentry->d_alias);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (!inode->i_nlink)
fsnotify_inoderemove(inode);
iput(inode);
} else {
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
}
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
__releases(dentry->d_lock)
__releases(parent->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
{
list_del(&dentry->d_u.d_child);
* want to reduce dcache_lock anyway so this will
* get improved.
*/
+drop1:
spin_unlock(&dentry->d_lock);
goto repeat;
}
- if (parent && !spin_trylock(&parent->d_lock)) {
- spin_unlock(&dentry->d_lock);
+ if (!spin_trylock(&dcache_inode_lock)) {
+drop2:
spin_unlock(&dcache_lock);
- goto repeat;
+ goto drop1;
+ }
+ if (parent && !spin_trylock(&parent->d_lock)) {
+ spin_unlock(&dcache_inode_lock);
+ goto drop2;
}
}
dentry->d_count--;
spin_unlock(&dentry->d_lock);
if (parent)
spin_unlock(&parent->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
return;
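
/*
 * Illustration only, not from the patch: the drop1/drop2 unwinding above is an
 * instance of the usual trylock back-off. dput() ends up holding the innermost
 * lock (dentry->d_lock) and then discovers it needs locks that rank above it
 * in the ordering; blocking on those would invert the hierarchy, so it
 * trylocks them and, on failure, drops everything and retries.
 * example_lock_pair() below is a hypothetical helper, not a kernel API.
 */
static void example_lock_pair(spinlock_t *outer, spinlock_t *inner)
{
	for (;;) {
		spin_lock(inner);
		if (spin_trylock(outer))
			return;		/* both held, despite starting inner-first */
		/* outer is contended: back off fully and try again */
		spin_unlock(inner);
		cpu_relax();
	}
}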
if (!list_empty(&inode->i_dentry)) {
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
de = __d_find_alias(inode, 0);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
return de;
struct dentry *dentry;
restart:
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (!dentry->d_count) {
__dget_locked_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
dput(dentry);
goto restart;
}
spin_unlock(&dentry->d_lock);
}
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
static void prune_one_dentry(struct dentry *dentry, struct dentry *parent)
__releases(dentry->d_lock)
__releases(parent->d_lock)
+ __releases(dcache_inode_lock)
__releases(dcache_lock)
{
__d_drop(dentry);
*/
while (dentry) {
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
again:
spin_lock(&dentry->d_lock);
if (IS_ROOT(dentry))
if (parent)
spin_unlock(&parent->d_lock);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
return;
}
spin_unlock(&dcache_lru_lock);
prune_one_dentry(dentry, parent);
- /* dcache_lock and dentry->d_lock dropped */
+ /* dcache_lock, dcache_inode_lock and dentry->d_lock dropped */
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
spin_lock(&dcache_lru_lock);
}
}
int cnt = *count;
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
relock:
spin_lock(&dcache_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
if (!list_empty(&referenced))
list_splice(&referenced, &sb->s_dentry_lru);
spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
-
}
/**
LIST_HEAD(tmp);
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
spin_lock(&dcache_lru_lock);
while (!list_empty(&sb->s_dentry_lru)) {
list_splice_init(&sb->s_dentry_lru, &tmp);
shrink_dentry_list(&tmp);
}
spin_unlock(&dcache_lru_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/* the caller must hold dcache_lock and dcache_inode_lock */
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
+ spin_lock(&dentry->d_lock);
if (inode)
list_add(&dentry->d_alias, &inode->i_dentry);
dentry->d_inode = inode;
+ spin_unlock(&dentry->d_lock);
fsnotify_d_instantiate(dentry, inode);
}
{
BUG_ON(!list_empty(&entry->d_alias));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
__d_instantiate(entry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(entry, inode);
}
BUG_ON(!list_empty(&entry->d_alias));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
result = __d_instantiate_unique(entry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (!result) {
tmp->d_parent = tmp; /* make sure dput doesn't croak */
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
res = __d_find_alias(inode, 0);
if (res) {
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
dput(tmp);
goto out_iput;
hlist_add_head(&tmp->d_hash, &inode->i_sb->s_anon);
spin_unlock(&dcache_hash_lock);
spin_unlock(&tmp->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
return tmp;
if (inode && S_ISDIR(inode->i_mode)) {
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
new = __d_find_alias(inode, 1);
if (new) {
BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(new, inode);
d_move(new, dentry);
} else {
/* already taking dcache_lock, so d_add() by hand */
__d_instantiate(dentry, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(dentry, inode);
d_rehash(dentry);
* already has a dentry.
*/
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
__d_instantiate(found, inode);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
return found;
*/
new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dget_locked(new);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
security_d_instantiate(found, inode);
d_move(new, found);
* Are we the only user?
*/
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
if (dentry->d_count == 1) {
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
fsnotify_nameremove(dentry, isdir);
*/
static struct dentry *__d_unalias(struct dentry *dentry, struct dentry *alias)
__releases(dcache_lock)
+ __releases(dcache_inode_lock)
{
struct mutex *m1 = NULL, *m2 = NULL;
struct dentry *ret;
d_move_locked(alias, dentry);
ret = alias;
out_err:
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
if (m2)
mutex_unlock(m2);
BUG_ON(!d_unhashed(dentry));
spin_lock(&dcache_lock);
+ spin_lock(&dcache_inode_lock);
if (!inode) {
actual = dentry;
_d_rehash(actual);
spin_unlock(&dcache_hash_lock);
spin_unlock(&actual->d_lock);
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
out_nolock:
if (actual == dentry) {
return actual;
shouldnt_be_hashed:
+ spin_unlock(&dcache_inode_lock);
spin_unlock(&dcache_lock);
BUG();
}