}
EXPORT_SYMBOL_GPL(__mnt_is_readonly);
-static inline void inc_mnt_writers(struct vfsmount *mnt)
+static inline void mnt_inc_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))++;
#else
mnt->mnt_writers++;
#endif
}
-static inline void dec_mnt_writers(struct vfsmount *mnt)
+static inline void mnt_dec_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
(*per_cpu_ptr(mnt->mnt_writers, smp_processor_id()))--;
#else
mnt->mnt_writers--;
#endif
}
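
/*
* These helpers update only the calling CPU's counter, and the ++/--
* are not atomic, so the CPU must stay pinned across the update. A
* minimal sketch of the expected calling pattern (an illustration, not
* a hunk from this patch):
*
*	preempt_disable();
*	mnt_inc_writers(mnt);
*	preempt_enable();
*
* It is fine for the later decrement to land on a different CPU; only
* the sum over all CPUs is meaningful.
*/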
-static unsigned int count_mnt_writers(struct vfsmount *mnt)
+static unsigned int mnt_get_writers(struct vfsmount *mnt)
{
#ifdef CONFIG_SMP
unsigned int count = 0;
int cpu;

for_each_possible_cpu(cpu)
count += *per_cpu_ptr(mnt->mnt_writers, cpu);

return count;
#else
return mnt->mnt_writers;
#endif
}

int mnt_want_write(struct vfsmount *mnt)
{
int ret = 0;

preempt_disable();
- inc_mnt_writers(mnt);
+ mnt_inc_writers(mnt);
/*
- * The store to inc_mnt_writers must be visible before we pass
+ * The store to mnt_inc_writers must be visible before we pass
* MNT_WRITE_HOLD loop below, so that the slowpath can see our
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
while (mnt->mnt_flags & MNT_WRITE_HOLD)
cpu_relax();
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
* MNT_WRITE_HOLD is cleared.
*/
smp_rmb();
if (__mnt_is_readonly(mnt)) {
- dec_mnt_writers(mnt);
+ mnt_dec_writers(mnt);
ret = -EROFS;
goto out;
}
out:
preempt_enable();
return ret;
}
EXPORT_SYMBOL_GPL(mnt_want_write);

int mnt_clone_write(struct vfsmount *mnt)
{
/* superblock may be r/o */
if (__mnt_is_readonly(mnt))
return -EROFS;
preempt_disable();
- inc_mnt_writers(mnt);
+ mnt_inc_writers(mnt);
preempt_enable();
return 0;
}
EXPORT_SYMBOL_GPL(mnt_clone_write);
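
/*
* mnt_clone_write() can skip the MNT_WRITE_HOLD dance in mnt_want_write()
* because it must only be used on a mount already holding a write
* reference: that existing reference keeps the remount-read-only
* slowpath failing with -EBUSY, so only the r/o superblock check is
* needed. A usage sketch (an illustration, not a hunk from this patch;
* file is assumed to be a struct file already opened for write):
*
*	err = mnt_clone_write(file->f_path.mnt);
*	if (err)
*		return err;
*	...
*	mnt_drop_write(file->f_path.mnt);
*/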
void mnt_drop_write(struct vfsmount *mnt)
{
preempt_disable();
- dec_mnt_writers(mnt);
+ mnt_dec_writers(mnt);
preempt_enable();
}
EXPORT_SYMBOL_GPL(mnt_drop_write);
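
/*
* Callers bracket filesystem modifications with the two calls above. A
* sketch of the usual pattern (an illustration, not a hunk from this
* patch; modify_the_filesystem() is a hypothetical stand-in):
*
*	err = mnt_want_write(path.mnt);
*	if (err)
*		return err;
*	err = modify_the_filesystem();
*	mnt_drop_write(path.mnt);
*	return err;
*
* The hunk below is from __mnt_make_readonly(), the remount-read-only
* slowpath that this fastpath synchronises against.
*/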

/*
* It is OK to have counter incremented on one CPU and decremented on
* another: the sum will add up correctly. The danger would be when we
* sum up each counter, if we read a counter before it is incremented,
* but then read another cpu's count which it has been subsequently
* decremented from -- we would see more decrements than we should.
* MNT_WRITE_HOLD protects against this scenario, because
* mnt_want_write first increments count, then smp_mb, then spins on
* MNT_WRITE_HOLD, so it can't be decremented by another CPU while
* we're counting up here.
*/
- if (count_mnt_writers(mnt) > 0)
+ if (mnt_get_writers(mnt) > 0)
ret = -EBUSY;
else
mnt->mnt_flags |= MNT_READONLY;
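
/*
* How the barriers pair up across the two paths (a schematic, not code
* from this patch):
*
*	mnt_want_write (fastpath)       __mnt_make_readonly (slowpath)
*	-------------------------       ------------------------------
*	mnt_inc_writers(mnt)            mnt->mnt_flags |= MNT_WRITE_HOLD
*	smp_mb()                        smp_mb()
*	spin while MNT_WRITE_HOLD       sum counters: mnt_get_writers()
*	smp_rmb()                       set MNT_READONLY or fail -EBUSY
*	test __mnt_is_readonly()        clear MNT_WRITE_HOLD
*
* Either the slowpath's sum sees our increment, or the fastpath sees
* MNT_WRITE_HOLD (and, after it clears, the MNT_READONLY result); the
* two full barriers rule out both sides missing each other. The final
* hunk below is from mount teardown, where the count must be zero again.
*/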

/*
* This probably indicates that somebody messed
* up a mnt_want/drop_write() pair. If this
* happens, the filesystem was probably unable
* to make r/w->r/o transitions.
*/
/*
* atomic_dec_and_lock() used to deal with ->mnt_count decrements
- * provides barriers, so count_mnt_writers() below is safe. AV
+ * provides barriers, so mnt_get_writers() below is safe. AV
*/
- WARN_ON(count_mnt_writers(mnt));
+ WARN_ON(mnt_get_writers(mnt));
fsnotify_vfsmount_delete(mnt);
dput(mnt->mnt_root);
free_vfsmnt(mnt);
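
/*
* The WARN_ON above trips when the writer count is still nonzero at
* teardown, i.e. an unbalanced mnt_want_write()/mnt_drop_write() pair.
* A sketch of the kind of bug it catches (an illustration, not a hunk
* from this patch; do_stuff() is a hypothetical helper):
*
*	err = mnt_want_write(mnt);
*	if (err)
*		return err;
*	err = do_stuff();
*	if (err)
*		return err;	(leaks the write reference)
*	mnt_drop_write(mnt);
*	return 0;
*
* The early return never decrements the per-cpu counters, so they no
* longer sum to zero when the mount is finally freed.
*/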