--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ ... @@
 };
 
 static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
-static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
 
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
                    size_t *lenp, loff_t *ppos)
 {
         dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
-        dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
         return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
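The split in this hunk is deliberate: nr_dentry stays a percpu_counter because dentries are allocated and freed concurrently on every CPU, while nr_dentry_unused only ever changed under the LRU locking that already serializes list insertion and removal, so the per-cpu version added an O(NR_CPUS) sum on every read without buying any write concurrency. A minimal userspace sketch of that trade-off (hypothetical names and a fixed CPU count, not the kernel's percpu_counter API):

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU deltas: writes touch only the local slot (one cache line
 * each in the kernel), but a read must sum all of them. */
static long percpu_delta[NR_CPUS];

/* Plain counter: reads are a single load, writes share one word. */
static long plain_counter;

static void percpu_inc(int cpu)
{
        percpu_delta[cpu]++;
}

static long percpu_sum_positive(void)
{
        long sum = 0;

        for (int cpu = 0; cpu < NR_CPUS; cpu++)  /* O(NR_CPUS) per read */
                sum += percpu_delta[cpu];
        return sum > 0 ? sum : 0;                /* mirror the _positive clamp */
}

int main(void)
{
        percpu_inc(0);
        percpu_inc(2);
        plain_counter++;        /* what this patch goes back to for nr_unused */
        printf("percpu sum: %ld, plain read: %ld\n",
               percpu_sum_positive(), plain_counter);
        return 0;
}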
@@ ... @@ static void dentry_lru_add(struct dentry *dentry)
         if (list_empty(&dentry->d_lru)) {
                 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
-                percpu_counter_inc(&nr_dentry_unused);
+                dentry_stat.nr_unused++;
         }
 }
@@ ... @@ static void dentry_lru_del(struct dentry *dentry)
         if (!list_empty(&dentry->d_lru)) {
                 list_del_init(&dentry->d_lru);
                 dentry->d_sb->s_nr_dentry_unused--;
-                percpu_counter_dec(&nr_dentry_unused);
+                dentry_stat.nr_unused--;
         }
 }
@@ ... @@ static void dentry_lru_move_tail(struct dentry *dentry)
         if (list_empty(&dentry->d_lru)) {
                 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
                 dentry->d_sb->s_nr_dentry_unused++;
-                percpu_counter_inc(&nr_dentry_unused);
+                dentry_stat.nr_unused++;
         } else {
                 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
         }
@@ ... @@ static void prune_dcache(int count)
 {
         struct super_block *sb, *p = NULL;
         int w_count;
-        int unused = percpu_counter_sum_positive(&nr_dentry_unused);
+        int unused = dentry_stat.nr_unused;
         int prune_ratio;
         int pruned;
@@ ... @@
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
-        int nr_unused;
-
         if (nr) {
                 if (!(gfp_mask & __GFP_FS))
                         return -1;
                 prune_dcache(nr);
         }
 
-        nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
-        return (nr_unused / 100) * sysctl_vfs_cache_pressure;
+        return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
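The shrinker's return value is an estimate of reclaimable objects: dividing by 100 first and then scaling by sysctl_vfs_cache_pressure (default 100) reports roughly the whole unused count at default pressure, half of it at 50, and keeps the intermediate product from overflowing int for very large caches. A small worked example (hypothetical values):

#include <stdio.h>

/* Same arithmetic as the return statement above. */
static int shrink_estimate(int nr_unused, int vfs_cache_pressure)
{
        return (nr_unused / 100) * vfs_cache_pressure;
}

int main(void)
{
        printf("%d\n", shrink_estimate(150000, 100));  /* 150000: default pressure */
        printf("%d\n", shrink_estimate(150000, 50));   /* 75000: cache favored */
        printf("%d\n", shrink_estimate(150000, 200));  /* 300000: cache penalized */
        return 0;
}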
@@ ... @@ static void __init dcache_init(void)
         int loop;
 
         percpu_counter_init(&nr_dentry, 0);
-        percpu_counter_init(&nr_dentry_unused, 0);
 
         /*
          * A constructor could be added for stable state like the lists,
--- a/fs/inode.c
+++ b/fs/inode.c
@@ ... @@
 struct inodes_stat_t inodes_stat;
 
 static struct percpu_counter nr_inodes __cacheline_aligned_in_smp;
-static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp;
 
 static struct kmem_cache *inode_cachep __read_mostly;
@@ ... @@
 static inline int get_nr_inodes_unused(void)
 {
-        return percpu_counter_sum_positive(&nr_inodes_unused);
+        return inodes_stat.nr_unused;
 }
 
 int get_nr_dirty_inodes(void)
@@ ... @@ int proc_nr_inodes(ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
         inodes_stat.nr_inodes = get_nr_inodes();
-        inodes_stat.nr_unused = get_nr_inodes_unused();
         return proc_dointvec(table, write, buffer, lenp, ppos);
 }
 #endif
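Note that the line dropped from proc_nr_inodes() loses no information: with the revert, get_nr_inodes_unused() above reads inodes_stat.nr_unused directly, so keeping the assignment would have turned it into a self-assignment.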
@@ ... @@ static void inode_lru_list_add(struct inode *inode)
 {
         if (list_empty(&inode->i_lru)) {
                 list_add(&inode->i_lru, &inode_lru);
-                percpu_counter_inc(&nr_inodes_unused);
+                inodes_stat.nr_unused++;
         }
 }
@@ ... @@ static void inode_lru_list_del(struct inode *inode)
 {
         if (!list_empty(&inode->i_lru)) {
                 list_del_init(&inode->i_lru);
-                percpu_counter_dec(&nr_inodes_unused);
+                inodes_stat.nr_unused--;
         }
 }
@@ ... @@ void evict_inodes(struct super_block *sb)
                 list_move(&inode->i_lru, &dispose);
                 list_del_init(&inode->i_wb_list);
                 if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                        percpu_counter_dec(&nr_inodes_unused);
+                        inodes_stat.nr_unused--;
         }
         spin_unlock(&inode_lock);
@@ ... @@ int invalidate_inodes(struct super_block *sb)
                 list_move(&inode->i_lru, &dispose);
                 list_del_init(&inode->i_wb_list);
                 if (!(inode->i_state & (I_DIRTY | I_SYNC)))
-                        percpu_counter_dec(&nr_inodes_unused);
+                        inodes_stat.nr_unused--;
         }
         spin_unlock(&inode_lock);
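The two identical hunks above are not a duplication artifact: at this point in the tree, evict_inodes() and invalidate_inodes() both walk a superblock's inode list with the same LRU teardown sequence, so each copy needs the same counter change.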
@@ ... @@ static void prune_icache(int nr_to_scan)
                 if (atomic_read(&inode->i_count) ||
                     (inode->i_state & ~I_REFERENCED)) {
                         list_del_init(&inode->i_lru);
-                        percpu_counter_dec(&nr_inodes_unused);
+                        inodes_stat.nr_unused--;
                         continue;
                 }
@@ ... @@ static void prune_icache(int nr_to_scan)
                  */
                 list_move(&inode->i_lru, &freeable);
                 list_del_init(&inode->i_wb_list);
-                percpu_counter_dec(&nr_inodes_unused);
+                inodes_stat.nr_unused--;
         }
         if (current_is_kswapd())
                 __count_vm_events(KSWAPD_INODESTEAL, reap);
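Both prune_icache() hunks decrement the counter because both arms take the inode off the LRU: inodes that regained a reference (or grew state bits beyond I_REFERENCED) are simply delisted, while reclaimable ones move to a private freeable list. A schematic of that scan (hypothetical userspace stand-ins; the real loop also handles inode_lock and I_REFERENCED aging):

#include <stdio.h>

enum { BUSY, RECLAIMABLE };

static long nr_unused;

/* One pass over a fake LRU: every scanned element leaves the list, so
 * the counter drops in both arms; only reclaimable ones are freed. */
static int prune(const int *lru, int n)
{
        int freed = 0;

        for (int i = 0; i < n; i++) {
                nr_unused--;            /* off the LRU either way */
                if (lru[i] == BUSY)
                        continue;       /* delisted but kept alive */
                freed++;                /* moved to the "freeable" list */
        }
        return freed;
}

int main(void)
{
        int lru[] = { RECLAIMABLE, BUSY, RECLAIMABLE };

        nr_unused = 3;
        printf("freed %d, nr_unused now %ld\n", prune(lru, 3), nr_unused);
        return 0;
}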
@@ ... @@ void __init inode_init(void)
                                          init_once);
         register_shrinker(&icache_shrinker);
         percpu_counter_init(&nr_inodes, 0);
-        percpu_counter_init(&nr_inodes_unused, 0);
 
         /* Hash may have been set up in inode_init_early */
         if (!hashdist)
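What survives the revert is worth noting: nr_dentry and nr_inodes stay per-cpu, since object creation and destruction are not serialized by any single lock, while the nr_unused counters go back to plain fields in dentry_stat and inodes_stat, where the LRU locking that already covers every list manipulation keeps them consistent.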