 	MEM_CGROUP_STAT_WRITEBACK,	/* # of pages under writeback */
 	MEM_CGROUP_STAT_SWAP,		/* # of pages, swapped out */
 	MEM_CGROUP_STAT_NSTATS,
+	/* default hierarchy stats */
+	MEMCG_SOCK,
+	MEMCG_NR_STAT,
 };
@@ ... @@ struct mem_cgroup_reclaim_cookie {
 #ifdef CONFIG_MEMCG
 struct mem_cgroup_stat_cpu {
-	long count[MEM_CGROUP_STAT_NSTATS];
+	long count[MEMCG_NR_STAT];
 	unsigned long events[MEMCG_NR_EVENTS];
 	unsigned long nr_page_events;
 	unsigned long targets[MEM_CGROUP_NTARGETS];
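
Taken together, the two hunks above split the stat index space: legacy cgroup1 statistics still end at the MEM_CGROUP_STAT_NSTATS sentinel, counters that only exist on the default hierarchy (so far just MEMCG_SOCK) are appended behind it, and MEMCG_NR_STAT now sizes the per-CPU count[] array so the extra slot exists without disturbing legacy code that iterates up to the old sentinel. A standalone userspace sketch of that indexing scheme, reusing the enum names for clarity but otherwise purely illustrative:

#include <stdio.h>

/* Mock of the two-sentinel layout; not kernel code. */
enum mock_stat_index {
	MEM_CGROUP_STAT_CACHE,		/* legacy stats ...		*/
	MEM_CGROUP_STAT_RSS,
	MEM_CGROUP_STAT_NSTATS,		/* ... end here			*/
	/* default hierarchy stats */
	MEMCG_SOCK,
	MEMCG_NR_STAT,			/* sizes the counter array	*/
};

int main(void)
{
	long count[MEMCG_NR_STAT] = { 0 };
	int i;

	count[MEMCG_SOCK] += 4;		/* e.g. four pages of socket buffers */

	/* Legacy loops bounded by the old sentinel are unaffected. */
	for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++)
		printf("legacy stat %d = %ld\n", i, count[i]);

	printf("sock pages = %ld\n", count[MEMCG_SOCK]);
	return 0;
}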
@@ ... @@ static int memory_stat_show(struct seq_file *m, void *v)
 		   (u64)tree_stat(memcg, MEM_CGROUP_STAT_RSS) * PAGE_SIZE);
 	seq_printf(m, "file %llu\n",
 		   (u64)tree_stat(memcg, MEM_CGROUP_STAT_CACHE) * PAGE_SIZE);
+	seq_printf(m, "sock %llu\n",
+		   (u64)tree_stat(memcg, MEMCG_SOCK) * PAGE_SIZE);
 	seq_printf(m, "file_mapped %llu\n",
 		   (u64)tree_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED) *
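
memory_stat_show() multiplies the page count by PAGE_SIZE, so the new entry appears in memory.stat on the unified hierarchy as a "sock <bytes>" line. A minimal userspace sketch for reading it back; the cgroup path is an assumed example, substitute the group you care about:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* Hypothetical cgroup; adjust to your unified-hierarchy mount. */
	const char *path = "/sys/fs/cgroup/mygroup/memory.stat";
	char key[64];
	unsigned long long val;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fscanf(f, "%63s %llu", key, &val) == 2) {
		if (!strcmp(key, "sock"))
			printf("socket buffer memory: %llu bytes\n", val);
	}
	fclose(f);
	return 0;
}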
@@ ... @@ bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 	if (in_softirq())
 		gfp_mask = GFP_NOWAIT;
+	this_cpu_add(memcg->stat->count[MEMCG_SOCK], nr_pages);
+
 	if (try_charge(memcg, gfp_mask, nr_pages) == 0)
 		return true;
@@ ... @@ void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
 		return;
 	}
+	this_cpu_sub(memcg->stat->count[MEMCG_SOCK], nr_pages);
+
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	css_put_many(&memcg->css, nr_pages);
 }
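
The charge path only ever adds to the local CPU's MEMCG_SOCK slot and the uncharge path only subtracts from it; readers sum the per-CPU values (tree_stat() above also folds in the subtree), so a buffer charged on one CPU and freed on another simply leaves offsetting per-CPU deltas behind. The mock below sketches that property under assumed names (charge_skmem(), uncharge_skmem(), a fixed four-CPU array); it is illustrative userspace code, not the kernel implementation:

#include <stdio.h>

#define NR_CPUS		4	/* assumption for the mock */
#define MEMCG_SOCK	0

static long percpu_count[NR_CPUS][1];	/* [cpu][stat index] */

/* Mirrors the this_cpu_add() in mem_cgroup_charge_skmem(). */
static void charge_skmem(int cpu, unsigned int nr_pages)
{
	percpu_count[cpu][MEMCG_SOCK] += nr_pages;
}

/* Mirrors the this_cpu_sub() in mem_cgroup_uncharge_skmem(). */
static void uncharge_skmem(int cpu, unsigned int nr_pages)
{
	percpu_count[cpu][MEMCG_SOCK] -= nr_pages;
}

/* Readers only ever look at the sum across CPUs. */
static long sock_pages(void)
{
	long sum = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		sum += percpu_count[cpu][MEMCG_SOCK];
	return sum;
}

int main(void)
{
	charge_skmem(0, 8);	/* charged on CPU 0 ...				*/
	uncharge_skmem(3, 8);	/* ... freed on CPU 3; that slot goes negative	*/
	charge_skmem(1, 2);

	printf("outstanding socket pages: %ld\n", sock_pages());
	return 0;
}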