void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
long val = 0;
int cpu;
return val;
}
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val)
{
if (!mem_cgroup_disabled())
this_cpu_add(memcg->stat->count[idx], val);
}
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_stat(memcg, idx, 1);
+ mod_memcg_state(memcg, idx, 1);
}
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_stat(memcg, idx, -1);
+ mod_memcg_state(memcg, idx, -1);
}
/**
- * mem_cgroup_update_page_stat - update page state statistics
+ * mod_memcg_page_state - update page state statistics
* @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
*
* The @page must be locked or the caller must use lock_page_memcg()
* to prevent double accounting when the page is concurrently being
* moved to another memcg:
*
* lock_page(page) or lock_page_memcg(page)
* if (TestClearPageState(page))
- * mem_cgroup_update_page_stat(page, state, -1);
+ * mod_memcg_page_state(page, state, -1);
* unlock_page(page) or unlock_page_memcg(page)
*
* Kernel pages are an exception to this, since they'll never move.
*/
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum memcg_stat_item idx, int val)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
if (page->mem_cgroup)
- mem_cgroup_update_stat(page->mem_cgroup, idx, val);
+ mod_memcg_state(page->mem_cgroup, idx, val);
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum memcg_stat_item idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, 1);
+ mod_memcg_page_state(page, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum memcg_stat_item idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, -1);
+ mod_memcg_page_state(page, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
return false;
}
-static inline unsigned long mem_cgroup_read_stat(struct mem_cgroup *memcg,
- enum mem_cgroup_stat_index idx)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
return 0;
}
-static inline void mem_cgroup_update_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx, int val)
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void mem_cgroup_inc_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
}
-static inline void mem_cgroup_dec_stat(struct mem_cgroup *memcg,
- enum memcg_stat_item idx)
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
{
}
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum memcg_stat_item idx,
- int nr)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum memcg_stat_item idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum memcg_stat_item idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
* implemented.
*/
-static unsigned long mem_cgroup_read_events(struct mem_cgroup *memcg,
- enum memcg_event_item event)
+static unsigned long memcg_sum_events(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
{
unsigned long val = 0;
int cpu;
if (memcg1_stats[i] == MEMCG_SWAP && !do_swap_account)
continue;
pr_cont(" %s:%luKB", memcg1_stat_names[i],
- K(mem_cgroup_read_stat(iter, memcg1_stats[i])));
+ K(memcg_page_state(iter, memcg1_stats[i])));
}
for (i = 0; i < NR_LRU_LISTS; i++)
for_each_mem_cgroup_tree(iter, memcg) {
for (i = 0; i < MEMCG_NR_STAT; i++)
- stat[i] += mem_cgroup_read_stat(iter, i);
+ stat[i] += memcg_page_state(iter, i);
}
}
for_each_mem_cgroup_tree(iter, memcg) {
for (i = 0; i < MEMCG_NR_EVENTS; i++)
- events[i] += mem_cgroup_read_events(iter, i);
+ events[i] += memcg_sum_events(iter, i);
}
}
struct mem_cgroup *iter;
for_each_mem_cgroup_tree(iter, memcg) {
- val += mem_cgroup_read_stat(iter, MEMCG_CACHE);
- val += mem_cgroup_read_stat(iter, MEMCG_RSS);
+ val += memcg_page_state(iter, MEMCG_CACHE);
+ val += memcg_page_state(iter, MEMCG_RSS);
if (swap)
- val += mem_cgroup_read_stat(iter, MEMCG_SWAP);
+ val += memcg_page_state(iter, MEMCG_SWAP);
}
} else {
if (!swap)
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
seq_printf(m, "%s %lu\n", memcg1_stat_names[i],
- mem_cgroup_read_stat(memcg, memcg1_stats[i]) *
+ memcg_page_state(memcg, memcg1_stats[i]) *
PAGE_SIZE);
}
for (i = 0; i < ARRAY_SIZE(memcg1_events); i++)
seq_printf(m, "%s %lu\n", memcg1_event_names[i],
- mem_cgroup_read_events(memcg, memcg1_events[i]));
+ memcg_sum_events(memcg, memcg1_events[i]));
for (i = 0; i < NR_LRU_LISTS; i++)
seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account())
continue;
for_each_mem_cgroup_tree(mi, memcg)
- val += mem_cgroup_read_stat(mi, memcg1_stats[i]) *
+ val += memcg_page_state(mi, memcg1_stats[i]) *
PAGE_SIZE;
seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], val);
}
unsigned long long val = 0;
for_each_mem_cgroup_tree(mi, memcg)
- val += mem_cgroup_read_events(mi, memcg1_events[i]);
+ val += memcg_sum_events(mi, memcg1_events[i]);
seq_printf(m, "total_%s %llu\n", memcg1_event_names[i], val);
}
struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css);
struct mem_cgroup *parent;
- *pdirty = mem_cgroup_read_stat(memcg, NR_FILE_DIRTY);
+ *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY);
/* this should eventually include NR_UNSTABLE_NFS */
- *pwriteback = mem_cgroup_read_stat(memcg, NR_WRITEBACK);
+ *pwriteback = memcg_page_state(memcg, NR_WRITEBACK);
*pfilepages = mem_cgroup_nr_lru_pages(memcg, (1 << LRU_INACTIVE_FILE) |
(1 << LRU_ACTIVE_FILE));
*pheadroom = PAGE_COUNTER_MAX;
/*
* move_lock grabbed above and caller set from->moving_account, so
- * mem_cgroup_update_page_stat() will serialize updates to PageDirty.
+ * mod_memcg_page_state() will serialize updates to PageDirty.
* So mapping should be stable for dirty pages.
*/
if (!anon && PageDirty(page)) {
{
struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
- seq_printf(m, "low %lu\n", mem_cgroup_read_events(memcg, MEMCG_LOW));
- seq_printf(m, "high %lu\n", mem_cgroup_read_events(memcg, MEMCG_HIGH));
- seq_printf(m, "max %lu\n", mem_cgroup_read_events(memcg, MEMCG_MAX));
- seq_printf(m, "oom %lu\n", mem_cgroup_read_events(memcg, MEMCG_OOM));
+ seq_printf(m, "low %lu\n", memcg_sum_events(memcg, MEMCG_LOW));
+ seq_printf(m, "high %lu\n", memcg_sum_events(memcg, MEMCG_HIGH));
+ seq_printf(m, "max %lu\n", memcg_sum_events(memcg, MEMCG_MAX));
+ seq_printf(m, "oom %lu\n", memcg_sum_events(memcg, MEMCG_OOM));
return 0;
}
inode_attach_wb(inode, page);
wb = inode_to_wb(inode);
- mem_cgroup_inc_page_stat(page, NR_FILE_DIRTY);
+ inc_memcg_page_state(page, NR_FILE_DIRTY);
__inc_node_page_state(page, NR_FILE_DIRTY);
__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
__inc_node_page_state(page, NR_DIRTIED);
struct bdi_writeback *wb)
{
if (mapping_cap_account_dirty(mapping)) {
- mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+ dec_memcg_page_state(page, NR_FILE_DIRTY);
dec_node_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
*/
wb = unlocked_inode_to_wb_begin(inode, &locked);
if (TestClearPageDirty(page)) {
- mem_cgroup_dec_page_stat(page, NR_FILE_DIRTY);
+ dec_memcg_page_state(page, NR_FILE_DIRTY);
dec_node_page_state(page, NR_FILE_DIRTY);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
dec_wb_stat(wb, WB_RECLAIMABLE);
ret = TestClearPageWriteback(page);
}
if (ret) {
- mem_cgroup_dec_page_stat(page, NR_WRITEBACK);
+ dec_memcg_page_state(page, NR_WRITEBACK);
dec_node_page_state(page, NR_WRITEBACK);
dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
inc_node_page_state(page, NR_WRITTEN);
ret = TestSetPageWriteback(page);
}
if (!ret) {
- mem_cgroup_inc_page_stat(page, NR_WRITEBACK);
+ inc_memcg_page_state(page, NR_WRITEBACK);
inc_node_page_state(page, NR_WRITEBACK);
inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
}
goto out;
}
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
- mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, nr);
+ mod_memcg_page_state(page, NR_FILE_MAPPED, nr);
out:
unlock_page_memcg(page);
}
* pte lock (a spinlock) is held, which implies preemption is disabled.
*/
__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
- mem_cgroup_update_page_stat(page, NR_FILE_MAPPED, -nr);
+ mod_memcg_page_state(page, NR_FILE_MAPPED, -nr);
if (unlikely(PageMlocked(page)))
clear_page_mlock(page);
active = lruvec_lru_size(lruvec, active_lru, sc->reclaim_idx);
if (memcg)
- refaults = mem_cgroup_read_stat(memcg, WORKINGSET_ACTIVATE);
+ refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
else
refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
struct lruvec *lruvec;
if (memcg)
- refaults = mem_cgroup_read_stat(memcg,
- WORKINGSET_ACTIVATE);
+ refaults = memcg_page_state(memcg, WORKINGSET_ACTIVATE);
else
refaults = node_page_state(pgdat, WORKINGSET_ACTIVATE);
refault_distance = (refault - eviction) & EVICTION_MASK;
inc_node_state(pgdat, WORKINGSET_REFAULT);
- mem_cgroup_inc_stat(memcg, WORKINGSET_REFAULT);
+ inc_memcg_state(memcg, WORKINGSET_REFAULT);
if (refault_distance <= active_file) {
inc_node_state(pgdat, WORKINGSET_ACTIVATE);
- mem_cgroup_inc_stat(memcg, WORKINGSET_ACTIVATE);
+ inc_memcg_state(memcg, WORKINGSET_ACTIVATE);
rcu_read_unlock();
return true;
}
if (WARN_ON_ONCE(node->exceptional))
goto out_invalid;
inc_node_state(page_pgdat(virt_to_page(node)), WORKINGSET_NODERECLAIM);
- mem_cgroup_inc_page_stat(virt_to_page(node), WORKINGSET_NODERECLAIM);
+ inc_memcg_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
workingset_update_node, mapping);
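
For illustration only (not part of the diff): a minimal caller-side sketch of the
renamed helpers, following the lock_page_memcg() protocol documented in the
kernel-doc above and mirroring the dirty-page accounting hunks in this patch.

	/* serialize against the page being moved to another memcg */
	lock_page_memcg(page);
	if (TestClearPageDirty(page)) {
		/* memcg and node counters are updated in tandem */
		dec_memcg_page_state(page, NR_FILE_DIRTY);
		dec_node_page_state(page, NR_FILE_DIRTY);
	}
	unlock_page_memcg(page);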