mz->on_tree = false;
}
-static void
-mem_cgroup_insert_exceeded(struct mem_cgroup *mem,
- struct mem_cgroup_per_zone *mz,
- struct mem_cgroup_tree_per_zone *mctz)
-{
- spin_lock(&mctz->lock);
- __mem_cgroup_insert_exceeded(mem, mz, mctz);
- spin_unlock(&mctz->lock);
-}
-
static void
mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
struct mem_cgroup_per_zone *mz,
static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
{
- unsigned long long prev_usage_in_excess, new_usage_in_excess;
- bool updated_tree = false;
+ unsigned long long new_usage_in_excess;
struct mem_cgroup_per_zone *mz;
struct mem_cgroup_tree_per_zone *mctz;
-
- mz = mem_cgroup_zoneinfo(mem, page_to_nid(page), page_zonenum(page));
+ int nid = page_to_nid(page);
+ int zid = page_zonenum(page);
mctz = soft_limit_tree_from_page(page);
/*
- * We do updates in lazy mode, mem's are removed
- * lazily from the per-zone, per-node rb tree
+ * Necessary to update all ancestors when hierarchy is used,
+ * because their event counters are not touched.
*/
- prev_usage_in_excess = mz->usage_in_excess;
-
- new_usage_in_excess = res_counter_soft_limit_excess(&mem->res);
- if (prev_usage_in_excess) {
- mem_cgroup_remove_exceeded(mem, mz, mctz);
- updated_tree = true;
- }
- if (!new_usage_in_excess)
- goto done;
- mem_cgroup_insert_exceeded(mem, mz, mctz);
-
-done:
- if (updated_tree) {
- spin_lock(&mctz->lock);
- mz->usage_in_excess = new_usage_in_excess;
- spin_unlock(&mctz->lock);
+ for (; mem; mem = parent_mem_cgroup(mem)) {
+ mz = mem_cgroup_zoneinfo(mem, nid, zid);
+ new_usage_in_excess =
+ res_counter_soft_limit_excess(&mem->res);
+ /*
+ * We have to update the tree if mz is on the RB-tree or
+ * mem is over its soft limit.
+ */
+ if (new_usage_in_excess || mz->on_tree) {
+ spin_lock(&mctz->lock);
+ /* if on-tree, remove it */
+ if (mz->on_tree)
+ __mem_cgroup_remove_exceeded(mem, mz, mctz);
+ /*
+ * if over soft limit, insert again. mz->usage_in_excess
+ * will be updated properly.
+ */
+ if (new_usage_in_excess)
+ __mem_cgroup_insert_exceeded(mem, mz, mctz);
+ else
+ mz->usage_in_excess = 0;
+ spin_unlock(&mctz->lock);
+ }
}
}
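
For orientation: the ancestor walk above ends at the root cgroup, because parent_mem_cgroup() follows the res_counter parent pointer and returns NULL once there is none, and the excess is recomputed from the res_counter at every level. Roughly, the two helpers the loop leans on look like this in this series (shown for reference; not part of the diff):

	/* walk one level up the hierarchy; NULL at the root, which is
	 * what terminates the for (; mem; mem = parent_mem_cgroup(mem))
	 * loop above */
	static struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *mem)
	{
		if (!mem->res.parent)
			return NULL;
		return mem_cgroup_from_res_counter(mem->res.parent, res);
	}

	/* usage above the soft limit, or 0; sampled under the counter lock */
	static inline unsigned long long
	res_counter_soft_limit_excess(struct res_counter *cnt)
	{
		unsigned long long excess;
		unsigned long flags;

		spin_lock_irqsave(&cnt->lock, flags);
		if (cnt->usage <= cnt->soft_limit)
			excess = 0;
		else
			excess = cnt->usage - cnt->soft_limit;
		spin_unlock_irqrestore(&cnt->lock, flags);
		return excess;
	}
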
gfp_t gfp_mask, struct mem_cgroup **memcg,
bool oom, struct page *page)
{
- struct mem_cgroup *mem, *mem_over_limit, *mem_over_soft_limit;
+ struct mem_cgroup *mem, *mem_over_limit;
int nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
- struct res_counter *fail_res, *soft_fail_res = NULL;
+ struct res_counter *fail_res;
if (unlikely(test_thread_flag(TIF_MEMDIE))) {
/* Don't account this! */
if (mem_cgroup_is_root(mem))
goto done;
- ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res,
- &soft_fail_res);
+ ret = res_counter_charge(&mem->res, PAGE_SIZE, &fail_res);
if (likely(!ret)) {
if (!do_swap_account)
break;
ret = res_counter_charge(&mem->memsw, PAGE_SIZE,
- &fail_res, NULL);
+ &fail_res);
if (likely(!ret))
break;
/* mem+swap counter fails */
- res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
flags |= MEM_CGROUP_RECLAIM_NOSWAP;
mem_over_limit = mem_cgroup_from_res_counter(fail_res,
memsw);
}
}
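
The hunk above returns the charge path to the plain res_counter interface: the soft-limit out-parameter that an earlier patch in this series threaded through res_counter_charge()/res_counter_uncharge() is dropped again. The prototypes the calls now match are, roughly:

	/* charge reports only the counter that hit its hard limit;
	 * uncharge reports nothing */
	int res_counter_charge(struct res_counter *counter, unsigned long val,
			       struct res_counter **limit_fail_at);
	void res_counter_uncharge(struct res_counter *counter, unsigned long val);
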
/*
- * Insert just the ancestor, we should trickle down to the correct
- * cgroup for reclaim, since the other nodes will be below their
- * soft limit
+ * Insert the ancestor (and its ancestors) into the softlimit RB-tree
+ * if they exceed the soft limit.
*/
- if (soft_fail_res) {
- mem_over_soft_limit =
- mem_cgroup_from_res_counter(soft_fail_res, res);
- if (mem_cgroup_soft_limit_check(mem_over_soft_limit))
- mem_cgroup_update_tree(mem_over_soft_limit, page);
- }
+ if (mem_cgroup_soft_limit_check(mem))
+ mem_cgroup_update_tree(mem, page);
done:
return 0;
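
With soft_fail_res gone, whether to touch the RB-tree is decided purely on the memcg side: mem_cgroup_soft_limit_check() ratelimits the tree update through a per-cpu event counter on the cgroup that was charged, which is also why the comment in mem_cgroup_update_tree() notes that ancestors' event counters are not touched. A minimal sketch of that throttle pattern, with illustrative names (soft_limit_events and the threshold value here are invented for the sketch, not the file's actual fields):

	/* Hypothetical sketch of the ratelimit: one countdown per cpu,
	 * decremented at each charge/uncharge event; only when it runs
	 * out do we report "recheck the soft-limit tree" and rearm it. */
	#define SOFTLIMIT_EVENTS_THRESH	(1000)

	static bool soft_limit_check_sketch(struct mem_cgroup *mem)
	{
		bool ret = false;
		int cpu = get_cpu();	/* disable preemption, pin to this cpu */

		if (--mem->soft_limit_events[cpu] < 0) {
			mem->soft_limit_events[cpu] = SOFTLIMIT_EVENTS_THRESH;
			ret = true;
		}
		put_cpu();
		return ret;
	}
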
nomem:
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE,
- NULL);
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
}
css_put(&mem->css);
return;
goto out;
if (!mem_cgroup_is_root(from))
- res_counter_uncharge(&from->res, PAGE_SIZE, NULL);
+ res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
page = pc->page;
}
if (do_swap_account && !mem_cgroup_is_root(from))
- res_counter_uncharge(&from->memsw, PAGE_SIZE, NULL);
+ res_counter_uncharge(&from->memsw, PAGE_SIZE);
css_put(&from->css);
css_get(&to->css);
css_put(&parent->css);
/* uncharge if move fails */
if (!mem_cgroup_is_root(parent)) {
- res_counter_uncharge(&parent->res, PAGE_SIZE, NULL);
+ res_counter_uncharge(&parent->res, PAGE_SIZE);
if (do_swap_account)
- res_counter_uncharge(&parent->memsw, PAGE_SIZE, NULL);
+ res_counter_uncharge(&parent->memsw, PAGE_SIZE);
}
return ret;
}
* calling css_tryget
*/
if (!mem_cgroup_is_root(memcg))
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE,
- NULL);
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}
if (!mem)
return;
if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE, NULL);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account)
- res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
}
css_put(&mem->css);
}
struct page_cgroup *pc;
struct mem_cgroup *mem = NULL;
struct mem_cgroup_per_zone *mz;
- bool soft_limit_excess = false;
if (mem_cgroup_disabled())
return NULL;
}
if (!mem_cgroup_is_root(mem)) {
- res_counter_uncharge(&mem->res, PAGE_SIZE, &soft_limit_excess);
+ res_counter_uncharge(&mem->res, PAGE_SIZE);
if (do_swap_account &&
(ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT))
- res_counter_uncharge(&mem->memsw, PAGE_SIZE, NULL);
+ res_counter_uncharge(&mem->memsw, PAGE_SIZE);
}
if (ctype == MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
mem_cgroup_swap_statistics(mem, true);
mz = page_cgroup_zoneinfo(pc);
unlock_page_cgroup(pc);
- if (soft_limit_excess && mem_cgroup_soft_limit_check(mem))
+ if (mem_cgroup_soft_limit_check(mem))
mem_cgroup_update_tree(mem, page);
/* at swapout, this memcg will be accessed to record to swap */
if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
* This memcg can be obsolete one. We avoid calling css_tryget
*/
if (!mem_cgroup_is_root(memcg))
- res_counter_uncharge(&memcg->memsw, PAGE_SIZE, NULL);
+ res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
mem_cgroup_swap_statistics(memcg, false);
mem_cgroup_put(memcg);
}