MEM_CGROUP_NTARGETS,
};
-struct cg_proto {
- struct page_counter memory_allocated; /* Current allocated memory. */
- int memory_pressure;
- bool active;
-};
-
#ifdef CONFIG_MEMCG
struct mem_cgroup_stat_cpu {
long count[MEM_CGROUP_STAT_NSTATS];
/* Accounted resources */
struct page_counter memory;
+
+ /* Legacy consumer-oriented counters */
struct page_counter memsw;
struct page_counter kmem;
+ struct page_counter tcpmem;
/* Normal memory consumption range */
unsigned long low;
unsigned long socket_pressure;
/* Legacy tcp memory accounting */
- struct cg_proto tcp_mem;
+ bool tcpmem_active;
+ int tcpmem_pressure;
#ifndef CONFIG_SLOB
/* Index in the kmem_cache->memcg_params.memcg_caches array */
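
The net effect of the structure changes above is that the three members of the deleted struct cg_proto now live directly in struct mem_cgroup. A minimal userspace sketch of the flattened layout, illustrative only and not part of the patch (both struct definitions here are trimmed stand-ins for the kernel types):

#include <stdbool.h>
#include <stdio.h>

struct page_counter { unsigned long usage; };	/* stand-in for the kernel type */

struct mem_cgroup {				/* trimmed to the fields touched here */
	struct page_counter memory;
	struct page_counter memsw;
	struct page_counter kmem;
	struct page_counter tcpmem;	/* was tcp_mem.memory_allocated */
	bool tcpmem_active;		/* was tcp_mem.active */
	int tcpmem_pressure;		/* was tcp_mem.memory_pressure */
};

int main(void)
{
	struct mem_cgroup memcg = { .tcpmem_active = true };

	/* old access path: memcg.tcp_mem.memory_allocated.usage */
	printf("tcp usage %lu, active %d\n", memcg.tcpmem.usage, memcg.tcpmem_active);
	return 0;
}
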
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
- if (memcg->tcp_mem.memory_pressure)
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
do {
if (time_before(jiffies, memcg->socket_pressure))
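
The rewritten check above distinguishes the two hierarchies: on cgroup v1 the legacy per-memcg tcpmem_pressure flag (set when the tcpmem counter runs into its limit) can decide on its own, while otherwise the decision falls through to the reclaim-driven socket_pressure time window. A runnable userspace model of that decision, which deliberately ignores the walk over ancestor memcgs that the real function performs:

#include <stdbool.h>
#include <stdio.h>

struct memcg_model {
	bool on_default_hierarchy;	/* cgroup_subsys_on_dfl(memory_cgrp_subsys) */
	int tcpmem_pressure;		/* v1: tcpmem counter ran into its limit */
	bool pressure_window_open;	/* time_before(jiffies, socket_pressure) */
};

static bool under_socket_pressure(const struct memcg_model *m)
{
	/* v1 only: the legacy flag is sufficient on its own */
	if (!m->on_default_hierarchy && m->tcpmem_pressure)
		return true;
	/* otherwise: reclaim recently signalled pressure for this memcg */
	return m->pressure_window_open;
}

int main(void)
{
	struct memcg_model v1 = { .on_default_hierarchy = false, .tcpmem_pressure = 1 };
	struct memcg_model v2 = { .on_default_hierarchy = true, .pressure_window_open = false };

	printf("v1: %d, v2: %d\n", under_socket_pressure(&v1), under_socket_pressure(&v2));
	return 0;
}
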
counter = &memcg->kmem;
break;
case _TCP:
- counter = &memcg->tcp_mem.memory_allocated;
+ counter = &memcg->tcpmem;
break;
default:
BUG();
mutex_lock(&memcg_limit_mutex);
- ret = page_counter_limit(&memcg->tcp_mem.memory_allocated, limit);
+ ret = page_counter_limit(&memcg->tcpmem, limit);
if (ret)
goto out;
- if (!memcg->tcp_mem.active) {
+ if (!memcg->tcpmem_active) {
/*
* The active flag needs to be written after the static_key
* update. This is what guarantees that the socket activation
* patched in yet.
*/
static_branch_inc(&memcg_sockets_enabled_key);
- memcg->tcp_mem.active = true;
+ memcg->tcpmem_active = true;
}
out:
mutex_unlock(&memcg_limit_mutex);
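
The comment in memcg_update_tcp_limit() is about ordering: the static key has to be flipped before tcpmem_active is set, because sockets are only tagged as accounted once that flag is up, and by then the charge/uncharge branches guarded by the key are already live. A userspace sketch of that publish-after-enable ordering, using C11 atomics as stand-ins for the static key and the flag (the kernel mechanism is runtime code patching, not an atomic load; this is only a model):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool accounting_patched_in;	/* ~ memcg_sockets_enabled_key */
static atomic_bool tcpmem_active;		/* ~ memcg->tcpmem_active */

static void enable_tcp_accounting(void)
{
	/* enable the accounting paths first ... */
	atomic_store(&accounting_patched_in, true);
	/* ... and only then let new sockets be marked as accounted,
	 * so a tagged socket can never miss its charges. */
	atomic_store(&tcpmem_active, true);
}

int main(void)
{
	enable_tcp_accounting();
	printf("patched=%d active=%d\n",
	       atomic_load(&accounting_patched_in), atomic_load(&tcpmem_active));
	return 0;
}
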
counter = &memcg->kmem;
break;
case _TCP:
- counter = &memcg->tcp_mem.memory_allocated;
+ counter = &memcg->tcpmem;
break;
default:
BUG();
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
- page_counter_init(&memcg->tcp_mem.memory_allocated,
- &parent->tcp_mem.memory_allocated);
+ page_counter_init(&memcg->tcpmem, &parent->tcpmem);
/*
* No need to take a reference to the parent because cgroup
memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
- page_counter_init(&memcg->tcp_mem.memory_allocated, NULL);
+ page_counter_init(&memcg->tcpmem, NULL);
/*
 * Deeper hierarchy with use_hierarchy == false doesn't make
* much sense so let cgroup subsystem know about this
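
The two initialization paths above differ only in the parent argument: a child memcg chains its tcpmem counter to the parent's so charges propagate upward, while the root passes NULL and terminates the chain. A small userspace model of that parent linkage (the page_counter_*_model names are local stand-ins, not the kernel functions):

#include <stddef.h>
#include <stdio.h>

struct page_counter {
	unsigned long usage;
	struct page_counter *parent;
};

static void page_counter_init_model(struct page_counter *c, struct page_counter *parent)
{
	c->usage = 0;
	c->parent = parent;
}

static void page_counter_charge_model(struct page_counter *c, unsigned long nr_pages)
{
	for (; c; c = c->parent)	/* charges land in every ancestor */
		c->usage += nr_pages;
}

int main(void)
{
	struct page_counter root, child;

	page_counter_init_model(&root, NULL);	/* root memcg: no parent */
	page_counter_init_model(&child, &root);	/* &parent->tcpmem in the patch */
	page_counter_charge_model(&child, 4);
	printf("child=%lu root=%lu\n", child.usage, root.usage);	/* 4 4 */
	return 0;
}
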
if (cgroup_subsys_on_dfl(memory_cgrp_subsys) && !cgroup_memory_nosocket)
static_branch_dec(&memcg_sockets_enabled_key);
- if (memcg->tcp_mem.active)
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_active)
static_branch_dec(&memcg_sockets_enabled_key);
memcg_free_kmem(memcg);
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
goto out;
- if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcp_mem.active)
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && !memcg->tcpmem_active)
goto out;
if (css_tryget_online(&memcg->css))
sk->sk_memcg = memcg;
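
In the socket-creation path above, a v1 socket is only associated with a memcg once that group's tcpmem_active has been switched on, and css_tryget_online() takes a reference that keeps the cgroup pinned while sk->sk_memcg points at it. A sketch of that pattern with a plain counter standing in for the css reference count (all names below are local stand-ins):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct memcg_model {
	bool tcpmem_active;
	int refcount;			/* stand-in for the css reference count */
};

struct sock_model {
	struct memcg_model *sk_memcg;
};

/* stand-in for css_tryget_online(): fails once the group is going away */
static bool tryget(struct memcg_model *memcg)
{
	if (memcg->refcount == 0)
		return false;
	memcg->refcount++;
	return true;
}

static void sk_alloc_memcg(struct sock_model *sk, struct memcg_model *memcg)
{
	if (!memcg->tcpmem_active)	/* v1: accounting never enabled here */
		return;
	if (tryget(memcg))
		sk->sk_memcg = memcg;	/* pinned until the socket is freed */
}

int main(void)
{
	struct memcg_model memcg = { .tcpmem_active = true, .refcount = 1 };
	struct sock_model sk = { 0 };

	sk_alloc_memcg(&sk, &memcg);
	printf("attached=%d refcount=%d\n", sk.sk_memcg != NULL, memcg.refcount);
	return 0;
}
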
gfp_t gfp_mask = GFP_KERNEL;
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
- struct page_counter *counter;
+ struct page_counter *fail;
- if (page_counter_try_charge(&memcg->tcp_mem.memory_allocated,
- nr_pages, &counter)) {
- memcg->tcp_mem.memory_pressure = 0;
+ if (page_counter_try_charge(&memcg->tcpmem, nr_pages, &fail)) {
+ memcg->tcpmem_pressure = 0;
return true;
}
- page_counter_charge(&memcg->tcp_mem.memory_allocated, nr_pages);
- memcg->tcp_mem.memory_pressure = 1;
+ page_counter_charge(&memcg->tcpmem, nr_pages);
+ memcg->tcpmem_pressure = 1;
return false;
}
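
The v1 branch of mem_cgroup_charge_skmem() above has deliberately unusual failure semantics: if the tcpmem counter cannot take the charge, the pages are charged anyway (pushing usage over the limit), tcpmem_pressure is raised, and false is returned so the network layer can start throttling. A runnable model of that try-then-force behaviour, also covering the matching uncharge shown below (the counter here is a plain limit/usage pair, not the kernel's page_counter):

#include <stdbool.h>
#include <stdio.h>

struct counter_model {
	unsigned long usage;
	unsigned long limit;
};

struct memcg_model {
	struct counter_model tcpmem;
	int tcpmem_pressure;
};

static bool charge_skmem_v1(struct memcg_model *memcg, unsigned long nr_pages)
{
	if (memcg->tcpmem.usage + nr_pages <= memcg->tcpmem.limit) {
		memcg->tcpmem.usage += nr_pages;	/* try_charge succeeded */
		memcg->tcpmem_pressure = 0;
		return true;
	}
	/* over the limit: charge anyway, flag pressure, report failure */
	memcg->tcpmem.usage += nr_pages;
	memcg->tcpmem_pressure = 1;
	return false;
}

static void uncharge_skmem_v1(struct memcg_model *memcg, unsigned long nr_pages)
{
	memcg->tcpmem.usage -= nr_pages;	/* pairs with every charge above */
}

int main(void)
{
	struct memcg_model memcg = { .tcpmem = { .usage = 0, .limit = 4 } };

	printf("within limit: %d\n", charge_skmem_v1(&memcg, 3));	/* 1 */
	printf("over limit:   %d\n", charge_skmem_v1(&memcg, 3));	/* 0, usage is now 6 */
	uncharge_skmem_v1(&memcg, 6);
	return 0;
}
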
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages)
{
if (!cgroup_subsys_on_dfl(memory_cgrp_subsys)) {
- page_counter_uncharge(&memcg->tcp_mem.memory_allocated,
- nr_pages);
+ page_counter_uncharge(&memcg->tcpmem, nr_pages);
return;
}