} ____cacheline_aligned_in_smp;
struct mem_cgroup_stat {
- struct mem_cgroup_stat_cpu cpustat[NR_CPUS];
+ struct mem_cgroup_stat_cpu cpustat[0];
};

int prev_priority; /* for recording reclaim priority */
/*
- * statistics.
+ * statistics. This must be placed at the end of memcg.
*/
struct mem_cgroup_stat stat;
};
-static struct mem_cgroup init_mem_cgroup;

enum charge_type {
MEM_CGROUP_CHARGE_TYPE_CACHE = 0,

kfree(mem->info.nodeinfo[node]);
}

+static int mem_cgroup_size(void)
+{
+ int cpustat_size = nr_cpu_ids * sizeof(struct mem_cgroup_stat_cpu);
+ return sizeof(struct mem_cgroup) + cpustat_size;
+}
+
static struct mem_cgroup *mem_cgroup_alloc(void)
{
struct mem_cgroup *mem;
+ int size = mem_cgroup_size();
- if (sizeof(*mem) < PAGE_SIZE)
- mem = kmalloc(sizeof(*mem), GFP_KERNEL);
+ if (size < PAGE_SIZE)
+ mem = kmalloc(size, GFP_KERNEL);
else
- mem = vmalloc(sizeof(*mem));
+ mem = vmalloc(size);
if (mem)
- memset(mem, 0, sizeof(*mem));
+ memset(mem, 0, size);
return mem;
}

static void mem_cgroup_free(struct mem_cgroup *mem)
{
- if (sizeof(*mem) < PAGE_SIZE)
+ if (mem_cgroup_size() < PAGE_SIZE)
kfree(mem);
else
vfree(mem);

struct mem_cgroup *mem;
int node;
- if (unlikely((cont->parent) == NULL)) {
- mem = &init_mem_cgroup;
- } else {
- mem = mem_cgroup_alloc();
- if (!mem)
- return ERR_PTR(-ENOMEM);
- }
+ mem = mem_cgroup_alloc();
+ if (!mem)
+ return ERR_PTR(-ENOMEM);
res_counter_init(&mem->res);

free_out:
for_each_node_state(node, N_POSSIBLE)
free_mem_cgroup_per_zone_info(mem, node);
- if (cont->parent != NULL)
- mem_cgroup_free(mem);
+ mem_cgroup_free(mem);
return ERR_PTR(-ENOMEM);
}
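
For reference, the change hinges on the zero-length array trick: because cpustat[0] contributes nothing to sizeof(struct mem_cgroup), the per-cpu slots live entirely in the extra bytes that mem_cgroup_size() adds on top, which is also why the comment now insists that stat stay the last member of the struct. Below is a minimal userspace sketch of the same pattern, not part of the patch: the names (demo_memcg, demo_stat_cpu, demo_alloc, ncpus) are made up for illustration, malloc() stands in for kmalloc()/vmalloc(), and [0] is the GNU C zero-length array (standard C would spell it as a flexible array member, []).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_stat_cpu {
	long count[4];
};

struct demo_memcg {
	int prev_priority;
	/* must stay last: per-cpu slots live past the declared struct size */
	struct demo_stat_cpu cpustat[0];
};

static struct demo_memcg *demo_alloc(int ncpus)
{
	size_t size = sizeof(struct demo_memcg)
		      + ncpus * sizeof(struct demo_stat_cpu);
	struct demo_memcg *mem = malloc(size);

	if (mem)
		memset(mem, 0, size);	/* zero header and all per-cpu slots */
	return mem;
}

int main(void)
{
	int ncpus = 4;			/* stands in for nr_cpu_ids */
	struct demo_memcg *mem = demo_alloc(ncpus);

	if (!mem)
		return 1;
	mem->cpustat[ncpus - 1].count[0] = 42;	/* still within the allocation */
	printf("last cpu, counter 0: %ld\n", mem->cpustat[ncpus - 1].count[0]);
	free(mem);
	return 0;
}

The win is that the array is now sized by nr_cpu_ids, the number of possible CPUs determined at boot, rather than the compile-time NR_CPUS maximum, so a machine with few CPUs running a big-SMP kernel no longer pays for unused per-cpu slots; and since the struct size is no longer a compile-time constant, the static init_mem_cgroup instance has to go and even the root cgroup is allocated through mem_cgroup_alloc().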