memcg: Fix race condition in memcg_check_events() with this_cpu usage
author     Steven Rostedt <srostedt@redhat.com>
           Wed, 2 Nov 2011 20:38:33 +0000 (13:38 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Wed, 2 Nov 2011 23:07:00 +0000 (16:07 -0700)
Various code in memcontrol.c (memcg_check_events() and its helpers) calls
this_cpu_read() to gather values from two different percpu variables for a
single calculation, or does an open-coded read-modify-write on a single
percpu variable.

Disable preemption throughout these operations so that the reads and writes
all target the same CPU's data and the writes go to the correct places.
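
For illustration only (this is not the actual memcontrol.c code), a minimal
kernel-style sketch of the racy pattern and the fixed pattern; the percpu
variable and function names below are made up, and the sketch assumes a
kernel build context providing <linux/percpu.h> and <linux/preempt.h>:

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	/* Illustrative percpu counters, not the real memcg fields. */
	static DEFINE_PER_CPU(unsigned long, demo_events);
	static DEFINE_PER_CPU(unsigned long, demo_target);

	/*
	 * Racy: preemption between the two reads can migrate the task,
	 * so val and next may come from two different CPUs' counters.
	 */
	static bool demo_check_racy(void)
	{
		unsigned long val  = this_cpu_read(demo_events);
		unsigned long next = this_cpu_read(demo_target);

		return (long)next - (long)val < 0;
	}

	/*
	 * Fixed: preemption is off for the whole sequence, so both reads
	 * hit the same CPU's data and the cheaper non-atomic
	 * __this_cpu_read() variant is also safe to use.
	 */
	static bool demo_check_fixed(void)
	{
		unsigned long val, next;
		bool ret;

		preempt_disable();
		val  = __this_cpu_read(demo_events);
		next = __this_cpu_read(demo_target);
		ret  = (long)next - (long)val < 0;
		preempt_enable();

		return ret;
	}

In the patch below, preemption is disabled once in memcg_check_events(), so
the helpers it calls can be converted to the __this_cpu_* variants.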

[hannes@cmpxchg.org: added this_cpu to __this_cpu conversion]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Greg Thelen <gthelen@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/memcontrol.c

index 01e0c725de659826aac9872de632985e1baa47c7..7af1d5ee1598d485ced61f57c1b165fac4e0808d 100644
@@ -686,8 +686,8 @@ static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
 {
        unsigned long val, next;
 
-       val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
-       next = this_cpu_read(memcg->stat->targets[target]);
+       val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+       next = __this_cpu_read(memcg->stat->targets[target]);
        /* from time_after() in jiffies.h */
        return ((long)next - (long)val < 0);
 }
@@ -696,7 +696,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
 {
        unsigned long val, next;
 
-       val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
+       val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
 
        switch (target) {
        case MEM_CGROUP_TARGET_THRESH:
@@ -712,7 +712,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
                return;
        }
 
-       this_cpu_write(memcg->stat->targets[target], next);
+       __this_cpu_write(memcg->stat->targets[target], next);
 }
 
 /*
@@ -721,6 +721,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
  */
 static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
 {
+       preempt_disable();
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
                mem_cgroup_threshold(memcg);
@@ -740,6 +741,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
                }
 #endif
        }
+       preempt_enable();
 }
 
 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)