rescounters: add res_counter_uncharge_until()
authorFrederic Weisbecker <fweisbec@gmail.com>
Tue, 29 May 2012 22:07:03 +0000 (15:07 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 29 May 2012 23:22:27 +0000 (16:22 -0700)
When killing a res_counter which is a child of other counter, we need to
do

res_counter_uncharge(child, xxx)
res_counter_charge(parent, xxx)

This is not atomic and wastes CPU.  This patch adds
res_counter_uncharge_until().  This function's uncharge propagates to
ancestors until specified res_counter.

res_counter_uncharge_until(child, parent, xxx)

Now the operation is atomic and efficient.

Signed-off-by: Frederic Weisbecker <fweisbec@redhat.com>
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Ying Han <yinghan@google.com>
Cc: Glauber Costa <glommer@parallels.com>
Reviewed-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Documentation/cgroups/resource_counter.txt
include/linux/res_counter.h
kernel/res_counter.c

index f3c4ec3626a280bfd1b7f8811c9ea2cc3502f213..0c4a344e78fa4c32693bf231240597bee5a8fe2b 100644 (file)
@@ -92,6 +92,14 @@ to work with it.
 
        The _locked routines imply that the res_counter->lock is taken.
 
+ f. void res_counter_uncharge_until
+               (struct res_counter *rc, struct res_counter *top,
+                unsigned long val)
+
+       Almost the same as res_counter_uncharge() but propagation of uncharge
+       stops when rc == top. This is useful when killing a res_counter in a
+       child cgroup.
+
  2.1 Other accounting routines
 
     There are more routines that may help you with common needs, like
index fb201896a8b07136db13bcef486fdb382b7ee3c2..5de7a146ead929e21ea3c988677b2eb2c0314efd 100644 (file)
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
 void res_counter_uncharge(struct res_counter *counter, unsigned long val);
 
+void res_counter_uncharge_until(struct res_counter *counter,
+                               struct res_counter *top,
+                               unsigned long val);
 /**
  * res_counter_margin - calculate chargeable space of a counter
  * @cnt: the counter
index bebe2b170d49ffd4c5b96590114f40d4d9fb69f9..ad581aa2369a2ed8f925c395b2b4eadd9d8640f2 100644 (file)
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
        counter->usage -= val;
 }
 
-void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+void res_counter_uncharge_until(struct res_counter *counter,
+                               struct res_counter *top,
+                               unsigned long val)
 {
        unsigned long flags;
        struct res_counter *c;
 
        local_irq_save(flags);
-       for (c = counter; c != NULL; c = c->parent) {
+       for (c = counter; c != top; c = c->parent) {
                spin_lock(&c->lock);
                res_counter_uncharge_locked(c, val);
                spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
        local_irq_restore(flags);
 }
 
+void res_counter_uncharge(struct res_counter *counter, unsigned long val)
+{
+       res_counter_uncharge_until(counter, NULL, val);
+}
 
 static inline unsigned long long *
 res_counter_member(struct res_counter *counter, int member)