mm: memcontrol: remove obsolete kmemcg pinning tricks
authorJohannes Weiner <hannes@cmpxchg.org>
Wed, 10 Dec 2014 23:42:45 +0000 (15:42 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Thu, 11 Dec 2014 01:41:05 +0000 (17:41 -0800)
As charges now pin the css explicitly, there is no more need for kmemcg
to acquire a proxy reference for outstanding pages during offlining, or
maintain state to identify such "dead" groups.

This was the last user of the uncharge functions' return values, so remove
them as well.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Vladimir Davydov <vdavydov@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: David Rientjes <rientjes@google.com>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/page_counter.h
mm/memcontrol.c
mm/page_counter.c

index 7cce3be99ff36f30661a966f9f6a65c17012d4bc..955421575d16aa60ec425bfd28d458804c3d4113 100644 (file)
@@ -34,12 +34,12 @@ static inline unsigned long page_counter_read(struct page_counter *counter)
        return atomic_long_read(&counter->count);
 }
 
-int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages);
 void page_counter_charge(struct page_counter *counter, unsigned long nr_pages);
 int page_counter_try_charge(struct page_counter *counter,
                            unsigned long nr_pages,
                            struct page_counter **fail);
-int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
+void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages);
 int page_counter_limit(struct page_counter *counter, unsigned long limit);
 int page_counter_memparse(const char *buf, unsigned long *nr_pages);
 
index f69da2ac632357bd17dab88fae1f2bafc88cab3d..0e6484ea268d6be661f19db8e1cac31657f69d6c 100644 (file)
@@ -369,7 +369,6 @@ struct mem_cgroup {
 /* internal only representation about the status of kmem accounting. */
 enum {
        KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */
-       KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
 };
 
 #ifdef CONFIG_MEMCG_KMEM
@@ -383,22 +382,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
        return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 }
 
-static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
-{
-       /*
-        * Our caller must use css_get() first, because memcg_uncharge_kmem()
-        * will call css_put() if it sees the memcg is dead.
-        */
-       smp_wmb();
-       if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
-               set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags);
-}
-
-static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg)
-{
-       return test_and_clear_bit(KMEM_ACCOUNTED_DEAD,
-                                 &memcg->kmem_account_flags);
-}
 #endif
 
 /* Stuffs for move charges at task migration. */
@@ -2758,22 +2741,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg,
        if (do_swap_account)
                page_counter_uncharge(&memcg->memsw, nr_pages);
 
-       /* Not down to 0 */
-       if (page_counter_uncharge(&memcg->kmem, nr_pages)) {
-               css_put_many(&memcg->css, nr_pages);
-               return;
-       }
-
-       /*
-        * Releases a reference taken in kmem_cgroup_css_offline in case
-        * this last uncharge is racing with the offlining code or it is
-        * outliving the memcg existence.
-        *
-        * The memory barrier imposed by test&clear is paired with the
-        * explicit one in memcg_kmem_mark_dead().
-        */
-       if (memcg_kmem_test_and_clear_dead(memcg))
-               css_put(&memcg->css);
+       page_counter_uncharge(&memcg->kmem, nr_pages);
 
        css_put_many(&memcg->css, nr_pages);
 }
@@ -4757,40 +4725,6 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
        mem_cgroup_sockets_destroy(memcg);
 }
-
-static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
-{
-       if (!memcg_kmem_is_active(memcg))
-               return;
-
-       /*
-        * kmem charges can outlive the cgroup. In the case of slab
-        * pages, for instance, a page contain objects from various
-        * processes. As we prevent from taking a reference for every
-        * such allocation we have to be careful when doing uncharge
-        * (see memcg_uncharge_kmem) and here during offlining.
-        *
-        * The idea is that that only the _last_ uncharge which sees
-        * the dead memcg will drop the last reference. An additional
-        * reference is taken here before the group is marked dead
-        * which is then paired with css_put during uncharge resp. here.
-        *
-        * Although this might sound strange as this path is called from
-        * css_offline() when the referencemight have dropped down to 0 and
-        * shouldn't be incremented anymore (css_tryget_online() would
-        * fail) we do not have other options because of the kmem
-        * allocations lifetime.
-        */
-       css_get(&memcg->css);
-
-       memcg_kmem_mark_dead(memcg);
-
-       if (page_counter_read(&memcg->kmem))
-               return;
-
-       if (memcg_kmem_test_and_clear_dead(memcg))
-               css_put(&memcg->css);
-}
 #else
 static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
@@ -4800,10 +4734,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 static void memcg_destroy_kmem(struct mem_cgroup *memcg)
 {
 }
-
-static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
-{
-}
 #endif
 
 /*
@@ -5407,8 +5337,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
        }
        spin_unlock(&memcg->event_list_lock);
 
-       kmem_cgroup_css_offline(memcg);
-
        /*
         * This requires that offlining is serialized.  Right now that is
         * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
index f0cbc08254266d2073f88e2b9f40a8becd66a361..a009574fbba9976bce5cdb05c096a008055388a4 100644 (file)
  * page_counter_cancel - take pages out of the local counter
  * @counter: counter
  * @nr_pages: number of pages to cancel
- *
- * Returns whether there are remaining pages in the counter.
  */
-int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
+void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages)
 {
        long new;
 
        new = atomic_long_sub_return(nr_pages, &counter->count);
-
        /* More uncharges than charges? */
        WARN_ON_ONCE(new < 0);
-
-       return new > 0;
 }
 
 /**
@@ -117,23 +112,13 @@ failed:
  * page_counter_uncharge - hierarchically uncharge pages
  * @counter: counter
  * @nr_pages: number of pages to uncharge
- *
- * Returns whether there are remaining charges in @counter.
  */
-int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
+void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages)
 {
        struct page_counter *c;
-       int ret = 1;
 
-       for (c = counter; c; c = c->parent) {
-               int remainder;
-
-               remainder = page_counter_cancel(c, nr_pages);
-               if (c == counter && !remainder)
-                       ret = 0;
-       }
-
-       return ret;
+       for (c = counter; c; c = c->parent)
+               page_counter_cancel(c, nr_pages);
 }
 
 /**