ksm: don't fail stable tree lookups if walking over stale stable_nodes
author	Andrea Arcangeli <aarcange@redhat.com>
Fri, 6 Nov 2015 02:49:10 +0000 (18:49 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Fri, 6 Nov 2015 03:34:48 +0000 (19:34 -0800)
The stable_nodes can become stale at any time if the underlying pages get
freed.  A stale stable_node is collected and removed from the stable rbtree
if that is detected during the rbtree lookups.
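
For context, the stale detection happens inside get_ksm_page(): if the
stable_node no longer points to a live KSM page, the node is unlinked from
the rbtree before NULL is returned.  A simplified sketch of that path (the
real code in mm/ksm.c also handles RCU, the page refcount, migration races
and the optional page lock):

	/* simplified sketch of the stale path in get_ksm_page() */
	expected_mapping = (void *)stable_node +
			   (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
	page = pfn_to_page(READ_ONCE(stable_node->kpfn));
	if (READ_ONCE(page->mapping) != expected_mapping) {
		/*
		 * The page was freed or reused:
		 * remove_node_from_stable_tree() unlinks the stale
		 * stable_node with rb_erase(), which may rebalance
		 * the stable rbtree under the caller.
		 */
		remove_node_from_stable_tree(stable_node);
		return NULL;
	}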

Don't fail the lookup if we run into a stale stable_node; just restart the
lookup after collecting the stale stable_node.  Otherwise the CPU spent in
the preparation stage is wasted, and the lookup must be repeated at the
next loop, potentially failing a second time on a second stale stable_node.
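
Condensed from the hunks below, the fixed lookup loop then takes this
shape (the descent and merge logic is elided):

	again:
		new = &root->rb_node;
		while (*new) {
			stable_node = rb_entry(*new, struct stable_node, node);
			tree_page = get_ksm_page(stable_node, false);
			if (!tree_page) {
				/*
				 * The stale node was erased and the tree
				 * may have rebalanced: restart from the root.
				 */
				goto again;
			}
			/* ... memcmp_pages() and descend left or right ... */
		}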

If we don't prune aggressively, we delay the merging of the unstable node
candidates and at the same time we delay the freeing of the stale
stable_nodes.  Keeping stale stable_nodes around wastes memory and
provides no benefit.

Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Petr Holasek <pholasek@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/ksm.c

index e87dec7ba950231aa87ac3415e74adc575f228db..9f182f99ff5e5e89c87eba4bd4df4afa6a683026 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1177,8 +1177,18 @@ again:
                cond_resched();
                stable_node = rb_entry(*new, struct stable_node, node);
                tree_page = get_ksm_page(stable_node, false);
-               if (!tree_page)
-                       return NULL;
+               if (!tree_page) {
+                       /*
+                        * If we walked over a stale stable_node,
+                        * get_ksm_page() will call rb_erase() and it
+                        * may rebalance the tree from under us. So
+                        * restart the search from scratch. Returning
+                        * NULL would be safe too, but we'd generate
+                        * false negative insertions just because some
+                        * stable_node was stale.
+                        */
+                       goto again;
+               }
 
                ret = memcmp_pages(page, tree_page);
                put_page(tree_page);
@@ -1254,12 +1264,14 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
        unsigned long kpfn;
        struct rb_root *root;
        struct rb_node **new;
-       struct rb_node *parent = NULL;
+       struct rb_node *parent;
        struct stable_node *stable_node;
 
        kpfn = page_to_pfn(kpage);
        nid = get_kpfn_nid(kpfn);
        root = root_stable_tree + nid;
+again:
+       parent = NULL;
        new = &root->rb_node;
 
        while (*new) {
@@ -1269,8 +1281,18 @@ static struct stable_node *stable_tree_insert(struct page *kpage)
                cond_resched();
                stable_node = rb_entry(*new, struct stable_node, node);
                tree_page = get_ksm_page(stable_node, false);
-               if (!tree_page)
-                       return NULL;
+               if (!tree_page) {
+                       /*
+                        * If we walked over a stale stable_node,
+                        * get_ksm_page() will call rb_erase() and it
+                        * may rebalance the tree from under us. So
+                        * restart the search from scratch. Returning
+                        * NULL would be safe too, but we'd generate
+                        * false negative insertions just because some
+                        * stable_node was stale.
+                        */
+                       goto again;
+               }
 
                ret = memcmp_pages(kpage, tree_page);
                put_page(tree_page);