x86, pat: Fix cacheflush address in change_page_attr_set_clr()
author    Jack Steiner <steiner@sgi.com>
Thu, 3 Sep 2009 17:56:02 +0000 (12:56 -0500)
committer H. Peter Anvin <hpa@zytor.com>
Wed, 9 Sep 2009 21:05:24 +0000 (14:05 -0700)
Fix the address passed to cpa_flush_range() when changing page
attributes from WB to UC. The address (*addr) is modified by
__change_page_attr_set_clr(), so the pages being flushed start at
the _end_ of the changed range instead of the beginning.
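
For illustration only (not part of the patch): a minimal user-space
sketch of the pattern, using made-up names (process_range,
flush_range) in place of the kernel helpers. The callee advances the
caller's address the way __change_page_attr_set_clr() does, so the
caller must snapshot the base address before the call if it later
flushes the range:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    /* Advances *addr past the processed range, like
     * __change_page_attr_set_clr() does with the cpa address. */
    static void process_range(unsigned long *addr, int numpages)
    {
            int i;

            for (i = 0; i < numpages; i++)
                    *addr += PAGE_SIZE;
    }

    static void flush_range(unsigned long start, int numpages)
    {
            printf("flush %d pages from 0x%lx\n", numpages, start);
    }

    int main(void)
    {
            unsigned long addr = 0x100000UL;
            unsigned long baddr = addr;    /* snapshot, as the patch adds */

            process_range(&addr, 4);

            flush_range(addr, 4);    /* buggy: flushes from the end of the range */
            flush_range(baddr, 4);   /* fixed: flushes from the original base    */
            return 0;
    }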

This should be considered for 2.6.30-stable and 2.6.31-stable.

Signed-off-by: Jack Steiner <steiner@sgi.com>
Acked-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Cc: Stable team <stable@kernel.org>
arch/x86/mm/pageattr.c

index 7e600c1962db0c77d2dcb66559b75aff79672580..e245775ec8566bea3edc3cd72cbcde7ab5f3295e 100644
@@ -822,6 +822,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 {
        struct cpa_data cpa;
        int ret, cache, checkalias;
+       unsigned long baddr = 0;
 
        /*
         * Check, if we are requested to change a not supported
@@ -853,6 +854,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                         */
                        WARN_ON_ONCE(1);
                }
+               /*
+                * Save address for cache flush. *addr is modified in the call
+                * to __change_page_attr_set_clr() below.
+                */
+               baddr = *addr;
        }
 
        /* Must avoid aliasing mappings in the highmem code */
@@ -900,7 +906,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
-                       cpa_flush_range(*addr, numpages, cache);
+                       cpa_flush_range(baddr, numpages, cache);
        } else
                cpa_flush_all(cache);