x86: shrink pat_x_mtrr_type to its essentials
authorAndreas Herrmann <andreas.herrmann3@amd.com>
Wed, 18 Jun 2008 13:38:57 +0000 (15:38 +0200)
committerIngo Molnar <mingo@elte.hu>
Thu, 19 Jun 2008 10:57:40 +0000 (12:57 +0200)

Fold the early returns for WC, UC_MINUS and UC into a single final
"return req_type": the MTRR lookup is only relevant when the PAT
request is WB, so consult mtrr_type_lookup() inside that one branch
and hand every other request type back unchanged. No functional
change — consistency checks against other PAT requests are still
done later while walking the memtype list.
Signed-off-by: Andreas Herrmann <andreas.herrmann3@amd.com>
Cc: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Cc: Suresh B Siddha <suresh.b.siddha@intel.com>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
arch/x86/mm/pat.c

index ac3a2b11eb38697a74608b08fbc29c97984d9d96..227df3ca9bfd7523a72a12d0f28b82f106e94d32 100644 (file)
@@ -161,29 +161,21 @@ static DEFINE_SPINLOCK(memtype_lock);     /* protects memtype list */
  */
 static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
 {
-       u8 mtrr_type;
-
-       /*
-        * We return the PAT request directly for types where PAT takes
-        * precedence with respect to MTRR and for UC_MINUS.
-        * Consistency checks with other PAT requests is done later
-        * while going through memtype list.
-        */
-       if (req_type == _PAGE_CACHE_WC ||
-           req_type == _PAGE_CACHE_UC_MINUS ||
-           req_type == _PAGE_CACHE_UC)
-               return req_type;
-
        /*
         * Look for MTRR hint to get the effective type in case where PAT
         * request is for WB.
         */
-       mtrr_type = mtrr_type_lookup(start, end);
-       if (mtrr_type == MTRR_TYPE_UNCACHABLE)
-               return _PAGE_CACHE_UC;
-       if (mtrr_type == MTRR_TYPE_WRCOMB)
-               return _PAGE_CACHE_WC;
-       return _PAGE_CACHE_WB;
+       if (req_type == _PAGE_CACHE_WB) {
+               u8 mtrr_type;
+
+               mtrr_type = mtrr_type_lookup(start, end);
+               if (mtrr_type == MTRR_TYPE_UNCACHABLE)
+                       return _PAGE_CACHE_UC;
+               if (mtrr_type == MTRR_TYPE_WRCOMB)
+                       return _PAGE_CACHE_WC;
+       }
+
+       return req_type;
 }
 
 /*