From: Ingo Molnar <mingo@elte.hu>
Date: Wed, 30 Jan 2008 12:34:09 +0000 (+0100)
Subject: x86: fix clflush_cache_range logic
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=4c61afcdb2cd4be299c1442b33adf312b695e2d7;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

x86: fix clflush_cache_range logic

Only present ptes must be flushed: clflush takes a page fault when it
touches an address that is not mapped, so cpa_flush_range() must look
up the pte for each page in the range and skip pages that are not
present.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
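
For reference, the new flush loop can be sketched in user space with the
SSE2 intrinsics _mm_mfence() and _mm_clflush(). This is a minimal
sketch, assuming a fixed 64-byte line size and size > 0; the kernel
instead reads boot_cpu_data.x86_clflush_size at boot and uses its own
mb()/clflush() helpers:

	#include <emmintrin.h>	/* _mm_clflush(), _mm_mfence() (SSE2) */
	#include <stdio.h>
	#include <stdlib.h>

	/* Assumed cache line size; the kernel uses the CPU-reported
	 * boot_cpu_data.x86_clflush_size instead. */
	#define CLFLUSH_SIZE	64

	/* Mirrors the patched clflush_cache_range(): flush every cache
	 * line overlapping [addr, addr + size), final partial line
	 * included. */
	static void clflush_cache_range(void *addr, unsigned int size)
	{
		char *vaddr = addr;
		char *vend = vaddr + size - 1;	/* last byte of the range */

		_mm_mfence();
		for (; vaddr < vend; vaddr += CLFLUSH_SIZE)
			_mm_clflush(vaddr);
		/*
		 * vend points at the last byte, so this also catches a
		 * trailing partial cacheline:
		 */
		_mm_clflush(vend);
		_mm_mfence();
	}

	int main(void)
	{
		char *buf = malloc(4096);

		if (!buf)
			return 1;
		clflush_cache_range(buf + 3, 200);	/* unaligned start and length */
		puts("range flushed");
		free(buf);
		return 0;
	}

Flushing through the last byte, rather than iterating a byte count from
the start, is what keeps ranges correct when they do not end on a
cacheline boundary.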

diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index bbfc8e2466ab..97ec9e7d29d9 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -26,7 +26,6 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * Flushing functions
  */
 
-
 /**
  * clflush_cache_range - flush a cache range with clflush
  * @addr:	virtual start address
@@ -35,13 +34,19 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  * clflush is an unordered instruction which needs fencing with mfence
  * to avoid ordering issues.
  */
-void clflush_cache_range(void *addr, int size)
+void clflush_cache_range(void *vaddr, unsigned int size)
 {
-	int i;
+	void *vend = vaddr + size - 1;
 
 	mb();
-	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
-		clflush(addr+i);
+
+	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
+		clflush(vaddr);
+	/*
+	 * Flush any possible final partial cacheline:
+	 */
+	clflush(vend);
+
 	mb();
 }
 
@@ -74,9 +79,13 @@ static void __cpa_flush_range(void *arg)
 	__flush_tlb_all();
 }
 
-static void cpa_flush_range(unsigned long addr, int numpages)
+static void cpa_flush_range(unsigned long start, int numpages)
 {
+	unsigned int i, level;
+	unsigned long addr;
+
 	BUG_ON(irqs_disabled());
+	WARN_ON(PAGE_ALIGN(start) != start);
 
 	on_each_cpu(__cpa_flush_range, NULL, 1, 1);
 
@@ -86,7 +95,15 @@ static void cpa_flush_range(unsigned long addr, int numpages)
 	 * will cause all other CPUs to flush the same
 	 * cachelines:
 	 */
-	clflush_cache_range((void *) addr, numpages * PAGE_SIZE);
+	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
+		pte_t *pte = lookup_address(addr, &level);
+
+		/*
+		 * Only flush present addresses:
+		 */
+		if (pte && pte_present(*pte))
+			clflush_cache_range((void *) addr, PAGE_SIZE);
+	}
 }
 
 /*
diff --git a/include/asm-x86/cacheflush.h b/include/asm-x86/cacheflush.h
index 3e74aff90809..8dd8c5e3cc7f 100644
--- a/include/asm-x86/cacheflush.h
+++ b/include/asm-x86/cacheflush.h
@@ -42,7 +42,7 @@ int set_memory_ro(unsigned long addr, int numpages);
 int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_np(unsigned long addr, int numpages);
 
-void clflush_cache_range(void *addr, int size);
+void clflush_cache_range(void *addr, unsigned int size);
 
 #ifdef CONFIG_DEBUG_RODATA
 void mark_rodata_ro(void);
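
The per-page walk added to cpa_flush_range() can be modeled with
stand-in page tables. This is a toy sketch: the fake_ptes table, the
single-level lookup_address() and the printf() stand-in for the real
flush are illustrative placeholders, not the kernel's implementations:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define _PAGE_PRESENT	0x001UL

	typedef struct { uint64_t val; } pte_t;

	/* Fake ptes covering an eight-page test "address space";
	 * pages 0, 2, 3 and 6 are present. */
	static pte_t fake_ptes[8] = {
		{ _PAGE_PRESENT }, { 0 }, { _PAGE_PRESENT }, { _PAGE_PRESENT },
		{ 0 }, { 0 }, { _PAGE_PRESENT }, { 0 },
	};

	static pte_t *lookup_address(unsigned long addr, unsigned int *level)
	{
		*level = 1;	/* pretend everything maps at the 4K level */
		return &fake_ptes[(addr / PAGE_SIZE) % 8];
	}

	static int pte_present(pte_t pte)
	{
		return pte.val & _PAGE_PRESENT;
	}

	static void clflush_cache_range(void *vaddr, unsigned int size)
	{
		printf("flush %p + %u\n", vaddr, size);	/* stand-in for clflush */
	}

	/* Mirrors the patched cpa_flush_range() loop: walk the range
	 * page by page and flush only pages whose pte exists and is
	 * present. */
	static void cpa_flush_range(unsigned long start, int numpages)
	{
		unsigned int i, level;
		unsigned long addr;

		for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
			pte_t *pte = lookup_address(addr, &level);

			if (pte && pte_present(*pte))
				clflush_cache_range((void *) addr, PAGE_SIZE);
		}
	}

	int main(void)
	{
		cpa_flush_range(0, 8);	/* flushes only pages 0, 2, 3 and 6 */
		return 0;
	}

Running the sketch prints one "flush" line per present page and nothing
for the non-present ones, which is exactly the behavior the fix
introduces.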