From: Arjan van de Ven
Date: Wed, 30 Jan 2008 12:34:07 +0000 (+0100)
Subject: x86: fix pageattr-selftest
X-Git-Url: https://git.stricted.de/?a=commitdiff_plain;h=488fd99588bf23da951b524a806e44feaa1aa366;p=GitHub%2FLineageOS%2FG12%2Fandroid_kernel_amlogic_linux-4.9.git

x86: fix pageattr-selftest

In Ingo's testing, he found a bug in the CPA selftest code. What would
happen is that the test would call change_page_attr_addr on a range of
memory, part of which was read only, part of which was writable. The
only thing the test wanted to change was the global bit...

What actually happened was that the selftest would take the permissions
of the first page, and then the change_page_attr_addr call would then
set the permissions of the entire range to this first page. In the
rodata section case, this resulted in pages after the .rodata becoming
read only... which made the kernel rather unhappy in many interesting
ways.

This is just another example of how dangerous the cpa API is (was);
this patch changes the test to use the incremental clear/set APIs
instead, and it changes the clear/set implementation to work on a
1 page at a time basis.

Signed-off-by: Arjan van de Ven
Signed-off-by: Ingo Molnar
Signed-off-by: Thomas Gleixner
---

diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 6a41a0f0c149..fe73905d075e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -162,8 +162,8 @@ static __init int exercise_pageattr(void)
 			continue;
 		}
 
-		err = change_page_attr_addr(addr[i], len[i],
-			    pte_pgprot(pte_clrhuge(pte_clrglobal(pte0))));
+		err = change_page_attr_clear(addr[i], len[i],
+					     __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA %d failed %d\n", i, err);
 			failed++;
@@ -197,8 +197,8 @@ static __init int exercise_pageattr(void)
 			failed++;
 			continue;
 		}
-		err = change_page_attr_addr(addr[i], len[i],
-					    pte_pgprot(pte_mkglobal(*pte)));
+		err = change_page_attr_set(addr[i], len[i],
+					   __pgprot(_PAGE_GLOBAL));
 		if (err < 0) {
 			printk(KERN_ERR "CPA reverting failed: %d\n", err);
 			failed++;
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index a2d747c06147..23f0aa3d01c1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -228,7 +228,6 @@ repeat:
 /**
  * change_page_attr_addr - Change page table attributes in linear mapping
  * @address: Virtual address in linear mapping.
- * @numpages: Number of pages to change
  * @prot: New page table attribute (PAGE_*)
 *
  * Change page attributes of a page in the direct mapping. This is a variant
@@ -240,10 +239,10 @@ repeat:
 *
  * Modules and drivers should use the set_memory_* APIs instead.
  */
-static int change_page_attr_addr(unsigned long address, int numpages,
-				 pgprot_t prot)
+static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 {
-	int err = 0, kernel_map = 0, i;
+	int err = 0, kernel_map = 0;
+	unsigned long pfn = __pa(address) >> PAGE_SHIFT;
 
 #ifdef CONFIG_X86_64
 	if (address >= __START_KERNEL_map &&
@@ -254,30 +253,27 @@ static int change_page_attr_addr(unsigned long address, int numpages,
 	}
 #endif
 
-	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
-		unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
+		err = __change_page_attr(address, pfn, prot);
+		if (err)
+			return err;
+	}
 
-		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-			err = __change_page_attr(address, pfn, prot);
-			if (err)
-				break;
-		}
 #ifdef CONFIG_X86_64
-		/*
-		 * Handle kernel mapping too which aliases part of
-		 * lowmem:
-		 */
-		if (__pa(address) < KERNEL_TEXT_SIZE) {
-			unsigned long addr2;
-			pgprot_t prot2;
-
-			addr2 = __START_KERNEL_map + __pa(address);
-			/* Make sure the kernel mappings stay executable */
-			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-			err = __change_page_attr(addr2, pfn, prot2);
-		}
-#endif
+	/*
+	 * Handle kernel mapping too which aliases part of
+	 * lowmem:
+	 */
+	if (__pa(address) < KERNEL_TEXT_SIZE) {
+		unsigned long addr2;
+		pgprot_t prot2;
+
+		addr2 = __START_KERNEL_map + __pa(address);
+		/* Make sure the kernel mappings stay executable */
+		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
+		err = __change_page_attr(addr2, pfn, prot2);
 	}
+#endif
 
 	return err;
 }
@@ -307,16 +303,24 @@ static int change_page_attr_set(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
+	int i, ret;
 
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
+	for (i = 0; i < numpages ; i++) {
 
-	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
 
-	return change_page_attr_addr(addr, numpages, prot);
+		pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 
 /**
@@ -344,16 +348,24 @@ static int change_page_attr_clear(unsigned long addr, int numpages,
 	pgprot_t current_prot;
 	int level;
 	pte_t *pte;
-
-	pte = lookup_address(addr, &level);
-	if (pte)
-		current_prot = pte_pgprot(*pte);
-	else
-		pgprot_val(current_prot) = 0;
-
-	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);
-
-	return change_page_attr_addr(addr, numpages, prot);
+	int i, ret;
+
+	for (i = 0; i < numpages; i++) {
+		pte = lookup_address(addr, &level);
+		if (pte)
+			current_prot = pte_pgprot(*pte);
+		else
+			pgprot_val(current_prot) = 0;
+
+		pgprot_val(prot) =
+			pgprot_val(current_prot) & ~pgprot_val(prot);
+
+		ret = change_page_attr_addr(addr, prot);
+		if (ret)
+			return ret;
+		addr += PAGE_SIZE;
+	}
+	return 0;
 }
 
 int set_memory_uc(unsigned long addr, int numpages)
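
Aside (not part of the patch above): the failure mode described in the
commit message can be illustrated with a small, self-contained C sketch
that models per-page protection bits as a plain array. Every name in it
(NPAGES, RW_BIT, GLOBAL_BIT, initial_prot, dump) is made up for
illustration and is not the kernel's CPA API; it only contrasts stamping
the first page's protections across a whole range with clearing one bit
per page, which is the pattern the reworked change_page_attr_set/clear
loops follow.

#include <stdio.h>
#include <string.h>

#define NPAGES     4
#define RW_BIT     0x1u   /* stand-in for a writable bit, e.g. _PAGE_RW     */
#define GLOBAL_BIT 0x2u   /* stand-in for the global bit, e.g. _PAGE_GLOBAL */

/* Page 0 is read-only, the remaining pages are writable. */
static const unsigned int initial_prot[NPAGES] = {
	GLOBAL_BIT,
	GLOBAL_BIT | RW_BIT,
	GLOBAL_BIT | RW_BIT,
	GLOBAL_BIT | RW_BIT,
};

static void dump(const char *what, const unsigned int prot[NPAGES])
{
	printf("%-20s:", what);
	for (int i = 0; i < NPAGES; i++)
		printf(" 0x%x", prot[i]);
	printf("\n");
}

int main(void)
{
	unsigned int prot[NPAGES];

	/* Old pattern: read page 0's bits once, stamp them on every page. */
	memcpy(prot, initial_prot, sizeof(prot));
	unsigned int first = prot[0] & ~GLOBAL_BIT;
	for (int i = 0; i < NPAGES; i++)
		prot[i] = first;		/* pages 1..3 lose RW_BIT too */
	dump("range-wide (buggy)", prot);

	/* New pattern: read each page's current bits, clear only GLOBAL_BIT. */
	memcpy(prot, initial_prot, sizeof(prot));
	for (int i = 0; i < NPAGES; i++)
		prot[i] &= ~GLOBAL_BIT;		/* per-page RW_BIT is preserved */
	dump("per-page (fixed)", prot);

	return 0;
}

Built as an ordinary user-space program with a C99 compiler, the first
dump shows every page reduced to 0x0 (read-only, non-global), while the
second shows pages 1..3 keeping 0x1 (writable) with only the global bit
cleared, mirroring why the range-wide call left pages after .rodata
read-only.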