#define pgtable_cache_init() do { } while (0)
struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte);
+
+extern void __update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte);
+extern void __update_tlb(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte);
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+ __update_cache(vma, address, pte);
+ __update_tlb(vma, address, pte);
+}
+
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
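The hunk above turns update_mmu_cache() into a static inline wrapper that fans the single
per-fault hook out into a cache half (__update_cache) and a TLB half (__update_tlb), so a
port that needs only one of the two can leave the other empty. The stand-alone program
below is only an illustrative sketch of that wrapper pattern; the stub types, printf output
and addresses are invented for the example and are not taken from the patch itself.

/*
 * Illustrative sketch only: stub types and hypothetical addresses.
 * Mirrors the wrapper pattern the patch introduces -- one inline
 * entry point that always runs both halves.
 */
#include <stdio.h>

struct vm_area_struct { int unused; };
typedef unsigned long pte_t;

/* Cache half: a port without aliasing D-caches could leave this empty. */
static void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte)
{
	(void)vma; (void)pte;
	printf("cache half: address=%#lx\n", address);
}

/* TLB half: a software-refill port can leave this empty as well. */
static void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte)
{
	(void)vma; (void)pte;
	printf("tlb half:   address=%#lx\n", address);
}

/* Single hook the generic fault path keeps calling, unchanged. */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t pte)
{
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

int main(void)
{
	struct vm_area_struct vma = { 0 };

	update_mmu_cache(&vma, 0x00400000UL, (pte_t)0);
	return 0;
}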
kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+ struct page *page;
+ unsigned long pfn = pte_pfn(pte);
+
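+ /*
+ * Only D-caches that can hold virtual aliases need any maintenance
+ * here; bail out immediately otherwise.
+ */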
+ if (!boot_cpu_data.dcache.n_aliases)
+ return;
+
+ page = pfn_to_page(pfn);
+ if (pfn_valid(pfn) && page_mapping(page)) {
+ int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+ if (dirty) {
+ unsigned long addr = (unsigned long)page_address(page);
+
+ if (pages_do_alias(addr, address & PAGE_MASK))
+ __flush_wback_region((void *)addr, PAGE_SIZE);
+ }
+ }
+}
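__update_cache() above writes back the kernel mapping of a page marked PG_dcache_dirty
only when that mapping can alias with the user address being faulted in. The snippet
below is a stand-alone sketch of the aliasing test itself, with an assumed 16KB
virtually indexed cache way and 4KB pages; the mask value and addresses are assumptions
chosen for illustration, not values read from the patch.

/*
 * Stand-alone sketch, not kernel code: why two mappings of one physical
 * page can "alias" in a virtually indexed D-cache.  Assumes a 16KB
 * direct-mapped way with 4KB pages, so index bits 12-13 come from the
 * virtual address and may differ between mappings of the same page.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define WAY_SIZE	16384UL				/* assumed cache way size */
#define ALIAS_MASK	((WAY_SIZE - 1) & PAGE_MASK)	/* bits 12-13 here */

/* Non-zero when the two addresses select different cache lines. */
static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
{
	return (addr1 ^ addr2) & ALIAS_MASK;
}

int main(void)
{
	unsigned long kaddr = 0xc0003000UL;	/* kernel view of the page */
	unsigned long uaddr = 0x00402000UL;	/* user view of the same page */

	printf("pages alias: %s\n",
	       pages_do_alias(kaddr, uaddr & PAGE_MASK) ? "yes" : "no");
	return 0;
}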
BUG();
}
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
{
- BUG();
}
void __init page_table_range_init(unsigned long start, unsigned long end,
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
+ unsigned long flags, pteval, vpn;
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
+ /*
+ * Handle debugger faulting in for debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
return;
local_irq_save(flags);
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
- unsigned long pfn = pte_pfn(pte);
- struct page *page;
+ unsigned long flags, pteval, vpn;
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
+ /*
+ * Handle debugger faulting in for debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
return;
- page = pfn_to_page(pfn);
- if (pfn_valid(pfn) && page_mapping(page)) {
-#if defined(CONFIG_SH7705_CACHE_32KB)
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
- if (dirty) {
- unsigned long addr = (unsigned long)page_address(page);
-
- if (pages_do_alias(addr, address & PAGE_MASK))
- __flush_wback_region((void *)addr, PAGE_SIZE);
- }
-#endif
- }
-
local_irq_save(flags);
/* Set PTEH register */
for (i = 0; i < ways; i++)
ctrl_outl(data, addr + (i << 8));
}
-
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
- unsigned long flags;
- unsigned long pteval;
- unsigned long vpn;
- unsigned long pfn = pte_pfn(pte);
- struct page *page;
+ unsigned long flags, pteval, vpn;
- /* Ptrace may call this routine. */
- if (vma && current->active_mm != vma->vm_mm)
+ /*
+ * Handle debugger faulting in for debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
return;
- page = pfn_to_page(pfn);
- if (pfn_valid(pfn) && page_mapping(page)) {
-#ifndef CONFIG_SMP
- int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
- if (dirty) {
-
- unsigned long addr = (unsigned long)page_address(page);
-
- if (pages_do_alias(addr, address & PAGE_MASK))
- __flush_wback_region((void *)addr, PAGE_SIZE);
- }
-#endif
- }
-
local_irq_save(flags);
/* Set PTEH register */
goto no_context;
}
-void update_mmu_cache(struct vm_area_struct * vma,
- unsigned long address, pte_t pte)
-{
- /*
- * This appears to get called once for every pte entry that gets
- * established => I don't think it's efficient to try refilling the
- * TLBs with the pages - some may not get accessed even. Also, for
- * executable pages, it is impossible to determine reliably here which
- * TLB they should be mapped into (or both even).
- *
- * So, just do nothing here and handle faults on demand. In the
- * TLBMISS handling case, the refill is now done anyway after the pte
- * has been fixed up, so that deals with most useful cases.
- */
-}
-
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
unsigned long long match, pteh=0, lpage;
/* FIXME: Optimize this later.. */
flush_tlb_all();
}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t pte)
+{
+}