#define PAGES_II_LVL_TABLE 512
#define PHYS_TO_PAGE(phys) pfn_to_page((phys) >> PAGE_SHIFT)
-#define MMU_GFLUSH 0x60
-
/* Forward Declarations: */
static int bridge_brd_monitor(struct bridge_dev_context *dev_context);
static int bridge_brd_read(struct bridge_dev_context *dev_context,
	bridge_msg_set_queue_id,
};
-static inline void tlb_flush_all(const void __iomem *base)
-{
-	__raw_writeb(__raw_readb(base + MMU_GFLUSH) | 1, base + MMU_GFLUSH);
-}
-
static inline void flush_all(struct bridge_dev_context *dev_context)
{
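+	/* wake the DSP from hibernation before flushing the MMU TLB */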
	if (dev_context->dw_brd_state == BRD_DSP_HIBERNATION ||
	    dev_context->dw_brd_state == BRD_HIBERNATION)
		wake_dsp(dev_context, NULL);
-	tlb_flush_all(dev_context->dw_dsp_mmu_base);
+	hw_mmu_tlb_flush_all(dev_context->dw_dsp_mmu_base);
}
static void bad_page_dump(u32 pa, struct page *pg)
#define MMU_SMALL_PAGE_MASK 0xFFFFF000
#define MMU_LOAD_TLB 0x00000001
+#define MMU_GFLUSH 0x60
/*
 * hw_mmu_page_size_t: Enumerated Type used to specify the MMU Page Size(SLSS)
	return status;
}
+
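+/*
+ * hw_mmu_tlb_flush_all() - flush all TLB entries in the MMU
+ * @base:	MMU register base address
+ *
+ * Writing 1 to the MMU_GFLUSH register triggers a global flush of the TLB.
+ */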
+void hw_mmu_tlb_flush_all(const void __iomem *base)
+{
+	__raw_writeb(1, base + MMU_GFLUSH);
+}
extern hw_status hw_mmu_pte_clear(const u32 pg_tbl_va,
				  u32 page_size, u32 virtualAddr);
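+/* Flush all TLB entries via the MMU_GFLUSH register */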
+void hw_mmu_tlb_flush_all(const void __iomem *base);
+
static inline u32 hw_mmu_pte_addr_l1(u32 L1_base, u32 va)
{
	u32 pte_addr;