#define dmb(opt) asm volatile("dmb " #opt : : : "memory")
#define dsb(opt) asm volatile("dsb " #opt : : : "memory")
-#define mb() dsb()
+#define mb() dsb(sy)
#define rmb() dsb(ld)
#define wmb() dsb(st)
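/*
 * Editorial note, not part of the patch: with the option token stringified
 * into the mnemonic, a call such as dsb(ishst) expands to
 *
 *	asm volatile("dsb ishst" : : : "memory");
 *
 * i.e. a barrier that waits only for stores and only within the
 * inner-shareable domain, rather than the full-system "dsb sy".
 */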
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
- dsb();
+ dsb(ish);
}
#define flush_dcache_mmap_lock(mapping) \
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
/*
* set_pte_at() called from vmap_pte_range() does not
* have a DSB after cleaning the cache line.
*/
- dsb();
+ dsb(ish);
}
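/*
 * Editorial note, not part of the patch: flush_cache_vmap() is invoked by the
 * generic vmalloc/vmap code after vmap_pte_range() has written the new PTEs
 * via set_pte_at(), which issues no barrier of its own; the dsb(ish) above is
 * what makes those page-table writes visible before the new mapping is used.
 */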
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
*pmdp = pmd;
- dsb();
+ dsb(ishst);
}
static inline void pmd_clear(pmd_t *pmdp)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
*pudp = pud;
- dsb();
+ dsb(ishst);
}
static inline void pud_clear(pud_t *pudp)
*/
static inline void flush_tlb_all(void)
{
- dsb();
+ dsb(ishst);
asm("tlbi vmalle1is");
- dsb();
+ dsb(ish);
isb();
}
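/*
 * Editorial sketch, not part of the patch: the sequence above is the usual
 * broadcast TLB invalidation pattern, roughly:
 *
 *	dsb(ishst);              wait for earlier page-table stores to complete
 *	asm("tlbi vmalle1is");   invalidate stage-1 entries, inner-shareable
 *	dsb(ish);                wait for the broadcast invalidation to finish
 *	isb();                   resynchronize the instruction stream
 *
 * The store-only dsb(ishst) is sufficient before the TLBI, while the full
 * dsb(ish) afterwards must also wait for the TLBI itself to complete.
 */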
static inline void flush_tlb_mm(struct mm_struct *mm)
{
unsigned long asid = (unsigned long)ASID(mm) << 48;
- dsb();
+ dsb(ishst);
asm("tlbi aside1is, %0" : : "r" (asid));
- dsb();
+ dsb(ish);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long uaddr)
{
unsigned long addr = uaddr >> 12 |
((unsigned long)ASID(vma->vm_mm) << 48);
- dsb();
+ dsb(ishst);
asm("tlbi vae1is, %0" : : "r" (addr));
- dsb();
+ dsb(ish);
}
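/*
 * Editorial note, not part of the patch: the TLBI VAE1IS operand carries the
 * ASID in bits [63:48] and the virtual page number (uaddr >> 12) in the low
 * bits. For a hypothetical uaddr of 0x400000 under ASID 5:
 *
 *	uaddr >> 12              = 0x0000000000000400
 *	(unsigned long)5 << 48   = 0x0005000000000000
 *	addr (the two OR'd)      = 0x0005000000000400
 */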
static inline void flush_tlb_range(struct vm_area_struct *vma,
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
/*
* set_pte() does not have a DSB, so make sure that the page table
* write is visible.
*/
- dsb();
+ dsb(ishst);
}
#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)
* Complete any pending TLB or cache maintenance on this CPU in case
* the thread migrates to a different CPU.
*/
- dsb();
+ dsb(ish);
/* the actual thread switch */
last = cpu_switch_to(prev, next);
static void do_dc_cisw(u32 val)
{
asm volatile("dc cisw, %x0" : : "r" (val));
- dsb();
+ dsb(ish);
}
static void do_dc_csw(u32 val)
{
asm volatile("dc csw, %x0" : : "r" (val));
- dsb();
+ dsb(ish);
}
/* See note at ARM ARM B1.14.4 */