#define FAULT_USERSUPV (1 << 17)
#define FAULT_CPLBBITS 0x0000ffff
-#endif /* _CPLB_H */
+#ifndef __ASSEMBLY__
+
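+/*
+ * The underscore-prefixed helpers use the raw csync/ssync builtins and so
+ * bypass the anomaly workarounds in the CSYNC()/SSYNC() macros.  They are
+ * intended for callers such as the CPLB miss handlers, which run at
+ * exception level where the workaround's CLI cannot stop the only thing
+ * (an NMI) that can still interrupt them; other callers can use the plain
+ * disable_cplb()/enable_cplb() variants.
+ */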
+static inline void _disable_cplb(u32 mmr, u32 mask)
+{
+ u32 ctrl = bfin_read32(mmr) & ~mask;
+ /* CSYNC to ensure load store ordering */
+ __builtin_bfin_csync();
+ bfin_write32(mmr, ctrl);
+ __builtin_bfin_ssync();
+}
+static inline void disable_cplb(u32 mmr, u32 mask)
+{
+ u32 ctrl = bfin_read32(mmr) & ~mask;
+ CSYNC();
+ bfin_write32(mmr, ctrl);
+ SSYNC();
+}
+#define _disable_dcplb() _disable_cplb(DMEM_CONTROL, ENDCPLB)
+#define disable_dcplb() disable_cplb(DMEM_CONTROL, ENDCPLB)
+#define _disable_icplb() _disable_cplb(IMEM_CONTROL, ENICPLB)
+#define disable_icplb() disable_cplb(IMEM_CONTROL, ENICPLB)
+
+static inline void _enable_cplb(u32 mmr, u32 mask)
+{
+ u32 ctrl = bfin_read32(mmr) | mask;
+ /* CSYNC to ensure load store ordering */
+ __builtin_bfin_csync();
+ bfin_write32(mmr, ctrl);
+ __builtin_bfin_ssync();
+}
+static inline void enable_cplb(u32 mmr, u32 mask)
+{
+ u32 ctrl = bfin_read32(mmr) | mask;
+ CSYNC();
+ bfin_write32(mmr, ctrl);
+ SSYNC();
+}
+#define _enable_dcplb() _enable_cplb(DMEM_CONTROL, ENDCPLB)
+#define enable_dcplb() enable_cplb(DMEM_CONTROL, ENDCPLB)
+#define _enable_icplb() _enable_cplb(IMEM_CONTROL, ENICPLB)
+#define enable_icplb() enable_cplb(IMEM_CONTROL, ENICPLB)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _CPLB_H */
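For reference, a minimal usage sketch (not part of the patch) of how the new helpers are intended to be used when replacing a single DCPLB entry: the wrapper name dcplb_replace_entry() is illustrative only, while _disable_dcplb()/_enable_dcplb(), the DCPLB_DATA0/DCPLB_ADDR0 MMRs and local_irq_save_hw() are taken from this patch and assume the usual Blackfin kernel headers (<asm/blackfin.h>, <asm/cplb.h>):

/* Illustrative only: rewrite one DCPLB entry with interrupts off. */
static void dcplb_replace_entry(int idx, unsigned long addr, unsigned long data)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        _disable_dcplb();       /* CSYNC, clear ENDCPLB in DMEM_CONTROL, SSYNC */
        bfin_write32(DCPLB_DATA0 + idx * 4, data);
        bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
        _enable_dcplb();        /* CSYNC, set ENDCPLB in DMEM_CONTROL, SSYNC */
        local_irq_restore_hw(flags);
}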
unsigned long ctrl;
int i;
- SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
}
ctrl = bfin_read_IMEM_CONTROL();
ctrl |= IMC | ENICPLB;
+ /* CSYNC to ensure load store ordering */
+ CSYNC();
bfin_write_IMEM_CONTROL(ctrl);
SSYNC();
}
unsigned long ctrl;
int i;
- SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
* to port B
*/
ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
+ /* CSYNC to ensure load store ordering */
+ CSYNC();
bfin_write_DMEM_CONTROL(ctrl);
SSYNC();
}
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
+#include <asm/cplb.h>
#include <asm/cplbinit.h>
#include <asm/mmu_context.h>
int nr_icplb_supv_miss[NR_CPUS], nr_dcplb_prot[NR_CPUS];
int nr_cplb_flush[NR_CPUS];
-static inline void disable_dcplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl &= ~ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void enable_dcplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl |= ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void disable_icplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl &= ~ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-
-static inline void enable_icplb(void)
-{
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl |= ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-}
-
/*
* Given the contents of the status register, return the index of the
* CPLB that caused the fault.
dcplb_tbl[cpu][idx].addr = addr;
dcplb_tbl[cpu][idx].data = d_data;
- disable_dcplb();
+ _disable_dcplb();
bfin_write32(DCPLB_DATA0 + idx * 4, d_data);
bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
- enable_dcplb();
+ _enable_dcplb();
return 0;
}
icplb_tbl[cpu][idx].addr = addr;
icplb_tbl[cpu][idx].data = i_data;
- disable_icplb();
+ _disable_icplb();
bfin_write32(ICPLB_DATA0 + idx * 4, i_data);
bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
- enable_icplb();
+ _enable_icplb();
return 0;
}
nr_cplb_flush[cpu]++;
local_irq_save_hw(flags);
- disable_icplb();
+ _disable_icplb();
for (i = first_switched_icplb; i < MAX_CPLBS; i++) {
icplb_tbl[cpu][i].data = 0;
bfin_write32(ICPLB_DATA0 + i * 4, 0);
}
- enable_icplb();
+ _enable_icplb();
- disable_dcplb();
+ _disable_dcplb();
for (i = first_switched_dcplb; i < MAX_CPLBS; i++) {
dcplb_tbl[cpu][i].data = 0;
bfin_write32(DCPLB_DATA0 + i * 4, 0);
}
- enable_dcplb();
+ _enable_dcplb();
local_irq_restore_hw(flags);
}
#endif
}
- disable_dcplb();
+ _disable_dcplb();
for (i = first_mask_dcplb; i < first_switched_dcplb; i++) {
dcplb_tbl[cpu][i].addr = addr;
dcplb_tbl[cpu][i].data = d_data;
bfin_write32(DCPLB_ADDR0 + i * 4, addr);
addr += PAGE_SIZE;
}
- enable_dcplb();
+ _enable_dcplb();
local_irq_restore_hw(flags);
}
unsigned long ctrl;
int i;
- SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(ICPLB_ADDR0 + i * 4, icplb_tbl[i].addr);
bfin_write32(ICPLB_DATA0 + i * 4, icplb_tbl[i].data);
}
ctrl = bfin_read_IMEM_CONTROL();
ctrl |= IMC | ENICPLB;
+ /* CSYNC to ensure load store ordering */
+ CSYNC();
bfin_write_IMEM_CONTROL(ctrl);
SSYNC();
}
unsigned long ctrl;
int i;
- SSYNC();
for (i = 0; i < MAX_CPLBS; i++) {
bfin_write32(DCPLB_ADDR0 + i * 4, dcplb_tbl[i].addr);
bfin_write32(DCPLB_DATA0 + i * 4, dcplb_tbl[i].data);
* to port B
*/
ctrl |= DMEM_CNTR | PORT_PREF0 | (ANOMALY_05000287 ? PORT_PREF1 : 0);
+ /* CSYNC to ensure load store ordering */
+ CSYNC();
bfin_write_DMEM_CONTROL(ctrl);
SSYNC();
}
#define MGR_ATTR
#endif
-/*
- * We're in an exception handler. The normal cli nop nop workaround
- * isn't going to do very much, as the only thing that can interrupt
- * us is an NMI, and the cli isn't going to stop that.
- */
-#define NOWA_SSYNC __asm__ __volatile__ ("ssync;")
-
-/* Anomaly handlers provide SSYNCs, so avoid extra if anomaly is present */
-#if ANOMALY_05000125
-
-#define bfin_write_DMEM_CONTROL_SSYNC(v) bfin_write_DMEM_CONTROL(v)
-#define bfin_write_IMEM_CONTROL_SSYNC(v) bfin_write_IMEM_CONTROL(v)
-
-#else
-
-#define bfin_write_DMEM_CONTROL_SSYNC(v) \
- do { NOWA_SSYNC; bfin_write_DMEM_CONTROL(v); NOWA_SSYNC; } while (0)
-#define bfin_write_IMEM_CONTROL_SSYNC(v) \
- do { NOWA_SSYNC; bfin_write_IMEM_CONTROL(v); NOWA_SSYNC; } while (0)
-
-#endif
-
static inline void write_dcplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
- unsigned long ctrl = bfin_read_DMEM_CONTROL();
- bfin_write_DMEM_CONTROL_SSYNC(ctrl & ~ENDCPLB);
+ _disable_dcplb();
bfin_write32(DCPLB_DATA0 + idx * 4, data);
bfin_write32(DCPLB_ADDR0 + idx * 4, addr);
- bfin_write_DMEM_CONTROL_SSYNC(ctrl);
+ _enable_dcplb();
#ifdef CONFIG_CPLB_INFO
dcplb_tbl[cpu][idx].addr = addr;
static inline void write_icplb_data(int cpu, int idx, unsigned long data,
unsigned long addr)
{
- unsigned long ctrl = bfin_read_IMEM_CONTROL();
-
- bfin_write_IMEM_CONTROL_SSYNC(ctrl & ~ENICPLB);
+ _disable_icplb();
bfin_write32(ICPLB_DATA0 + idx * 4, data);
bfin_write32(ICPLB_ADDR0 + idx * 4, addr);
- bfin_write_IMEM_CONTROL_SSYNC(ctrl);
+ _enable_icplb();
#ifdef CONFIG_CPLB_INFO
icplb_tbl[cpu][idx].addr = addr;
R0 = ~ENICPLB;
R0 = R0 & R1;
- /* Anomaly 05000125 */
-#ifdef ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
+ /* Disabling CPLBs should be preceded by a CSYNC */
+ CSYNC;
[p0] = R0;
SSYNC;
-#ifdef ANOMALY_05000125
- STI R2;
-#endif
/* Turn off the dcache */
p0.l = LO(DMEM_CONTROL);
R0 = ~ENDCPLB;
R0 = R0 & R1;
- /* Anomaly 05000125 */
-#ifdef ANOMALY_05000125
- CLI R2;
- SSYNC;
-#endif
+ /* Disabling CPLBs should be preceded by a CSYNC */
+ CSYNC;
[p0] = R0;
SSYNC;
-#ifdef ANOMALY_05000125
- STI R2;
-#endif
/* in case of double faults, save a few things */
p0.l = _init_retx_coreb;
R5 = [P4]; /* Control Register*/
BITCLR(R5,ENICPLB_P);
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
P4.H = HI(DMEM_CONTROL);
R5 = [P4];
BITCLR(R5,ENDCPLB_P);
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
R5 = [P4]; /* Control Register*/
BITCLR(R5,ENICPLB_P);
- CLI R1;
- SSYNC; /* SSYNC required before writing to IMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
P4.H = HI(DMEM_CONTROL);
R5 = [P4];
BITCLR(R5,ENDCPLB_P);
- SSYNC; /* SSYNC required before writing to DMEM_CONTROL. */
- .align 8;
+ CSYNC; /* Disabling CPLBs should be preceded by a CSYNC */
[P4] = R5;
SSYNC;
- STI R1;
r0 = sp; /* stack frame pt_regs pointer argument ==> r0 */
r1 = RETX;
#include <linux/io.h>
#include <linux/irq.h>
+#include <asm/cplb.h>
#include <asm/gpio.h>
#include <asm/dma.h>
#include <asm/dpmc.h>
}
#endif
-static inline void dcache_disable(void)
-{
-#ifdef CONFIG_BFIN_DCACHE
- unsigned long ctrl;
-
-#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
- flushinv_all_dcache();
-#endif
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl &= ~ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-#endif
-}
-
-static inline void dcache_enable(void)
-{
-#ifdef CONFIG_BFIN_DCACHE
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_DMEM_CONTROL();
- ctrl |= ENDCPLB;
- bfin_write_DMEM_CONTROL(ctrl);
- SSYNC();
-#endif
-}
-
-static inline void icache_disable(void)
-{
-#ifdef CONFIG_BFIN_ICACHE
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl &= ~ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-#endif
-}
-
-static inline void icache_enable(void)
-{
-#ifdef CONFIG_BFIN_ICACHE
- unsigned long ctrl;
- SSYNC();
- ctrl = bfin_read_IMEM_CONTROL();
- ctrl |= ENICPLB;
- bfin_write_IMEM_CONTROL(ctrl);
- SSYNC();
-#endif
-}
-
int bfin_pm_suspend_mem_enter(void)
{
unsigned long flags;
bfin_gpio_pm_hibernate_suspend();
- dcache_disable();
- icache_disable();
+#if defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK)
+ flushinv_all_dcache();
+#endif
+ _disable_dcplb();
+ _disable_icplb();
bf53x_suspend_l1_mem(memptr);
do_hibernate(wakeup | vr_wakeup); /* Goodbye */
bf53x_resume_l1_mem(memptr);
- icache_enable();
- dcache_enable();
+ _enable_icplb();
+ _enable_dcplb();
bfin_gpio_pm_hibernate_restore();
blackfin_dma_resume();