MIPS: CPC: provide locking functions
authorPaul Burton <paul.burton@imgtec.com>
Fri, 14 Feb 2014 09:28:06 +0000 (09:28 +0000)
committerPaul Burton <paul.burton@imgtec.com>
Fri, 2 May 2014 15:39:14 +0000 (16:39 +0100)
This patch provides functions to lock & unlock access to the
"core-other" register region of the CPC. Without performing appropriate
locking it is possible for code using this region to be preempted or to
race with code on another VPE within the same core, with one changing
the core which the "core-other" region is acting upon at an inopportune
time for the other.

Signed-off-by: Paul Burton <paul.burton@imgtec.com>
arch/mips/include/asm/mips-cpc.h
arch/mips/kernel/mips-cpc.c

index c5bb609c4b97c77ea1e534f255d1eb9dbb5edb2c..e139a534e0fd1e9b23157bbd28300c1b3ff0fc9d 100644 (file)
@@ -152,4 +152,31 @@ BUILD_CPC_Cx_RW(other,             0x10)
 #define CPC_Cx_OTHER_CORENUM_SHF               16
 #define CPC_Cx_OTHER_CORENUM_MSK               (_ULCAST_(0xff) << 16)
 
+#ifdef CONFIG_MIPS_CPC
+
+/**
+ * mips_cpc_lock_other - lock access to another core
+ * @core: the other core to be accessed
+ *
+ * Call before operating upon a core via the 'other' register region in
+ * order to prevent the region being moved during access. Must be followed
+ * by a call to mips_cpc_unlock_other.
+ */
+extern void mips_cpc_lock_other(unsigned int core);
+
+/**
+ * mips_cpc_unlock_other - unlock access to another core
+ *
+ * Call after operating upon another core via the 'other' register region.
+ * Must be called after mips_cpc_lock_other.
+ */
+extern void mips_cpc_unlock_other(void);
+
+#else /* !CONFIG_MIPS_CPC */
+
+static inline void mips_cpc_lock_other(unsigned int core) { }
+static inline void mips_cpc_unlock_other(void) { }
+
+#endif /* !CONFIG_MIPS_CPC */
+
 #endif /* __MIPS_ASM_MIPS_CPC_H__ */
index c9dc674029694c59e4f5aae965ec750740f767e6..2368fc5ccf1e89f22586e5a183eda86b564803c0 100644 (file)
 
 void __iomem *mips_cpc_base;
 
+/*
+ * Per-core lock guarding the CPC "core-other" register region. NOTE:
+ * these per-cpu slots are indexed by core number (current_cpu_data.core)
+ * in the lock/unlock helpers, so all VPEs of one core share one lock.
+ */
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+/* IRQ flags saved by mips_cpc_lock_other, restored by mips_cpc_unlock_other. */
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
 phys_t __weak mips_cpc_phys_base(void)
 {
        u32 cpc_base;
@@ -39,6 +43,10 @@ phys_t __weak mips_cpc_phys_base(void)
 int mips_cpc_probe(void)
 {
        phys_t addr;
+       unsigned cpu;
+
+       for_each_possible_cpu(cpu)
+               spin_lock_init(&per_cpu(cpc_core_lock, cpu));
 
        addr = mips_cpc_phys_base();
        if (!addr)
@@ -50,3 +58,21 @@ int mips_cpc_probe(void)
 
        return 0;
 }
+
+/*
+ * Lock the "core-other" region and point it at @core. Preemption is
+ * disabled first so the caller cannot migrate off this core while the
+ * per-core lock is held; the saved IRQ flags live in a per-cpu variable
+ * because the matching mips_cpc_unlock_other takes no arguments.
+ */
+void mips_cpc_lock_other(unsigned int core)
+{
+       unsigned curr_core;
+       /* Pin to this core so curr_core stays valid until unlock. */
+       preempt_disable();
+       curr_core = current_cpc_data_core_read: /* see note below */
+       curr_core = current_cpu_data.core;
+       /*
+        * Serialise against other VPEs of this core (and local IRQs)
+        * that might retarget the core-other region concurrently.
+        */
+       spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+                         per_cpu(cpc_core_lock_flags, curr_core));
+       /* Select which core the "core-other" region now addresses. */
+       write_cpc_cl_other(core << CPC_Cx_OTHER_CORENUM_SHF);
+}
+
+/*
+ * Release the "core-other" lock taken by mips_cpc_lock_other, restoring
+ * the IRQ flags stashed per-cpu at lock time, then re-enable preemption.
+ * Must pair 1:1 with mips_cpc_lock_other.
+ */
+void mips_cpc_unlock_other(void)
+{
+       unsigned curr_core = current_cpu_data.core;
+       spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+                              per_cpu(cpc_core_lock_flags, curr_core));
+       preempt_enable();
+}