ARC: [SMP] Enable icache coherency
author Vineet Gupta <vgupta@synopsys.com>
Sun, 17 Feb 2013 10:51:42 +0000 (12:51 +0200)
committer Vineet Gupta <vgupta@synopsys.com>
Thu, 26 Jun 2014 06:29:01 +0000 (11:59 +0530)
icaches are not snooped, hence not coherent, in SMP setups, which means
the kernel has to do cross-core calls to keep them in sync.

The leaf routine __ic_line_inv_vaddr() now does cross-core calls.

__sync_icache_dcache() is affected by this in two ways:

* the local dcache line is flushed ahead of the remote icache inv requests
* interrupts can't be disabled around it anymore, since
      __ic_line_inv_vaddr()->on_each_cpu() can deadlock (see the WARN and
      the sketch below)

| WARNING: CPU: 0 PID: 1 at kernel/smp.c:374
| smp_call_function_many+0x25a/0x2c4()
|
|  init_kprobes+0x90/0xc8
|     register_kprobe+0x1d6/0x510
| __sync_icache_dcache+0x28/0x80
|
|     DISABLE IRQ
|
|     __ic_line_inv_vaddr
| on_each_cpu
|      smp_call_function_many+0x25a/0x2c4   --> WARN
| __ic_line_inv_vaddr_local
|     __dc_line_op
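
For illustration, a minimal sketch of the pattern that trips this WARN (the
caller and helper names are hypothetical, not part of this patch): an IPI
broadcast issued with interrupts disabled, which smp_call_function_many()
flags because waiting on remote CPUs with IRQs off can deadlock if another
CPU is waiting on us in the same way.

	#include <linux/smp.h>
	#include <linux/irqflags.h>

	static void cache_op_helper(void *info)
	{
		/* per-cpu cache maintenance would go here */
	}

	static void broken_cache_sync(void)
	{
		unsigned long flags;

		local_irq_save(flags);
		/* on_each_cpu() -> smp_call_function_many() has to wait for
		 * remote CPUs; doing so with IRQs off is what the WARN flags */
		on_each_cpu(cache_op_helper, NULL, 1);
		local_irq_restore(flags);
	}

Dropping the local_irq_save()/restore() pair, as done in the hunk below,
avoids this.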

* TODO: needs to use a mask of the relevant CPUs to avoid broadcasting to
  every core (one possible shape is sketched below)
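
  One possible shape for that, as a sketch only (the struct mm_struct
  parameter and the use of mm_cpumask() are assumptions, not part of this
  patch), using on_each_cpu_mask() to restrict the IPI to CPUs that have
  run the mm:

	static void __ic_line_inv_vaddr(struct mm_struct *mm, unsigned long paddr,
					unsigned long vaddr, unsigned long sz)
	{
		struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr, sz };

		/* only IPI CPUs that may hold stale icache lines for this mm */
		on_each_cpu_mask(mm_cpumask(mm), __ic_line_inv_vaddr_helper,
				 &ic_inv, 1);
	}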

Signed-off-by: Noam Camus <noamc@ezchip.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
arch/arc/mm/cache_arc700.c

index 1f676c4794e01c90573937ee804882a27ac95ae4..353b202c37c91e452fb365caacde84efbe27181e 100644 (file)
@@ -389,7 +389,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
@@ -405,6 +405,23 @@ static inline void __ic_entire_inv(void)
        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
 }
 
+struct ic_line_inv_vaddr_ipi {
+       unsigned long paddr, vaddr;
+       int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+        struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info;
+        __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz)
+{
+       struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz};
+       on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
 #else
 
 #define __ic_entire_inv()
@@ -553,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __ic_line_inv_vaddr(paddr, vaddr, len);
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-       local_irq_restore(flags);
+       __ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */