sparc64: Make itc_sync_lock raw
author Kirill Tkhai <tkhai@yandex.ru>
Wed, 16 Apr 2014 20:45:24 +0000 (00:45 +0400)
committer David S. Miller <davem@davemloft.net>
Fri, 2 May 2014 05:15:16 +0000 (01:15 -0400)
One more place where we must not be preempted
or interrupted on RT.

Always actually disable interrupts during the
synchronization cycle.

Signed-off-by: Kirill Tkhai <tkhai@yandex.ru>
Signed-off-by: David S. Miller <davem@davemloft.net>
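
[Editor's note, not part of the patch: on PREEMPT_RT a plain spinlock_t
becomes a sleeping lock, so spin_lock_irqsave() no longer disables hard
interrupts, while a raw_spinlock_t keeps the classic non-RT semantics.
A minimal sketch of the pattern this change relies on, using hypothetical
lock and function names:]

	static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical lock */

	static void example_timing_critical(void)
	{
		unsigned long flags;

		/* Hard IRQs are really off here, even on PREEMPT_RT. */
		raw_spin_lock_irqsave(&example_lock, flags);
		/* ... timing-sensitive work, e.g. a tick sync loop ... */
		raw_spin_unlock_irqrestore(&example_lock, flags);
	}
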
arch/sparc/kernel/smp_64.c

index 9781048161ab8865a3ce203074d4067312989b95..745a3633ce148208554a71a38c829d6a8c78e025 100644
@@ -149,7 +149,7 @@ void cpu_panic(void)
 #define NUM_ROUNDS     64      /* magic value */
 #define NUM_ITERS      5       /* likewise */
 
-static DEFINE_SPINLOCK(itc_sync_lock);
+static DEFINE_RAW_SPINLOCK(itc_sync_lock);
 static unsigned long go[SLAVE + 1];
 
 #define DEBUG_TICK_SYNC        0
@@ -257,7 +257,7 @@ static void smp_synchronize_one_tick(int cpu)
        go[MASTER] = 0;
        membar_safe("#StoreLoad");
 
-       spin_lock_irqsave(&itc_sync_lock, flags);
+       raw_spin_lock_irqsave(&itc_sync_lock, flags);
        {
                for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
                        while (!go[MASTER])
@@ -268,7 +268,7 @@ static void smp_synchronize_one_tick(int cpu)
                        membar_safe("#StoreLoad");
                }
        }
-       spin_unlock_irqrestore(&itc_sync_lock, flags);
+       raw_spin_unlock_irqrestore(&itc_sync_lock, flags);
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)