powerpc: Stop using non-architected shared_proc field in lppaca
author     Anton Blanchard <anton@samba.org>
           Tue, 6 Aug 2013 16:01:26 +0000 (02:01 +1000)
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>
           Wed, 14 Aug 2013 01:50:26 +0000 (11:50 +1000)
Although the shared_proc field in the lppaca works today, it is not
architected. A shared processor partition will always have a non-zero
yield_count, so use that instead. Create a wrapper so users don't have
to know about the details.
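
In short, the helper added to lppaca.h (condensed from the hunk below) is:

	/* A shared processor partition always has a non-zero yield count. */
	static inline bool lppaca_shared_proc(struct lppaca *l)
	{
		return l->yield_count != 0;
	}

and callers that used to peek at the field directly now go through the
wrapper, e.g. the spinlock code:

	#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))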

In order for older kernels to continue to work on KVM, we still need to
set the shared_proc bit. While here, remove the ugly bitfield.
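
On the KVM side this is handled in init_vpa() (see the book3s_hv.c hunk
below), which after this patch satisfies both conventions at once:

	static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
	{
		vpa->__old_status |= LPPACA_OLD_SHARED_PROC; /* old guests test this bit */
		vpa->yield_count = 1;  /* new guests test for a non-zero yield count */
	}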

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
arch/powerpc/include/asm/lppaca.h
arch/powerpc/include/asm/spinlock.h
arch/powerpc/kernel/lparcfg.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/mm/numa.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/processor_idle.c

arch/powerpc/include/asm/lppaca.h
index 9b12f88d4adb4f0977c46bedf2f555b055f71955..bc8def08d5d81f995f88745ee9c03574e9675297 100644
@@ -50,10 +50,8 @@ struct lppaca {
 
        u32     desc;                   /* Eye catcher 0xD397D781 */
        u16     size;                   /* Size of this struct */
-       u16     reserved1;
-       u16     reserved2:14;
-       u8      shared_proc:1;          /* Shared processor indicator */
-       u8      secondary_thread:1;     /* Secondary thread indicator */
+       u8      reserved1[3];
+       u8      __old_status;           /* Old status, including shared proc */
        u8      reserved3[14];
        volatile u32 dyn_hw_node_id;    /* Dynamic hardware node id */
        volatile u32 dyn_hw_proc_id;    /* Dynamic hardware proc id */
@@ -107,6 +105,18 @@ extern struct lppaca lppaca[];
 
 #define lppaca_of(cpu) (*paca[cpu].lppaca_ptr)
 
+/*
+ * Old kernels used a reserved bit in the VPA to determine if it was running
+ * in shared processor mode. New kernels look for a non zero yield count
+ * but KVM still needs to set the bit to keep the old stuff happy.
+ */
+#define LPPACA_OLD_SHARED_PROC         2
+
+static inline bool lppaca_shared_proc(struct lppaca *l)
+{
+       return l->yield_count != 0;
+}
+
 /*
  * SLB shadow buffer structure as defined in the PAPR.  The save_area
  * contains adjacent ESID and VSID pairs for each shadowed SLB.  The
arch/powerpc/include/asm/spinlock.h
index 5b23f910ee57ea06817815cf0f5688fc0c024b93..7c345b6518db6a26e3f8d26e7084ec38972f399a 100644
@@ -96,7 +96,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 
 #if defined(CONFIG_PPC_SPLPAR)
 /* We only yield to the hypervisor if we are in shared processor mode */
-#define SHARED_PROCESSOR (local_paca->lppaca_ptr->shared_proc)
+#define SHARED_PROCESSOR (lppaca_shared_proc(local_paca->lppaca_ptr))
 extern void __spin_yield(arch_spinlock_t *lock);
 extern void __rw_yield(arch_rwlock_t *lock);
 #else /* SPLPAR */
arch/powerpc/kernel/lparcfg.c
index d92f3871e9cf959b583cf35b03de95929fc253ed..e6024c2ed5c73697254c12e6e53079b478d8b477 100644
@@ -165,7 +165,7 @@ static void parse_ppp_data(struct seq_file *m)
                   ppp_data.active_system_procs);
 
        /* pool related entries are appropriate for shared configs */
-       if (lppaca_of(0).shared_proc) {
+       if (lppaca_shared_proc(get_lppaca())) {
                unsigned long pool_idle_time, pool_procs;
 
                seq_printf(m, "pool=%d\n", ppp_data.pool_num);
@@ -473,7 +473,8 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
        seq_printf(m, "partition_potential_processors=%d\n",
                   partition_potential_processors);
 
-       seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc);
+       seq_printf(m, "shared_processor_mode=%d\n",
+                  lppaca_shared_proc(get_lppaca()));
 
        seq_printf(m, "slb_size=%d\n", mmu_slb_size);
 
arch/powerpc/kvm/book3s_hv.c
index 2efa9dde741a1aad3ed206481e9c272a94eb7cdf..cf39bf4f3c7d65bcc96fb2d37b192a2ad8e68ef0 100644
@@ -217,7 +217,7 @@ struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
 
 static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
 {
-       vpa->shared_proc = 1;
+       vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
        vpa->yield_count = 1;
 }
 
arch/powerpc/mm/numa.c
index 5850798826cde0be9570368cb7968a9ea279b1e1..501e32ca43b49f9a4143c69a9e41ad3ded709514 100644
@@ -1609,7 +1609,7 @@ int start_topology_update(void)
 #endif
                }
        } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
-                  get_lppaca()->shared_proc) {
+                  lppaca_shared_proc(get_lppaca())) {
                if (!vphn_enabled) {
                        prrn_enabled = 0;
                        vphn_enabled = 1;
arch/powerpc/platforms/pseries/hotplug-cpu.c
index 217ca5c75b2007f32615266fb20f36fce359a2cd..1e490cf63a0d9669d85846b000b44ff9cb9a09c6 100644
@@ -123,7 +123,7 @@ static void pseries_mach_cpu_die(void)
                cede_latency_hint = 2;
 
                get_lppaca()->idle = 1;
-               if (!get_lppaca()->shared_proc)
+               if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 1;
 
                while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
@@ -137,7 +137,7 @@ static void pseries_mach_cpu_die(void)
 
                local_irq_disable();
 
-               if (!get_lppaca()->shared_proc)
+               if (!lppaca_shared_proc(get_lppaca()))
                        get_lppaca()->donate_dedicated_cpu = 0;
                get_lppaca()->idle = 0;
 
arch/powerpc/platforms/pseries/processor_idle.c
index 4644efa069411e8f797ff53f1a853620278ca3a4..92db881be27e1d6cf1f03372c11cf4fd45006b76 100644
@@ -308,7 +308,7 @@ static int pseries_idle_probe(void)
                return -EPERM;
        }
 
-       if (get_lppaca()->shared_proc)
+       if (lppaca_shared_proc(get_lppaca()))
                cpuidle_state_table = shared_states;
        else
                cpuidle_state_table = dedicated_states;