cpuidle: fix HP nx6125 regression
Author:     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
AuthorDate: Tue, 20 Nov 2007 02:43:22 +0000 (21:43 -0500)
Commit:     Len Brown <len.brown@intel.com>
CommitDate: Tue, 20 Nov 2007 02:43:22 +0000 (21:43 -0500)
Fix for http://bugzilla.kernel.org/show_bug.cgi?id=9355

cpuidle has always fallen back to C2 when there is bus master (bm)
activity while entering C3. But the presence of C2 is not guaranteed.
Change the cpuidle algorithm to detect a safe_state to fall back to
on bm activity, and use that state instead of C2.
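
In outline, the new fallback added to acpi_idle_enter_bm() has this
shape (a condensed sketch of the hunk below, not a literal copy):

       if (acpi_idle_bm_check()) {
               /* bm activity: enter the pre-recorded safe state instead */
               if (dev->safe_state)
                       return dev->safe_state->enter(dev, dev->safe_state);
               /* no C1/C2 state was found at setup time; just halt */
               acpi_safe_halt();
               return 0;
       }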

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
drivers/acpi/processor_idle.c
include/acpi/processor.h
include/linux/cpuidle.h

diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 1af0694e8520559e052b993bcea48cbdfd3aa4ac..8904f5c82a1c09781c10889178356fd85d26fc54 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -197,6 +197,19 @@ static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
                return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
 }
 
+static void acpi_safe_halt(void)
+{
+       current_thread_info()->status &= ~TS_POLLING;
+       /*
+        * TS_POLLING-cleared state must be visible before we
+        * test NEED_RESCHED:
+        */
+       smp_mb();
+       if (!need_resched())
+               safe_halt();
+       current_thread_info()->status |= TS_POLLING;
+}
+
 #ifndef CONFIG_CPU_IDLE
 
 static void
@@ -239,19 +252,6 @@ acpi_processor_power_activate(struct acpi_processor *pr,
        return;
 }
 
-static void acpi_safe_halt(void)
-{
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we
-        * test NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();
-       current_thread_info()->status |= TS_POLLING;
-}
-
 static atomic_t c3_cpu_count;
 
 /* Common C-state entry for C2, C3, .. */
@@ -1385,15 +1385,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
        if (pr->flags.bm_check)
                acpi_idle_update_bm_rld(pr, cx);
 
-       current_thread_info()->status &= ~TS_POLLING;
-       /*
-        * TS_POLLING-cleared state must be visible before we test
-        * NEED_RESCHED:
-        */
-       smp_mb();
-       if (!need_resched())
-               safe_halt();
-       current_thread_info()->status |= TS_POLLING;
+       acpi_safe_halt();
 
        cx->usage++;
 
@@ -1493,6 +1485,15 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
        if (acpi_idle_suspend)
                return(acpi_idle_enter_c1(dev, state));
 
+       if (acpi_idle_bm_check()) {
+               if (dev->safe_state) {
+                       return dev->safe_state->enter(dev, dev->safe_state);
+               } else {
+                       acpi_safe_halt();
+                       return 0;
+               }
+       }
+
        local_irq_disable();
        current_thread_info()->status &= ~TS_POLLING;
        /*
@@ -1515,49 +1516,39 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
         */
        acpi_state_timer_broadcast(pr, cx, 1);
 
-       if (acpi_idle_bm_check()) {
-               cx = pr->power.bm_state;
-
-               acpi_idle_update_bm_rld(pr, cx);
-
-               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-               acpi_idle_do_entry(cx);
-               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-       } else {
-               acpi_idle_update_bm_rld(pr, cx);
+       acpi_idle_update_bm_rld(pr, cx);
 
-               /*
-                * disable bus master
-                * bm_check implies we need ARB_DIS
-                * !bm_check implies we need cache flush
-                * bm_control implies whether we can do ARB_DIS
-                *
-                * That leaves a case where bm_check is set and bm_control is
-                * not set. In that case we cannot do much, we enter C3
-                * without doing anything.
-                */
-               if (pr->flags.bm_check && pr->flags.bm_control) {
-                       spin_lock(&c3_lock);
-                       c3_cpu_count++;
-                       /* Disable bus master arbitration when all CPUs are in C3 */
-                       if (c3_cpu_count == num_online_cpus())
-                               acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
-                       spin_unlock(&c3_lock);
-               } else if (!pr->flags.bm_check) {
-                       ACPI_FLUSH_CPU_CACHE();
-               }
+       /*
+        * disable bus master
+        * bm_check implies we need ARB_DIS
+        * !bm_check implies we need cache flush
+        * bm_control implies whether we can do ARB_DIS
+        *
+        * That leaves a case where bm_check is set and bm_control is
+        * not set. In that case we cannot do much, we enter C3
+        * without doing anything.
+        */
+       if (pr->flags.bm_check && pr->flags.bm_control) {
+               spin_lock(&c3_lock);
+               c3_cpu_count++;
+               /* Disable bus master arbitration when all CPUs are in C3 */
+               if (c3_cpu_count == num_online_cpus())
+                       acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
+               spin_unlock(&c3_lock);
+       } else if (!pr->flags.bm_check) {
+               ACPI_FLUSH_CPU_CACHE();
+       }
 
-               t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
-               acpi_idle_do_entry(cx);
-               t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
+       acpi_idle_do_entry(cx);
+       t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
 
-               /* Re-enable bus master arbitration */
-               if (pr->flags.bm_check && pr->flags.bm_control) {
-                       spin_lock(&c3_lock);
-                       acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
-                       c3_cpu_count--;
-                       spin_unlock(&c3_lock);
-               }
+       /* Re-enable bus master arbitration */
+       if (pr->flags.bm_check && pr->flags.bm_control) {
+               spin_lock(&c3_lock);
+               acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
+               c3_cpu_count--;
+               spin_unlock(&c3_lock);
        }
 
 #if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
@@ -1626,12 +1617,14 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
                        case ACPI_STATE_C1:
                        state->flags |= CPUIDLE_FLAG_SHALLOW;
                        state->enter = acpi_idle_enter_c1;
+                       dev->safe_state = state;
                        break;
 
                        case ACPI_STATE_C2:
                        state->flags |= CPUIDLE_FLAG_BALANCED;
                        state->flags |= CPUIDLE_FLAG_TIME_VALID;
                        state->enter = acpi_idle_enter_simple;
+                       dev->safe_state = state;
                        break;
 
                        case ACPI_STATE_C3:
@@ -1652,14 +1645,6 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
        if (!count)
                return -EINVAL;
 
-       /* find the deepest state that can handle active BM */
-       if (pr->flags.bm_check) {
-               for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
-                       if (pr->power.states[i].type == ACPI_STATE_C3)
-                               break;
-               pr->power.bm_state = &pr->power.states[i-1];
-       }
-
        return 0;
 }
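
Note on the safe_state selection in acpi_processor_setup_cpuidle()
above: the setup loop visits C-states shallow-to-deep and assigns
dev->safe_state in both the C1 and C2 cases, so the last assignment
wins and safe_state ends up pointing at the deepest C1/C2 state
present. A minimal standalone sketch of that idiom (the struct and
function names here are illustrative stand-ins, not kernel types):

       #include <stddef.h>

       /* Illustrative stand-in for a C-state table entry. */
       struct cstate { int type; };    /* 1 = C1, 2 = C2, 3 = C3 */

       /* Walk shallow-to-deep; the last C1/C2 state seen wins, i.e.
        * the deepest state that tolerates bus master activity. */
       static struct cstate *pick_safe_state(struct cstate *s, size_t n)
       {
               struct cstate *safe = NULL;
               size_t i;

               for (i = 0; i < n; i++)
                       if (s[i].type == 1 || s[i].type == 2)
                               safe = &s[i];
               return safe;
       }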
 
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 26d79f6db8a044490c95c8752a40a9fa4c68eff5..76411b1fc4fd4778e2e35cda00da46ead7b7dd5e 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -78,7 +78,6 @@ struct acpi_processor_cx {
 struct acpi_processor_power {
        struct cpuidle_device dev;
        struct acpi_processor_cx *state;
-       struct acpi_processor_cx *bm_state;
        unsigned long bm_check_timestamp;
        u32 default_state;
        u32 bm_activity;
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 16a51546db444f9cbbbc0d845b312621eec0ae44..c4e00161a247975aca754484ecb5f232d1ff4f08 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -92,6 +92,7 @@ struct cpuidle_device {
        struct kobject          kobj;
        struct completion       kobj_unregister;
        void                    *governor_data;
+       struct cpuidle_state    *safe_state;
 };
 
 DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);