Linux 2.6.23-rc2
drivers/acpi/processor_idle.c
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>        /* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER       "power"
#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
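
/*
 * Worked example of the conversion above: the ACPI PM timer runs at
 * PM_TIMER_FREQUENCY == 3579545 Hz, so US_TO_PM_TIMER_TICKS(100) is
 * (100 * 3579) / 1000 == 357 ticks.  That rate of roughly 3.58 ticks
 * per microsecond is also where the C2/C3_OVERHEAD comments get their
 * 3.579 figure: the 4-tick overhead is about 1us of fixed entry/exit cost.
 */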

static void (*pm_idle_save) (void) __read_mostly;
module_param(max_cstate, uint, 0644);

static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
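
/*
 * Example with the initializer above: at HZ == 250 the mask is
 * (1U << (250 / 25)) - 1 == 0x3FF, i.e. 10 jiffies == 40ms of history;
 * at HZ >= 800 the full 32-bit mask is used.  Writing a smaller mask via
 * the module parameter shortens the bus-master history window that the
 * promotion/demotion logic below consults.
 */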
/* --------------------------------------------------------------------------
                              Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
        { set_max_cstate, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
          DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
        { set_max_cstate, "Medion 41700", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        {},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
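
/*
 * Wraparound example for the 24-bit PM timer branch above: with
 * t1 == 0x00FFFFF0 and t2 == 0x00000010 it returns
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x10) & 0x00FFFFFF == 0x1F ticks.
 * (The exact modular difference is 0x20; being one tick short is
 * harmless for the promotion/demotion heuristics this feeds.)
 */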

static void
acpi_processor_power_activate(struct acpi_processor *pr,
                              struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload (old may be NULL on the very
                   first activation, so guard the dereference) */
                if ((!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
                break;
        }

        pr->power.state = new;

        return;
}

static void acpi_safe_halt(void)
{
        current_thread_info()->status &= ~TS_POLLING;
        /*
         * TS_POLLING-cleared state must be visible before we
         * test NEED_RESCHED:
         */
        smp_mb();
        if (!need_resched())
                safe_halt();
        current_thread_info()->status |= TS_POLLING;
}
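
/*
 * Why the barrier above matters: on kernels of this era the scheduler's
 * resched path skips sending a reschedule IPI to a CPU whose task has
 * TS_POLLING set, assuming a polling idle loop will notice
 * TIF_NEED_RESCHED on its own.  If the store clearing TS_POLLING could be
 * reordered past the need_resched() test, a wakeup landing in that window
 * would set the flag without an IPI and the CPU could halt anyway,
 * sleeping until the next unrelated interrupt.
 */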

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
        if (cstate->space_id == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cstate);
        } else {
                int unused;
                /* IO port based C-state */
                inb(cstate->address);
                /* Dummy wait op - must do something useless after P_LVL2 read
                   because chipsets cannot guarantee that STPCLK# signal
                   gets asserted in time to freeze execution properly. */
                unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}
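
/*
 * Note on the two paths above: ACPI_CSTATE_FFH ("fixed function hardware")
 * is the MWAIT-style entry on x86, dispatched through the architecture's
 * acpi_processor_ffh_cstate_enter(); the SYSTEMIO path enters the C-state
 * as a side effect of reading the P_LVL2/P_LVL3 port, which is why the
 * dummy PM-timer read is needed to keep the CPU occupied until STPCLK#
 * takes effect.
 */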

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        /*
         * Check, if one of the previous states already marked the lapic
         * unstable
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
        unsigned long reason;

        reason = pr->power.timer_broadcast_on_state < INT_MAX ?
                CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

        clockevents_notify(reason, &pr->id);
#else
        cpumask_t mask = cpumask_of_cpu(pr->id);

        if (pr->power.timer_broadcast_on_state < INT_MAX)
                on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
        else
                on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
#endif
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS

        int state = cx - pr->power.states;

        if (state >= pr->power.timer_broadcast_on_state) {
                unsigned long reason;

                reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
                        CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
                clockevents_notify(reason, &pr->id);
        }
#endif
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
}

#endif

static void acpi_processor_idle(void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        pr = processors[smp_processor_id()];
        if (!pr) {
                local_irq_enable();
                return;
        }

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx) {
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();
                return;
        }

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 31)
                        diff = 31;

                pr->power.bm_activity <<= diff;

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
                if (bm_status) {
                        pr->power.bm_activity |= 0x1;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity; forcing us to
                 * manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity |= 0x1;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * If bus mastering is or was active this jiffy, demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fall back to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if ((pr->power.bm_activity & 0x1) &&
                    cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }
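
        /*
         * Tracing the history logic above: bm_activity is a shift register
         * with one bit per jiffy.  If the last check ran 3 jiffies ago,
         * "<<= diff" ages the old samples by 3 bits, and a set BM_STS adds
         * a 1 in the newest position.  The demotion test here looks only at
         * the current jiffy's bit; the older bits are matched against the
         * bm_history mask by the promotion logic further down.
         */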

#ifdef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.  We do it here instead of doing it at _CST/P_LVL
         * detection phase, to work cleanly with logical CPU hotplug.
         */
        if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                cx = &pr->power.states[ACPI_STATE_C1];
#endif

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                 * test NEED_RESCHED:
                 */
                smp_mb();
                if (need_resched()) {
                        current_thread_info()->status |= TS_POLLING;
                        local_irq_enable();
                        return;
                }
        }

        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        acpi_safe_halt();

                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Invoke C2 */
                acpi_state_timer_broadcast(pr, cx, 1);
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C2, so notify users */
                mark_tsc_unstable("possible TSC halt in C2");
#endif
                /* Re-enable interrupts */
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        case ACPI_STATE_C3:

                /*
                 * disable bus master
                 * bm_check implies we need ARB_DIS
                 * !bm_check implies we need cache flush
                 * bm_control implies whether we can do ARB_DIS
                 *
                 * That leaves a case where bm_check is set and bm_control is
                 * not set. In that case we cannot do much, we enter C3
                 * without doing anything.
                 */
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        if (atomic_inc_return(&c3_cpu_count) ==
                            num_online_cpus()) {
                                /*
                                 * All CPUs are trying to go to C3
                                 * Disable bus master arbitration
                                 */
                                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
                        }
                } else if (!pr->flags.bm_check) {
                        /* SMP with no shared cache... Invalidate cache */
                        ACPI_FLUSH_CPU_CACHE();
                }

                /* Get start time (ticks) */
                t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                /* Invoke C3 */
                acpi_state_timer_broadcast(pr, cx, 1);
                acpi_cstate_enter(cx);
                /* Get end time (ticks) */
                t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
                if (pr->flags.bm_check && pr->flags.bm_control) {
                        /* Enable bus master arbitration */
                        atomic_dec(&c3_cpu_count);
                        acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
                }

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
                /* TSC halts in C3, so notify users */
                mark_tsc_unstable("TSC halts in C3");
#endif
                /* Re-enable interrupts */
                local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks =
                    ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                acpi_state_timer_broadcast(pr, cx, 0);
                break;

        default:
                local_irq_enable();
                return;
        }
        cx->usage++;
        if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
                cx->time += sleep_ticks;

        next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
        /* Don't do promotion/demotion */
        if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
            !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
                next_state = cx;
                goto end;
        }
#endif

        /*
         * Promotion?
         * ----------
         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks &&
                    cx->promotion.state->latency <= system_latency_constraint()) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >=
                            cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity &
                                              cx->promotion.threshold.bm)) {
                                                next_state =
                                                    cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }
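
        /*
         * Concrete trace of the two blocks above, using the thresholds set
         * by acpi_processor_set_power_policy() below: while resident in C2,
         * four consecutive sleeps longer than C2's latency_ticks, with no
         * bits of bm_activity caught by the bm_history mask, promote to C3.
         * A single sleep shorter than the threshold both resets the
         * promotion count and (since demotion.threshold.count is 1) demotes
         * immediately.
         */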

      end:
        /*
         * Demote if current state exceeds max_cstate
         * or if the latency of the current state is unacceptable
         */
        if ((pr->power.state - pr->power.states) > max_cstate ||
            pr->power.state->latency > system_latency_constraint()) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        if (!pr)
                return -EINVAL;

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return -ENODEV;

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return 0;
}
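
/*
 * Resulting chain for a machine with valid C1/C2/C3, sketched from the two
 * loops above:
 *
 *   C1: promote to C2 after 10 long sleeps
 *   C2: demote to C1 after 1 short sleep; promote to C3 after 4 long
 *       sleeps with no bus-master activity under the bm_history mask
 *   C3: demote to C2 after 1 short sleep or on bus-master activity
 *
 * "long"/"short" is measured against each state's own latency_ticks.
 */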

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

        if (!pr)
                return -EINVAL;

        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int current_count;
        int i;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        if (nocst)
                return -ENODEV;

        current_count = 0;

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                printk(KERN_ERR PREFIX "not enough elements in _CST\n");
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
                status = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                /* There should be an easy way to extract an integer... */
                obj = &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;
                /*
                 * Some buggy BIOSes won't list C1 in _CST -
                 * Let acpi_processor_get_power_info_default() handle them later
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        current_count++;

                cx.address = reg->address;
                cx.index = current_count + 1;

                cx.space_id = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe
                            (pr->id, &cx, reg) == 0) {
                                cx.space_id = ACPI_CSTATE_FFH;
                        } else if (cx.type != ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
                                 * That is, we retain space_id of SYSTEM_IO for
                                 * halt based C1.
                                 * Otherwise, ignore this info and continue.
                                 */
                                continue;
                        }
                }

                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support total ACPI_PROCESSOR_MAX_POWER - 1
                 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        printk(KERN_WARNING
                               "Limiting number of power states to max (%d)\n",
                               ACPI_PROCESSOR_MAX_POWER);
                        printk(KERN_WARNING
                               "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                status = -EFAULT;

      end:
        kfree(buffer.pointer);

        return status;
}
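
/*
 * Shape of the _CST object parsed above, per the ACPI specification:
 *
 *   Package {
 *       Count,                                       // Integer
 *       Package { Register, Type, Latency, Power },  // one per C-state
 *       ...
 *   }
 *
 * Register is a buffer holding a generic register descriptor (its space_id
 * selects the SYSTEM_IO vs FIXED_HARDWARE handling above), Type is 1..3
 * for C1..C3, Latency is the worst-case entry/exit latency in
 * microseconds, and Power is the average consumption in milliwatts.
 */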

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

        if (!cx->address)
                return;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag;

        if (!cx->address)
                return;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n", cx->latency));
                return;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is same across all CPUs */
        if (!bm_check_flag) {
                /* Determine whether bm_check is needed based on CPU  */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
        } else {
                pr->flags.bm_check = bm_check_flag;
        }

        if (pr->flags.bm_check) {
                /* bus mastering control is necessary */
                if (!pr->flags.bm_control) {
                        /* In this case we enter C3 without bus mastering */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "C3 support without bus mastering control\n"));
                }
        } else {
                /*
                 * WBINVD should be set in the FADT for the C3 state to be
                 * supported when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        if (cx->valid)
                                acpi_timer_check_state(i, pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        acpi_propagate_timer_broadcast(pr);

        return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return result;

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
        int result = 0;

        if (!pr)
                return -EINVAL;

        if (nocst) {
                return -ENODEV;
        }

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return result;
}

/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = seq->private;
        unsigned int i;

        if (!pr)
                goto end;

        seq_printf(seq, "active state:            C%zd\n"
                   "max_cstate:              C%d\n"
                   "bus master activity:     %08x\n"
                   "maximum allowed latency: %d usec\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate, (unsigned)pr->power.bm_activity,
                   system_latency_constraint());

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, "   %cC%d:                  ",
                           (&pr->power.states[i] ==
                            pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage,
                           (unsigned long long)pr->power.states[i].time);
        }

      end:
        return 0;
}
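
/*
 * Illustrative /proc/acpi/processor/CPU0/power output produced by the
 * function above (the values are invented for the example):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000000
 *   maximum allowed latency: 2000000000 usec
 *   states:
 *       C1:  type[C1] promotion[C2] demotion[--] latency[000] usage[00000120]
 *      *C2:  type[C2] promotion[C3] demotion[C1] latency[001] usage[00433858]
 *       C3:  type[C3] promotion[--] demotion[C2] latency[085] usage[00021539]
 *
 * (the duration[...] column is elided here for line length)
 */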

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
        .open = acpi_processor_power_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
        /* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
                                         unsigned long l, void *v)
{
        smp_call_function(smp_callback, NULL, 0, 1);
        return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
        .notifier_call = acpi_processor_latency_notify,
};
#endif

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
                                        struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE
                               "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
#ifdef CONFIG_SMP
                register_latency_notifier(&acpi_processor_latency_notifier);
#endif
        }

        if (!pr)
                return -EINVAL;

        if (acpi_gbl_FADT.cst_control && !nocst) {
                status =
                    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Notifying BIOS of _CST ability failed"));
                }
        }

        acpi_processor_get_power_info(pr);

        /*
         * Install the idle handler if processor power management is
         * supported.  Note that the previously set idle handler is kept
         * on platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i,
                                       pr->power.states[i].type);
                printk(")\n");

                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                return -EIO;
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        pr->flags.power_setup_done = 1;

        return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
                              struct acpi_device *device)
{

        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  acpi_device_dir(device));

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
#ifdef CONFIG_SMP
                unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
        }

        return 0;
}