/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT	0x01000000
#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#ifndef CONFIG_CPU_IDLE
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */
static void (*pm_idle_save) (void) __read_mostly;
#else
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#endif
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
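
/*
 * The conversion macros above assume the ACPI PM timer rate of
 * PM_TIMER_FREQUENCY = 3579545 Hz, i.e. roughly 3.579 ticks per
 * microsecond (as the C2/C3_OVERHEAD comments note).  For example,
 * US_TO_PM_TIMER_TICKS(100) evaluates to ~357 ticks, and
 * PM_TIMER_TICK_NS works out to ~279 ns per tick.
 */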

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
#ifdef CONFIG_CPU_IDLE
module_param(max_cstate, uint, 0000);
#else
module_param(max_cstate, uint, 0644);
#endif
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

#ifndef CONFIG_CPU_IDLE
/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
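
/*
 * Example: with HZ=250 the expression yields (1U << 10) - 1 = 0x3FF,
 * i.e. 10 jiffies = 40ms of history, matching the 40ms window of the
 * HZ=100 and HZ=800 cases listed above.
 */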

static int acpi_processor_set_power_policy(struct acpi_processor *pr);

#else	/* CONFIG_CPU_IDLE */
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);
#endif

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void*)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void*)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

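/*
 * The ACPI PM timer is either 24 or 32 bits wide (ACPI_FADT_32BIT_TIMER);
 * the helpers below account for one wraparound.  E.g. on a 24-bit timer,
 * t1 = 0xFFFFF0 followed by t2 = 0x10 yields
 * ((0xFFFFFF - 0xFFFFF0) + 0x10) & 0xFFFFFF = 0x1F ticks.  Sleeps longer
 * than one full timer period still alias, but at ~3.58 MHz a 24-bit
 * timer only wraps about every 4.7 seconds.
 */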
static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}
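
/*
 * Note (x86 behaviour assumed here): safe_halt() executes STI;HLT, so
 * interrupts are re-enabled in the instruction shadow that halts the
 * CPU.  A wakeup interrupt that becomes pending after the need_resched()
 * test above therefore still takes the CPU out of HLT rather than being
 * lost between the test and the halt.
 */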

#ifndef CONFIG_CPU_IDLE

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload */
		if (old->type != ACPI_STATE_C3 && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}
#endif /* !CONFIG_CPU_IDLE */
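
/*
 * On many x86 systems the local APIC timer stops counting in deep
 * C-states.  The helpers in the block below decide, per C-state, when
 * timer duties must be handed over to a broadcast clockevent device
 * (e.g. HPET or PIT) via clockevents_notify(), so per-CPU timers keep
 * firing while a CPU is deep-idle.
 */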

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
			return 0;
		/*FALL THROUGH*/
	case X86_VENDOR_INTEL:
		/* Several cases known where TSC halts in C2 too */
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

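/*
 * mark_tsc_unstable() makes the kernel stop using the TSC as a
 * clocksource, which is why each deep-idle entry path below checks
 * tsc_halts_in_c() after waking up.
 */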
#ifndef CONFIG_CPU_IDLE
static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx || acpi_idle_suspend) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		local_irq_enable();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fallback to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system. We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		local_irq_enable();
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C2, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C2))
			mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		acpi_unlazy_tlb(smp_processor_id());
		/*
		 * Must be done before busmaster disable as we might
		 * need to access HPET !
		 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/*
		 * disable bus master
		 * bm_check implies we need ARB_DIS
		 * !bm_check implies we need cache flush
		 * bm_control implies whether we can do ARB_DIS
		 *
		 * That leaves a case where bm_check is set and bm_control is
		 * not set. In that case we cannot do much, we enter C3
		 * without doing anything.
		 */
		if (pr->flags.bm_check && pr->flags.bm_control) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else if (!pr->flags.bm_check) {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check && pr->flags.bm_control) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
		/* TSC halts in C3, so notify users */
		if (tsc_halts_in_c(ACPI_STATE_C3))
			mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <=
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!
					    (pr->power.bm_activity & cx->
					     promotion.threshold.bm)) {
						next_state =
						    cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency >
				pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY)) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;


	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
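
/*
 * With C1/C2/C3 all valid, the loops above build the ladder
 * C1 <-> C2 <-> C3: each state demotes to the next shallower valid state
 * after a single short sleep (demotion.threshold.count = 1), while
 * promotion out of C1 needs 10 consecutive long sleeps and promotion out
 * of C2 needs 4 (plus a quiet bm_history mask when the target is C3).
 */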
#endif /* !CONFIG_CPU_IDLE */

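/*
 * When no _CST is available, C2/C3 come from the processor's P_BLK: per
 * the ACPI spec, the P_LVL2 and P_LVL3 registers live at P_BLK offsets 4
 * and 5 respectively, and the worst-case C2/C3 latencies come from the
 * FADT.
 */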
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
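
/*
 * Each _CST entry is a 4-element package, which the parser below walks
 * in order: a register descriptor (buffer), the C-state type (integer),
 * the worst-case latency in microseconds (integer), and the typical
 * power consumption in milliwatts (integer).
 */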

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
			} else {
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;


	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU  */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

#ifndef CONFIG_CPU_IDLE
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
#else
	cx->latency_ticks = cx->latency;
#endif

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

#ifndef CONFIG_CPU_IDLE
	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;
#endif

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;


	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifndef CONFIG_CPU_IDLE

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;


	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
		unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};

#endif

#else /* CONFIG_CPU_IDLE */

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
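
/*
 * BM_STS is a write-one-to-clear status bit: writing 1 above rearms it,
 * so the next call observes only bus-master activity that happened since
 * this check.
 */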

/**
 * acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
 * @pr: the processor
 * @target: the new target state
 */
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
					   struct acpi_processor_cx *target)
{
	if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
		pr->flags.bm_rld_set = 0;
	}

	if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		pr->flags.bm_rld_set = 1;
	}
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	local_irq_disable();
	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (pr->flags.bm_check)
		acpi_idle_update_bm_rld(pr, cx);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
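
/*
 * ARB_DIS stops bus-master arbitration entirely, so it may only be set
 * once every online CPU has reached C3.  The spinlock-protected counter
 * below tracks how many CPUs are inside acpi_idle_enter_bm(): the last
 * one in sets ARB_DIS, and the first one out clears it.
 */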
static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = processors[smp_processor_id()];

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	acpi_idle_update_bm_rld(pr, cx);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

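/*
 * target_residency below is a simple heuristic: a state is only worth
 * entering if the CPU expects to stay idle for latency_factor (default 2)
 * times its exit latency.  E.g. a C3 state with a 100us exit latency gets
 * a 200us target residency for the cpuidle governors.
 */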
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
				acpi_idle_enter_bm :
				acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	acpi_processor_setup_cpuidle(pr);
	ret = cpuidle_enable_device(&pr->power.dev);
	cpuidle_resume_and_unlock();

	return ret;
}

#endif /* CONFIG_CPU_IDLE */

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;


	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#if !defined(CONFIG_CPU_IDLE) && defined(CONFIG_SMP)
		pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
#ifdef CONFIG_CPU_IDLE
		acpi_processor_setup_cpuidle(pr);
		pr->power.dev.cpu = pr->id;
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
#endif

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

#ifndef CONFIG_CPU_IDLE
		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
#endif
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
#ifdef CONFIG_CPU_IDLE
	if ((pr->flags.power) && (!boot_option_idle_override))
		cpuidle_unregister_device(&pr->power.dev);
#endif
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

#ifndef CONFIG_CPU_IDLE

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		pm_qos_remove_notifier(PM_QOS_CPU_DMA_LATENCY,
				&acpi_processor_latency_notifier);
#endif
	}
#endif

	return 0;
}