/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *  - Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *  - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
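
/*
 * For reference: the ACPI PM timer runs at PM_TIMER_FREQUENCY
 * (3.579545 MHz), so one tick is roughly 279 ns.  With integer
 * division, e.g. PM_TIMER_TICKS_TO_US(3580) = (3580 * 1000) / 3579,
 * which is about 1000 us.
 */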

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
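
/*
 * Worked example of the wraparound handling above: with a 24-bit PM
 * timer, t1 = 0x00FFFFF0 and t2 = 0x00000010 yields
 * ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF = 0x1F, i.e. 31 ticks.
 */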

static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return PM_TIMER_TICKS_TO_US(t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
}

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}
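
/*
 * Note: on x86, safe_halt() executes "sti; hlt", so interrupts are
 * briefly enabled to service the wakeup event; the local_irq_disable()
 * above then restores the interrupts-off state the caller expects.
 */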

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}
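
/*
 * CLOCK_EVT_NOTIFY_BROADCAST_ENTER hands this CPU's timer duty to the
 * broadcast clockevent device before the lapic timer stops in deep
 * idle; BROADCAST_EXIT takes it back after wakeup.
 */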

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;

int acpi_processor_suspend(struct acpi_device * device, pm_message_t state)
{
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device * device)
{
	acpi_idle_suspend = 0;
	return 0;
}
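
/*
 * While acpi_idle_suspend is set, the idle entry points below fall
 * back to plain C1/halt, so no ACPI I/O ports are touched during the
 * suspend/resume path.
 */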

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
static int tsc_halts_in_c(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return 0;

		/*FALL THROUGH*/
	default:
		return state > ACPI_STATE_C1;
	}
}
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}
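
/*
 * P_BLK layout (per the ACPI spec): P_CNT is a 4-byte register at
 * offset 0, followed by the one-byte P_LVL2 and P_LVL3 registers --
 * hence the pblk + 4 and pblk + 5 addresses above.
 */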

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
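
/*
 * For reference, _CST returns a package of the form
 *   Package { Count, Package { Register, Type, Latency, Power }, ... }
 * which is why elements[0..3] of each sub-package are decoded above as
 * the entry register, C-state type, worst-case latency, and power.
 */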

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;

	return;
}
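
/*
 * The 100 us (C2) and 1000 us (C3) cutoffs checked here and below are
 * the worst-case latencies the ACPI spec allows for those state types;
 * states advertising more than that are rejected as invalid.
 */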

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3. Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY));

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.owner = THIS_MODULE,
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	acpi_get_register_unlocked(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		int unused;
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
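
/*
 * The three entry methods map onto hardware as follows:
 * ACPI_CSTATE_FFH uses MWAIT via acpi_processor_ffh_cstate_enter(),
 * ACPI_CSTATE_HALT uses the HLT instruction, and ACPI_CSTATE_SYSTEMIO
 * reads the P_LVLx I/O port, which the chipset interprets as the
 * C-state trigger.
 */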

/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	u32 t1, t2;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		acpi_safe_halt();
		local_irq_enable();
		return 0;
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	local_irq_enable();
	cx->usage++;

	return ticks_elapsed_in_us(t1, t2);
}
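
/*
 * Note that the PM-timer reads bracketing C1 entry above measure wall
 * time, not true residency: a HLT-based C1 can be interrupted at any
 * point, which is why acpi_processor_setup_cpuidle() below marks C1
 * timings as valid only for the FFH (MWAIT) entry method.
 */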

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(cx->type))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	u32 t1, t2;
	int sleep_ticks = 0;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return(acpi_idle_enter_c1(dev, state));

	if (acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we test
	 * NEED_RESCHED:
	 */
	smp_mb();

	if (unlikely(need_resched())) {
		current_thread_info()->status |= TS_POLLING;
		local_irq_enable();
		return 0;
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	acpi_state_timer_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
	acpi_idle_do_entry(cx);
	t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}

#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86)
	/* TSC could halt in idle, so notify users */
	if (tsc_halts_in_c(ACPI_STATE_C3))
		mark_tsc_unstable("TSC halts in idle");
#endif
	sleep_ticks = ticks_elapsed(t1, t2);
	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

	local_irq_enable();
	current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	acpi_state_timer_broadcast(pr, cx, 0);
	cx->time += sleep_ticks;
	return ticks_elapsed_in_us(t1, t2);
}
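
/*
 * c3_lock serializes updates to c3_cpu_count so that bus master
 * arbitration is disabled exactly once, when the last online CPU
 * enters C3, and re-enabled as soon as any CPU leaves it.
 */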

struct cpuidle_driver acpi_idle_driver = {
	.name = "acpi_idle",
	.owner = THIS_MODULE,
};

/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0) {
		return -EINVAL;
	}

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst) {
		return -ENODEV;
	}

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
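
/*
 * The function above is typically invoked from the processor driver's
 * notify handler when the platform signals a change in the available
 * C-states (e.g. after an AC <-> battery transition), so the cpuidle
 * state table is rebuilt under cpuidle_pause_and_lock().
 */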

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;

		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");
	}

	/* 'power' [R] */
	entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER,
				 S_IRUGO, acpi_device_dir(device),
				 &acpi_processor_power_fops,
				 acpi_driver_data(device));
	if (!entry)
		return -EIO;
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

	cpuidle_unregister_device(&pr->power.dev);
	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	return 0;
}