drivers/acpi/processor_idle.c
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define _COMPONENT                      ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

#define ACPI_PROCESSOR_FILE_POWER       "power"

#define US_TO_PM_TIMER_TICKS(t)         ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define C2_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
#define C3_OVERHEAD                     4       /* 1us (3.579 ticks per us) */
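
/*
 * Worked example (illustrative only): the ACPI PM timer runs at
 * PM_TIMER_FREQUENCY = 3579545 Hz, i.e. roughly 3.58 ticks per
 * microsecond, so
 *
 *   US_TO_PM_TIMER_TICKS(100) == (100 * (3579545/1000)) / 1000
 *                             == (100 * 3579) / 1000
 *                             == 357 ticks  (~99.8 us)
 *
 * which is why the 4-tick C2_OVERHEAD/C3_OVERHEAD values above
 * correspond to roughly 1 us of entry/exit overhead.
 */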

static void (*pm_idle_save)(void);
module_param(max_cstate, uint, 0644);

static unsigned int nocst = 0;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history = (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
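
/*
 * Illustration (not part of the driver): for HZ below 800 the mask
 * above keeps about 40ms of history, e.g.
 *
 *   HZ = 100:  (1U << (100/25)) - 1 == (1U << 4)  - 1 == 0x0000000F
 *   HZ = 250:  (1U << (250/25)) - 1 == (1U << 10) - 1 == 0x000003FF
 *   HZ = 1000: HZ >= 800, so the full 32-bit mask 0xFFFFFFFF is used
 *              (32 jiffies = 32ms of history).
 */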
/* --------------------------------------------------------------------------
                              Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this.  Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int no_c2c3(struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        printk(KERN_NOTICE PREFIX "%s detected - C2,C3 disabled."
               " Override with \"processor.max_cstate=%d\"\n", id->ident,
               ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = 1;

        return 0;
}

static struct dmi_system_id __initdata processor_power_dmi_table[] = {
        { no_c2c3, "IBM ThinkPad R40e", {
          DMI_MATCH(DMI_BIOS_VENDOR, "IBM"),
          DMI_MATCH(DMI_BIOS_VERSION, "1SET60WW") }},
        { no_c2c3, "Medion 41700", {
          DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION, "R01-A1J") }},
        {},
};

static inline u32
ticks_elapsed (
        u32 t1,
        u32 t2)
{
        if (t2 >= t1)
                return (t2 - t1);
        else if (!acpi_fadt.tmr_val_ext)
                return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
        else
                return ((0xFFFFFFFF - t1) + t2);
}
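
/*
 * Example (illustrative numbers): with a 24-bit PM timer
 * (acpi_fadt.tmr_val_ext == 0) that wrapped between the two samples,
 *
 *   t1 = 0x00FFFF00, t2 = 0x00000100
 *   ticks_elapsed(t1, t2) == ((0x00FFFFFF - 0x00FFFF00) + 0x00000100)
 *                            & 0x00FFFFFF
 *                         == 0x1FF == 511 ticks (~143 us at ~3.58 ticks/us)
 */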

static void
acpi_processor_power_activate (
        struct acpi_processor *pr,
        struct acpi_processor_cx *new)
{
        struct acpi_processor_cx *old;

        if (!pr || !new)
                return;

        old = pr->power.state;

        if (old)
                old->promotion.count = 0;
        new->demotion.count = 0;

        /* Cleanup from old state. */
        if (old) {
                switch (old->type) {
                case ACPI_STATE_C3:
                        /* Disable bus master reload */
                        if (new->type != ACPI_STATE_C3)
                                acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
                        break;
                }
        }

        /* Prepare to use new state. */
        switch (new->type) {
        case ACPI_STATE_C3:
                /* Enable bus master reload.  Note that 'old' may be NULL
                 * on the very first activation, so guard the dereference. */
                if (!old || old->type != ACPI_STATE_C3)
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);
                break;
        }

        pr->power.state = new;

        return;
}
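
/*
 * Background note (per the ACPI specification, stated here for context):
 * BM_RLD controls whether a bus-master request transitions the CPU out
 * of C3.  It only matters while a C3-type state is in use, which is why
 * it is toggled on C3 entry/exit above rather than on every idle pass.
 */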

static void acpi_processor_idle (void)
{
        struct acpi_processor *pr = NULL;
        struct acpi_processor_cx *cx = NULL;
        struct acpi_processor_cx *next_state = NULL;
        int sleep_ticks = 0;
        u32 t1, t2 = 0;

        pr = processors[_smp_processor_id()];
        if (!pr)
                return;

        /*
         * Interrupts must be disabled during bus mastering calculations and
         * for C2/C3 transitions.
         */
        local_irq_disable();

        /*
         * Check whether we truly need to go idle, or should
         * reschedule:
         */
        if (unlikely(need_resched())) {
                local_irq_enable();
                return;
        }

        cx = pr->power.state;
        if (!cx)
                goto easy_out;

        /*
         * Check BM Activity
         * -----------------
         * Check for bus mastering activity (if required), record, and check
         * for demotion.
         */
        if (pr->flags.bm_check) {
                u32 bm_status = 0;
                unsigned long diff = jiffies - pr->power.bm_check_timestamp;

                if (diff > 32)
                        diff = 32;

                while (diff) {
                        /* if we didn't get called, assume there was busmaster activity */
                        diff--;
                        if (diff)
                                pr->power.bm_activity |= 0x1;
                        pr->power.bm_activity <<= 1;
                }

                acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                  &bm_status, ACPI_MTX_DO_NOT_LOCK);
                if (bm_status) {
                        pr->power.bm_activity++;
                        acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS,
                                          1, ACPI_MTX_DO_NOT_LOCK);
                }
                /*
                 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
                 * the true state of bus mastering activity, which forces us
                 * to manually check the BMIDEA bit of each IDE channel.
                 */
                else if (errata.piix4.bmisx) {
                        if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                            || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                                pr->power.bm_activity++;
                }

                pr->power.bm_check_timestamp = jiffies;

                /*
                 * Apply bus mastering demotion policy.  Automatically demote
                 * to avoid a faulty transition.  Note that the processor
                 * won't enter a low-power state during this call (to this
                 * function) but should upon the next.
                 *
                 * TBD: A better policy might be to fall back to the demotion
                 *      state (use it for this quantum only) instead of
                 *      demoting -- and rely on duration as our sole demotion
                 *      qualification.  This may, however, introduce DMA
                 *      issues (e.g. floppy DMA transfer overrun/underrun).
                 */
                if (pr->power.bm_activity & cx->demotion.threshold.bm) {
                        local_irq_enable();
                        next_state = cx->demotion.state;
                        goto end;
                }
        }

        cx->usage++;

        /*
         * Sleep:
         * ------
         * Invoke the current Cx state to put the processor to sleep.
         */
        switch (cx->type) {

        case ACPI_STATE_C1:
                /*
                 * Invoke C1.
                 * Use the appropriate idle routine, the one that would
                 * be used without acpi C-states.
                 */
                if (pm_idle_save)
                        pm_idle_save();
                else
                        safe_halt();
                /*
                 * TBD: Can't get time duration while in C1, as resumes
                 *      go to an ISR rather than here.  Need to instrument
                 *      base interrupt handler.
                 */
                sleep_ticks = 0xFFFFFFFF;
                break;

        case ACPI_STATE_C2:
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C2 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL2 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C2_OVERHEAD;
                break;

        case ACPI_STATE_C3:
                /* Disable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
                /* Get start time (ticks) */
                t1 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Invoke C3 */
                inb(cx->address);
                /* Dummy op - must do something useless after P_LVL3 read */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Get end time (ticks) */
                t2 = inl(acpi_fadt.xpm_tmr_blk.address);
                /* Enable bus master arbitration */
                acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
                /* Re-enable interrupts */
                local_irq_enable();
                /* Compute time (ticks) that we were actually asleep */
                sleep_ticks = ticks_elapsed(t1, t2) - cx->latency_ticks - C3_OVERHEAD;
                break;

        default:
                local_irq_enable();
                return;
        }

        next_state = pr->power.state;

        /*
         * Promotion?
         * ----------
         * Track the number of longs (time asleep is greater than threshold)
         * and promote when the count threshold is reached.  Note that bus
         * mastering activity may prevent promotions.
         * Do not promote above max_cstate.
         */
        if (cx->promotion.state &&
            ((cx->promotion.state - pr->power.states) <= max_cstate)) {
                if (sleep_ticks > cx->promotion.threshold.ticks) {
                        cx->promotion.count++;
                        cx->demotion.count = 0;
                        if (cx->promotion.count >= cx->promotion.threshold.count) {
                                if (pr->flags.bm_check) {
                                        if (!(pr->power.bm_activity & cx->promotion.threshold.bm)) {
                                                next_state = cx->promotion.state;
                                                goto end;
                                        }
                                } else {
                                        next_state = cx->promotion.state;
                                        goto end;
                                }
                        }
                }
        }

        /*
         * Demotion?
         * ---------
         * Track the number of shorts (time asleep is less than time threshold)
         * and demote when the usage threshold is reached.
         */
        if (cx->demotion.state) {
                if (sleep_ticks < cx->demotion.threshold.ticks) {
                        cx->demotion.count++;
                        cx->promotion.count = 0;
                        if (cx->demotion.count >= cx->demotion.threshold.count) {
                                next_state = cx->demotion.state;
                                goto end;
                        }
                }
        }

end:
        /*
         * Demote if current state exceeds max_cstate
         */
        if ((pr->power.state - pr->power.states) > max_cstate) {
                if (cx->demotion.state)
                        next_state = cx->demotion.state;
        }

        /*
         * New Cx State?
         * -------------
         * If we're going to start using a new Cx state we must clean up
         * from the previous and prepare to use the new.
         */
        if (next_state != pr->power.state)
                acpi_processor_power_activate(pr, next_state);

        return;

easy_out:
        /* do C1 instead of busy loop */
        if (pm_idle_save)
                pm_idle_save();
        else
                safe_halt();
        return;
}
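
/*
 * Worked example of the policy above (illustrative numbers): suppose the
 * CPU is in C2, C2's promotion target is C3, and the default thresholds
 * from acpi_processor_set_power_policy() apply (promotion.threshold.count
 * == 4, promotion.threshold.ticks == C2's own latency_ticks).  Four
 * consecutive idle periods that each sleep longer than the threshold
 * raise promotion.count to 4, and -- provided no bus-master activity is
 * recorded in bm_activity -- the next pass switches pr->power.state to
 * C3.  A single short sleep in between resets promotion.count to 0 via
 * the demotion bookkeeping.
 */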

static int
acpi_processor_set_power_policy (
        struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int state_is_set = 0;
        struct acpi_processor_cx *lower = NULL;
        struct acpi_processor_cx *higher = NULL;
        struct acpi_processor_cx *cx;

        ACPI_FUNCTION_TRACE("acpi_processor_set_power_policy");

        if (!pr)
                return_VALUE(-EINVAL);

        /*
         * This function sets the default Cx state policy (OS idle handler).
         * Our scheme is to promote quickly to C2 but more conservatively
         * to C3.  We're favoring C2 for its characteristics of low latency
         * (quick response), good power savings, and ability to allow bus
         * mastering activity.  Note that the Cx state policy is completely
         * customizable and can be altered dynamically.
         */

        /* startup state */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (!state_is_set)
                        pr->power.state = cx;
                state_is_set++;
                break;
        }

        if (!state_is_set)
                return_VALUE(-ENODEV);

        /* demotion */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (lower) {
                        cx->demotion.state = lower;
                        cx->demotion.threshold.ticks = cx->latency_ticks;
                        cx->demotion.threshold.count = 1;
                        if (cx->type == ACPI_STATE_C3)
                                cx->demotion.threshold.bm = bm_history;
                }

                lower = cx;
        }

        /* promotion */
        for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
                cx = &pr->power.states[i];
                if (!cx->valid)
                        continue;

                if (higher) {
                        cx->promotion.state = higher;
                        cx->promotion.threshold.ticks = cx->latency_ticks;
                        if (cx->type >= ACPI_STATE_C2)
                                cx->promotion.threshold.count = 4;
                        else
                                cx->promotion.threshold.count = 10;
                        if (higher->type == ACPI_STATE_C3)
                                cx->promotion.threshold.bm = bm_history;
                }

                higher = cx;
        }

        return_VALUE(0);
}
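
/*
 * Resulting default ladder when C1, C2 and C3 are all valid
 * (illustrative summary, derived from the loops above):
 *
 *   C1: promotes to C2 after 10 sufficiently long sleeps; no demotion.
 *   C2: promotes to C3 after 4 long sleeps (blocked while bus-master
 *       activity is recorded); demotes to C1 after 1 short sleep.
 *   C3: demotes to C2 after 1 short sleep, or immediately when
 *       bm_activity intersects the bm_history mask.
 */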

static int acpi_processor_get_power_info_fadt (struct acpi_processor *pr)
{
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_fadt");

        if (!pr)
                return_VALUE(-EINVAL);

        if (!pr->pblk)
                return_VALUE(-ENODEV);

        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_fadt.plvl2_lat;
        pr->power.states[ACPI_STATE_C3].latency = acpi_fadt.plvl3_lat;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return_VALUE(0);
}
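
/*
 * P_BLK layout for reference (per the ACPI specification); the +4/+5
 * offsets above locate the P_LVL2 and P_LVL3 registers:
 *
 *   offset 0: P_CNT  (4 bytes, processor control)
 *   offset 4: P_LVL2 (1 byte;  reading it enters C2)
 *   offset 5: P_LVL3 (1 byte;  reading it enters C3)
 */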

static int acpi_processor_get_power_info_default_c1 (struct acpi_processor *pr)
{
        int i;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_default_c1");

        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

        /* the C0 state only exists as a filler in our array,
         * and all processors need to support C1 */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        pr->power.states[ACPI_STATE_C1].valid = 1;

        return_VALUE(0);
}

static int acpi_processor_get_power_info_cst (struct acpi_processor *pr)
{
        acpi_status status = 0;
        acpi_integer count;
        int i;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *cst;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info_cst");

        if (errata.smp)
                return_VALUE(-ENODEV);

        if (nocst)
                return_VALUE(-ENODEV);

        pr->power.count = 0;
        for (i = 0; i < ACPI_PROCESSOR_MAX_POWER; i++)
                memset(&(pr->power.states[i]), 0, sizeof(struct acpi_processor_cx));

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return_VALUE(-ENODEV);
        }

        cst = (union acpi_object *) buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "not enough elements in _CST\n"));
                status = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "count given by _CST is not valid\n"));
                status = -EFAULT;
                goto end;
        }

        /* We support up to ACPI_PROCESSOR_MAX_POWER. */
        if (count > ACPI_PROCESSOR_MAX_POWER) {
                printk(KERN_WARNING "Limiting number of power states to max (%d)\n",
                       ACPI_PROCESSOR_MAX_POWER);
                printk(KERN_WARNING "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                count = ACPI_PROCESSOR_MAX_POWER;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = (union acpi_object *) &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = (union acpi_object *) &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *) obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                cx.address = (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) ?
                        0 : reg->address;

                /* There should be an easy way to extract an integer... */
                obj = (union acpi_object *) &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;

                if ((cx.type != ACPI_STATE_C1) &&
                    (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO))
                        continue;

                if ((cx.type < ACPI_STATE_C1) ||
                    (cx.type > ACPI_STATE_C3))
                        continue;

                obj = (union acpi_object *) &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = (union acpi_object *) &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.power = obj->integer.value;

                (pr->power.count)++;
                memcpy(&(pr->power.states[pr->power.count]), &cx, sizeof(cx));
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n", pr->power.count));

        /* Validate number of power states discovered */
        if (pr->power.count < 2)
                status = -ENODEV;

end:
        acpi_os_free(buffer.pointer);

        return_VALUE(status);
}
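
/*
 * For context, the _CST object parsed above is firmware-side ASL with
 * roughly this shape (example only -- actual contents come from the
 * platform BIOS, and the values below are made up):
 *
 *   Name (_CST, Package () {
 *       2,                                               // count
 *       Package () { ResourceTemplate () {
 *               Register (FFixedHW, 0, 0, 0) },          // C1, fixed hardware
 *               1,                                       // type (C1)
 *               1,                                       // latency (us)
 *               1000 },                                  // power (mW)
 *       Package () { ResourceTemplate () {
 *               Register (SystemIO, 8, 0, 0x1014) },     // C2 via I/O port
 *               2, 90, 500 }
 *   })
 *
 * i.e. element [0] of each sub-package is the register buffer, [1] the
 * C-state type, [2] the worst-case latency in microseconds, and [3] the
 * typical power consumption -- matching the four reads in the loop above.
 */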

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{
        ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c2");

        if (!cx->address)
                return_VOID;

        /*
         * C2 latency must be less than or equal to 100
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n",
                                  cx->latency));
                return_VOID;
        }

        /* We're (currently) only supporting C2 on UP */
        else if (errata.smp) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C2 not supported in SMP mode\n"));
                return_VOID;
        }

        /*
         * Otherwise we've met all of our C2 requirements.
         * Normalize the C2 latency to expedite policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

        return_VOID;
}

static void acpi_processor_power_verify_c3(
        struct acpi_processor *pr,
        struct acpi_processor_cx *cx)
{
        ACPI_FUNCTION_TRACE("acpi_processor_get_power_verify_c3");

        if (!cx->address)
                return_VOID;

        /*
         * C3 latency must be less than or equal to 1000
         * microseconds.
         */
        else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "latency too large [%d]\n",
                                  cx->latency));
                return_VOID;
        }

        /* bus mastering control is necessary */
        else if (!pr->flags.bm_control) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 support requires bus mastering control\n"));
                return_VOID;
        }

        /* We're (currently) only supporting C3 on UP */
        else if (errata.smp) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported in SMP mode\n"));
                return_VOID;
        }

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        else if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return_VOID;
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;
        cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
        pr->flags.bm_check = 1;

        return_VOID;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        acpi_processor_power_verify_c2(cx);
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }

                if (cx->valid)
                        working++;
        }

        return (working);
}

static int acpi_processor_get_power_info (
        struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        ACPI_FUNCTION_TRACE("acpi_processor_get_power_info");

        /* NOTE: the idle thread may not be running while calling
         * this function */

        result = acpi_processor_get_power_info_cst(pr);
        if ((result) || (acpi_processor_power_verify(pr) < 2)) {
                result = acpi_processor_get_power_info_fadt(pr);
                if (result)
                        result = acpi_processor_get_power_info_default_c1(pr);
        }

        /*
         * Set Default Policy
         * ------------------
         * Now that we know which states are supported, set the default
         * policy.  Note that this policy can be changed dynamically
         * (e.g. encourage deeper sleeps to conserve battery life when
         * not on AC).
         */
        result = acpi_processor_set_power_policy(pr);
        if (result)
                return_VALUE(result);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        pr->flags.power = 1;
                }
        }

        return_VALUE(0);
}

int acpi_processor_cst_has_changed (struct acpi_processor *pr)
{
        int result = 0;

        ACPI_FUNCTION_TRACE("acpi_processor_cst_has_changed");

        if (!pr)
                return_VALUE(-EINVAL);

        if (errata.smp || nocst) {
                return_VALUE(-ENODEV);
        }

        if (!pr->flags.power_setup_done)
                return_VALUE(-ENODEV);

        /* Fall back to the default idle loop */
        pm_idle = pm_idle_save;
        synchronize_sched();    /* Relies on interrupts forcing exit from idle. */

        pr->flags.power = 0;
        result = acpi_processor_get_power_info(pr);
        if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
                pm_idle = acpi_processor_idle;

        return_VALUE(result);
}

/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_seq_show");

        if (!pr)
                goto end;

        seq_printf(seq, "active state: C%zd\n"
                        "max_cstate: C%d\n"
                        "bus master activity: %08x\n",
                   pr->power.state ? pr->power.state - pr->power.states : 0,
                   max_cstate,
                   (unsigned)pr->power.bm_activity);

        seq_puts(seq, "states:\n");

        for (i = 1; i <= pr->power.count; i++) {
                seq_printf(seq, " %cC%d: ",
                           (&pr->power.states[i] == pr->power.state ? '*' : ' '), i);

                if (!pr->power.states[i].valid) {
                        seq_puts(seq, "<not supported>\n");
                        continue;
                }

                switch (pr->power.states[i].type) {
                case ACPI_STATE_C1:
                        seq_printf(seq, "type[C1] ");
                        break;
                case ACPI_STATE_C2:
                        seq_printf(seq, "type[C2] ");
                        break;
                case ACPI_STATE_C3:
                        seq_printf(seq, "type[C3] ");
                        break;
                default:
                        seq_printf(seq, "type[--] ");
                        break;
                }

                if (pr->power.states[i].promotion.state)
                        seq_printf(seq, "promotion[C%zd] ",
                                   (pr->power.states[i].promotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "promotion[--] ");

                if (pr->power.states[i].demotion.state)
                        seq_printf(seq, "demotion[C%zd] ",
                                   (pr->power.states[i].demotion.state -
                                    pr->power.states));
                else
                        seq_puts(seq, "demotion[--] ");

                seq_printf(seq, "latency[%03d] usage[%08d]\n",
                           pr->power.states[i].latency,
                           pr->power.states[i].usage);
        }

end:
        return_VALUE(0);
}
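
/*
 * Sample output (illustrative values, derived from the seq_printf
 * calls above):
 *
 *   $ cat /proc/acpi/processor/CPU0/power
 *   active state: C2
 *   max_cstate: C8
 *   bus master activity: 00000000
 *   states:
 *     C1: type[C1] promotion[C2] demotion[--] latency[000] usage[00000010]
 *    *C2: type[C2] promotion[--] demotion[C1] latency[090] usage[00013742]
 */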

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_power_seq_show,
                           PDE(inode)->data);
}

static struct file_operations acpi_processor_power_fops = {
        .open           = acpi_processor_power_open_fs,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

int acpi_processor_power_init(struct acpi_processor *pr, struct acpi_device *device)
{
        acpi_status status = 0;
        static int first_run = 0;
        struct proc_dir_entry *entry = NULL;
        unsigned int i;

        ACPI_FUNCTION_TRACE("acpi_processor_power_init");

        if (!first_run) {
                dmi_check_system(processor_power_dmi_table);
                if (max_cstate < ACPI_C_STATES_MAX)
                        printk(KERN_NOTICE "ACPI: processor limited to max C-state %d\n",
                               max_cstate);
                first_run++;
        }

        if (!errata.smp && (pr->id == 0) && acpi_fadt.cst_cnt && !nocst) {
                status = acpi_os_write_port(acpi_fadt.smi_cmd, acpi_fadt.cst_cnt, 8);
                if (ACPI_FAILURE(status)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                          "Notifying BIOS of _CST ability failed\n"));
                }
        }

        acpi_processor_get_power_info(pr);

        /*
         * Install the idle handler if processor power management is supported.
         * Note that the previously set idle handler will continue to be used
         * on platforms that only support C1.
         */
        if ((pr->flags.power) && (!boot_option_idle_override)) {
                printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
                for (i = 1; i <= pr->power.count; i++)
                        if (pr->power.states[i].valid)
                                printk(" C%d[C%d]", i, pr->power.states[i].type);
                printk(")\n");

                if (pr->id == 0) {
                        pm_idle_save = pm_idle;
                        pm_idle = acpi_processor_idle;
                }
        }

        /* 'power' [R] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
                                  S_IRUGO, acpi_device_dir(device));
        if (!entry)
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
                                  "Unable to create '%s' fs entry\n",
                                  ACPI_PROCESSOR_FILE_POWER));
        else {
                entry->proc_fops = &acpi_processor_power_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }

        pr->flags.power_setup_done = 1;

        return_VALUE(0);
}

int acpi_processor_power_exit(struct acpi_processor *pr, struct acpi_device *device)
{
        ACPI_FUNCTION_TRACE("acpi_processor_power_exit");

        pr->flags.power_setup_done = 0;

        if (acpi_device_dir(device))
                remove_proc_entry(ACPI_PROCESSOR_FILE_POWER, acpi_device_dir(device));

        /* Unregister the idle handler when processor #0 is removed. */
        if (pr->id == 0) {
                pm_idle = pm_idle_save;

                /*
                 * We are about to unload the current idle thread pm callback
                 * (pm_idle).  Wait for all processors to update cached/local
                 * copies of pm_idle before proceeding.
                 */
                cpu_idle_wait();
        }

        return_VALUE(0);
}