/*
 * acpi-cpufreq.c - ACPI Processor P-States Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2002 - 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2006       Denis Sadykov <denis.m.sadykov@intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>

#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>

#include <acpi/processor.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include "mperf.h"
MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
MODULE_DESCRIPTION("ACPI Processor P-States Driver");
MODULE_LICENSE("GPL");

#define PFX "acpi-cpufreq: "
enum {
	UNDEFINED_CAPABLE = 0,
	SYSTEM_INTEL_MSR_CAPABLE,
	SYSTEM_AMD_MSR_CAPABLE,
	SYSTEM_IO_CAPABLE,
};
#define INTEL_MSR_RANGE		(0xffff)
#define AMD_MSR_RANGE		(0x7)

#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
struct acpi_cpufreq_data {
	struct acpi_processor_performance *acpi_data;
	struct cpufreq_frequency_table *freq_table;
	unsigned int resume;
	unsigned int cpu_feature;
};
static DEFINE_PER_CPU(struct acpi_cpufreq_data *, acfreq_data);

/* acpi_perf_data is a pointer to percpu data. */
static struct acpi_processor_performance __percpu *acpi_perf_data;

static struct cpufreq_driver acpi_cpufreq_driver;

static unsigned int acpi_pstate_strict;
static bool boost_enabled, boost_supported;
static struct msr __percpu *msrs;
static bool boost_state(unsigned int cpu)
{
	u32 lo, hi;
	u64 msr;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
	case X86_VENDOR_AMD:
		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
		msr = lo | ((u64)hi << 32);
		return !(msr & MSR_K7_HWCR_CPB_DIS);
	}
	return false;
}
static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
{
	u32 cpu;
	u32 msr_addr;
	u64 msr_mask;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		msr_addr = MSR_IA32_MISC_ENABLE;
		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
		break;
	case X86_VENDOR_AMD:
		msr_addr = MSR_K7_HWCR;
		msr_mask = MSR_K7_HWCR_CPB_DIS;
		break;
	default:
		return;
	}

	rdmsr_on_cpus(cpumask, msr_addr, msrs);

	for_each_cpu(cpu, cpumask) {
		struct msr *reg = per_cpu_ptr(msrs, cpu);
		if (enable)
			reg->q &= ~msr_mask;
		else
			reg->q |= msr_mask;
	}

	wrmsr_on_cpus(cpumask, msr_addr, msrs);
}
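
/*
 * sysfs store helper shared by the global "boost" attribute and the
 * AMD-only legacy "cpb" attribute: parse "0"/"1" and flip boost on all
 * online CPUs, but only when the requested state differs from the
 * current one.
 */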
static ssize_t _store_boost(const char *buf, size_t count)
{
	int ret;
	unsigned long val = 0;

	if (!boost_supported)
		return -EINVAL;

	ret = kstrtoul(buf, 10, &val);
	if (ret || (val > 1))
		return -EINVAL;

	if ((val && boost_enabled) || (!val && !boost_enabled))
		return count;

	get_online_cpus();

	boost_set_msrs(val, cpu_online_mask);

	put_online_cpus();

	boost_enabled = val;
	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");

	return count;
}
static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_global_boost(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct global_attr global_boost = __ATTR(boost, 0644,
						show_global_boost,
						store_global_boost);
#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
			 size_t count)
{
	return _store_boost(buf, count);
}

static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
{
	return sprintf(buf, "%u\n", boost_enabled);
}

static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
#endif
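
/*
 * CPU feature probes: Enhanced SpeedStep (Intel) and hardware P-states
 * (AMD) determine which MSR interface, if any, this driver may use.
 */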
static int check_est_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_EST);
}

static int check_amd_hwpstate_cpu(unsigned int cpuid)
{
	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);

	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
}
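
/*
 * extract_io()/extract_msr() translate a raw status value (an IO port
 * read or a PERF_STATUS MSR) back into a frequency from freq_table.
 */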
static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
{
	struct acpi_processor_performance *perf;
	int i;

	perf = data->acpi_data;

	for (i = 0; i < perf->state_count; i++) {
		if (value == perf->states[i].status)
			return data->freq_table[i].frequency;
	}

	return 0;
}
static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
{
	int i;
	struct acpi_processor_performance *perf;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		msr &= AMD_MSR_RANGE;
	else
		msr &= INTEL_MSR_RANGE;

	perf = data->acpi_data;

	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (msr == perf->states[data->freq_table[i].index].status)
			return data->freq_table[i].frequency;
	}
	return data->freq_table[0].frequency;
}
static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
{
	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		return extract_msr(val, data);
	case SYSTEM_IO_CAPABLE:
		return extract_io(val, data);
	default:
		return 0;
	}
}
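
/*
 * A drv_cmd bundles everything needed to read or write a P-state control
 * on a set of CPUs, so it can be handed to the smp_call_function helpers
 * below and executed on the target CPU(s).
 */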
struct msr_addr {
	u32 reg;
};

struct io_addr {
	u16 port;
	u8 bit_width;
};

struct drv_cmd {
	unsigned int type;
	const struct cpumask *mask;
	union {
		struct msr_addr msr;
		struct io_addr io;
	} addr;
	u32 val;
};
/* Called via smp_call_function_single(), on the target CPU */
static void do_drv_read(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 h;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
	case SYSTEM_AMD_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, cmd->val, h);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_read_port((acpi_io_address)cmd->addr.io.port,
				&cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
/* Called via smp_call_function_many(), on the target CPUs */
static void do_drv_write(void *_cmd)
{
	struct drv_cmd *cmd = _cmd;
	u32 lo, hi;

	switch (cmd->type) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		rdmsr(cmd->addr.msr.reg, lo, hi);
		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
		wrmsr(cmd->addr.msr.reg, lo, hi);
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
		break;
	case SYSTEM_IO_CAPABLE:
		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
				cmd->val,
				(u32)cmd->addr.io.bit_width);
		break;
	default:
		break;
	}
}
static void drv_read(struct drv_cmd *cmd)
{
	int err;

	cmd->val = 0;

	err = smp_call_function_any(cmd->mask, do_drv_read, cmd, 1);
	WARN_ON_ONCE(err);	/* smp_call_function_any() was buggy? */
}
static void drv_write(struct drv_cmd *cmd)
{
	int this_cpu;

	this_cpu = get_cpu();
	if (cpumask_test_cpu(this_cpu, cmd->mask))
		do_drv_write(cmd);

	smp_call_function_many(cmd->mask, do_drv_write, cmd, 1);
	put_cpu();
}
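
/*
 * Read the current raw P-state status for the first CPU in @mask, using
 * whichever interface (Intel MSR, AMD MSR, or IO port) that CPU supports.
 */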
static u32 get_cur_val(const struct cpumask *mask)
{
	struct acpi_processor_performance *perf;
	struct drv_cmd cmd;

	if (unlikely(cpumask_empty(mask)))
		return 0;

	switch (per_cpu(acfreq_data, cpumask_first(mask))->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		break;
	default:
		return 0;
	}

	cmd.mask = mask;
	drv_read(&cmd);

	pr_debug("get_cur_val = %u\n", cmd.val);

	return cmd.val;
}
static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, cpu);
	unsigned int freq;
	unsigned int cached_freq;

	pr_debug("get_cur_freq_on_cpu (%d)\n", cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return 0;
	}

	cached_freq = data->freq_table[data->acpi_data->state].frequency;
	freq = extract_freq(get_cur_val(cpumask_of(cpu)), data);
	if (freq != cached_freq) {
		/*
		 * The dreaded BIOS frequency change behind our back.
		 * Force set the frequency on next target call.
		 */
		data->resume = 1;
	}

	pr_debug("cur freq = %u\n", freq);

	return freq;
}
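
/*
 * Poll (up to 100 times) until the CPUs in @mask report the requested
 * frequency; used only in acpi_pstate_strict mode to verify transitions.
 */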
static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
				struct acpi_cpufreq_data *data)
{
	unsigned int cur_freq;
	unsigned int i;

	for (i = 0; i < 100; i++) {
		cur_freq = extract_freq(get_cur_val(mask), data);
		if (cur_freq == freq)
			return 1;
		udelay(10);
	}
	return 0;
}
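
/*
 * ->target() callback: map the requested frequency to a P-state, program
 * it through the appropriate interface, and emit the PRE-/POSTCHANGE
 * cpufreq notifications around the write.
 */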
static int acpi_cpufreq_target(struct cpufreq_policy *policy,
			       unsigned int target_freq, unsigned int relation)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);
	struct acpi_processor_performance *perf;
	struct cpufreq_freqs freqs;
	struct drv_cmd cmd;
	unsigned int next_state = 0; /* Index into freq_table */
	unsigned int next_perf_state = 0; /* Index into perf table */
	int result = 0;

	pr_debug("acpi_cpufreq_target %d (%d)\n", target_freq, policy->cpu);

	if (unlikely(data == NULL ||
		     data->acpi_data == NULL || data->freq_table == NULL)) {
		return -ENODEV;
	}

	perf = data->acpi_data;
	result = cpufreq_frequency_table_target(policy,
						data->freq_table,
						target_freq,
						relation, &next_state);
	if (unlikely(result)) {
		result = -ENODEV;
		goto out;
	}

	next_perf_state = data->freq_table[next_state].index;
	if (perf->state == next_perf_state) {
		if (unlikely(data->resume)) {
			pr_debug("Called after resume, resetting to P%d\n",
				next_perf_state);
			data->resume = 0;
		} else {
			pr_debug("Already at target state (P%d)\n",
				next_perf_state);
			goto out;
		}
	}

	switch (data->cpu_feature) {
	case SYSTEM_INTEL_MSR_CAPABLE:
		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_AMD_MSR_CAPABLE:
		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	case SYSTEM_IO_CAPABLE:
		cmd.type = SYSTEM_IO_CAPABLE;
		cmd.addr.io.port = perf->control_register.address;
		cmd.addr.io.bit_width = perf->control_register.bit_width;
		cmd.val = (u32) perf->states[next_perf_state].control;
		break;
	default:
		result = -ENODEV;
		goto out;
	}

	/* cpufreq holds the hotplug lock, so we are safe from here on */
	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
		cmd.mask = policy->cpus;
	else
		cmd.mask = cpumask_of(policy->cpu);

	freqs.old = perf->states[perf->state].core_frequency * 1000;
	freqs.new = data->freq_table[next_state].frequency;
	cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);

	drv_write(&cmd);

	if (acpi_pstate_strict) {
		if (!check_freqs(cmd.mask, freqs.new, data)) {
			pr_debug("acpi_cpufreq_target failed (%d)\n",
				policy->cpu);
			result = -EAGAIN;
			goto out;
		}
	}

	cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
	perf->state = next_perf_state;

out:
	return result;
}
static int acpi_cpufreq_verify(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_verify\n");

	return cpufreq_frequency_table_verify(policy, data->freq_table);
}
static unsigned long
acpi_cpufreq_guess_freq(struct acpi_cpufreq_data *data, unsigned int cpu)
{
	struct acpi_processor_performance *perf = data->acpi_data;

	if (cpu_khz) {
		/* search the closest match to cpu_khz */
		unsigned int i;
		unsigned long freq;
		unsigned long freqn = perf->states[0].core_frequency * 1000;

		for (i = 0; i < (perf->state_count-1); i++) {
			freq = freqn;
			freqn = perf->states[i+1].core_frequency * 1000;
			if ((2 * cpu_khz) > (freqn + freq)) {
				perf->state = i;
				return freq;
			}
		}
		perf->state = perf->state_count-1;
		return freqn;
	} else {
		/* assume CPU is at P0... */
		perf->state = 0;
		return perf->states[0].core_frequency * 1000;
	}
}
static void free_acpi_perf_data(void)
{
	unsigned int i;

	/* Freeing a NULL pointer is OK, and alloc_percpu zeroes. */
	for_each_possible_cpu(i)
		free_cpumask_var(per_cpu_ptr(acpi_perf_data, i)
				 ->shared_cpu_map);
	free_percpu(acpi_perf_data);
}
static int boost_notify(struct notifier_block *nb, unsigned long action,
			void *hcpu)
{
	unsigned cpu = (long)hcpu;
	const struct cpumask *cpumask;

	cpumask = get_cpu_mask(cpu);

	/*
	 * Clear the boost-disable bit on the CPU_DOWN path so that this
	 * cpu cannot block the remaining ones from boosting. On the
	 * CPU_UP path we simply keep the boost-disable flag in sync with
	 * the current global state.
	 */

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		boost_set_msrs(boost_enabled, cpumask);
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		boost_set_msrs(1, cpumask);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block boost_nb = {
	.notifier_call          = boost_notify,
};
/*
 * acpi_cpufreq_early_init - initialize ACPI P-States library
 *
 * Initialize the ACPI P-States library (drivers/acpi/processor_perflib.c)
 * in order to determine correct frequency and voltage pairings. We can
 * do _PDC and _PSD and find out the processor dependency for the
 * actual init that will happen later...
 */
static int __init acpi_cpufreq_early_init(void)
{
	unsigned int i;
	pr_debug("acpi_cpufreq_early_init\n");

	acpi_perf_data = alloc_percpu(struct acpi_processor_performance);
	if (!acpi_perf_data) {
		pr_debug("Memory allocation error for acpi_perf_data.\n");
		return -ENOMEM;
	}
	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(
			&per_cpu_ptr(acpi_perf_data, i)->shared_cpu_map,
			GFP_KERNEL, cpu_to_node(i))) {

			/* Freeing a NULL pointer is OK: alloc_percpu zeroes. */
			free_acpi_perf_data();
			return -ENOMEM;
		}
	}

	/* Do initialization in ACPI core */
	acpi_processor_preregister_performance(acpi_perf_data);
	return 0;
}
/*
 * Some BIOSes do SW_ANY coordination internally, either set it up in hw
 * or do it in BIOS firmware, and do not inform the OS about it. If not
 * detected, this has the side effect of making the CPU run at a different
 * speed than the OS intended. Detect it and handle it cleanly.
 */
static int bios_with_sw_any_bug;

static int sw_any_bug_found(const struct dmi_system_id *d)
{
	bios_with_sw_any_bug = 1;
	return 0;
}

static const struct dmi_system_id sw_any_bug_dmi_table[] = {
	{
		.callback = sw_any_bug_found,
		.ident = "Supermicro Server X6DLP",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
			DMI_MATCH(DMI_BIOS_VERSION, "080010"),
			DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"),
		},
	},
	{ }
};
static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
{
	/* Intel Xeon Processor 7100 Series Specification Update
	 * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
	 * AL30: A Machine Check Exception (MCE) Occurring during an
	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
	 * Both Processor Cores to Lock Up. */
	if (c->x86_vendor == X86_VENDOR_INTEL) {
		if ((c->x86 == 15) &&
		    (c->x86_model == 6) &&
		    (c->x86_mask == 8)) {
			printk(KERN_INFO "acpi-cpufreq: Intel(R) "
			    "Xeon(R) 7100 Errata AL30, processors may "
			    "lock up on frequency changes: disabling "
			    "acpi-cpufreq.\n");
			return -ENODEV;
		}
	}
	return 0;
}
static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
{
	unsigned int i;
	unsigned int valid_states = 0;
	unsigned int cpu = policy->cpu;
	struct acpi_cpufreq_data *data;
	unsigned int result = 0;
	struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
	struct acpi_processor_performance *perf;
#ifdef CONFIG_SMP
	static int blacklisted;
#endif

	pr_debug("acpi_cpufreq_cpu_init\n");

#ifdef CONFIG_SMP
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
	if (blacklisted)
		return blacklisted;
#endif

	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
	per_cpu(acfreq_data, cpu) = data;

	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
		acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;

	result = acpi_processor_register_performance(data->acpi_data, cpu);
	if (result)
		goto err_free;

	perf = data->acpi_data;
	policy->shared_type = perf->shared_type;

	/*
	 * Will let policy->cpus know about dependency only when software
	 * coordination is required.
	 */
	if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
	    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
		cpumask_copy(policy->cpus, perf->shared_cpu_map);
	}

#ifdef CONFIG_SMP
	dmi_check_system(sw_any_bug_dmi_table);
	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
	}

	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
		cpumask_clear(policy->cpus);
		cpumask_set_cpu(cpu, policy->cpus);
		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
	}
#endif

	/* capability check */
	if (perf->state_count <= 1) {
		pr_debug("No P-States\n");
		result = -ENODEV;
		goto err_unreg;
	}

	if (perf->control_register.space_id != perf->status_register.space_id) {
		result = -ENODEV;
		goto err_unreg;
	}

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		    boot_cpu_data.x86 == 0xf) {
			pr_debug("AMD K8 systems must use native drivers.\n");
			result = -ENODEV;
			goto err_unreg;
		}
		pr_debug("SYSTEM IO addr space\n");
		data->cpu_feature = SYSTEM_IO_CAPABLE;
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		pr_debug("HARDWARE addr space\n");
		if (check_est_cpu(cpu)) {
			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
			break;
		}
		if (check_amd_hwpstate_cpu(cpu)) {
			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
			break;
		}
		result = -ENODEV;
		goto err_unreg;
	default:
		pr_debug("Unknown addr space %d\n",
			(u32) (perf->control_register.space_id));
		result = -ENODEV;
		goto err_unreg;
	}

	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) *
		    (perf->state_count+1), GFP_KERNEL);
	if (!data->freq_table) {
		result = -ENOMEM;
		goto err_unreg;
	}

	/* detect transition latency */
	policy->cpuinfo.transition_latency = 0;
	for (i = 0; i < perf->state_count; i++) {
		if ((perf->states[i].transition_latency * 1000) >
		    policy->cpuinfo.transition_latency)
			policy->cpuinfo.transition_latency =
			    perf->states[i].transition_latency * 1000;
	}

	/* Check for high latency (>20uS) from buggy BIOSes, like on T42 */
	if (perf->control_register.space_id == ACPI_ADR_SPACE_FIXED_HARDWARE &&
	    policy->cpuinfo.transition_latency > 20 * 1000) {
		policy->cpuinfo.transition_latency = 20 * 1000;
		printk_once(KERN_INFO
			    "P-state transition latency capped at 20 uS\n");
	}

	/* table init */
	for (i = 0; i < perf->state_count; i++) {
		if (i > 0 && perf->states[i].core_frequency >=
		    data->freq_table[valid_states-1].frequency / 1000)
			continue;

		data->freq_table[valid_states].index = i;
		data->freq_table[valid_states].frequency =
		    perf->states[i].core_frequency * 1000;
		valid_states++;
	}
	data->freq_table[valid_states].frequency = CPUFREQ_TABLE_END;
	perf->state = 0;

	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table);
	if (result)
		goto err_freqfree;

	if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq)
		printk(KERN_WARNING FW_WARN "P-state 0 is not max freq\n");

	switch (perf->control_register.space_id) {
	case ACPI_ADR_SPACE_SYSTEM_IO:
		/* Current speed is unknown and not detectable by IO port */
		policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
		break;
	case ACPI_ADR_SPACE_FIXED_HARDWARE:
		acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
		policy->cur = get_cur_freq_on_cpu(cpu);
		break;
	default:
		break;
	}

	/* notify BIOS that we exist */
	acpi_processor_notify_smm(THIS_MODULE);

	/* Check for APERF/MPERF support in hardware */
	if (boot_cpu_has(X86_FEATURE_APERFMPERF))
		acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;

	pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
	for (i = 0; i < perf->state_count; i++)
		pr_debug("     %cP%d: %d MHz, %d mW, %d uS\n",
			(i == perf->state ? '*' : ' '), i,
			(u32) perf->states[i].core_frequency,
			(u32) perf->states[i].power,
			(u32) perf->states[i].transition_latency);

	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu);

	/*
	 * the first call to ->target() should result in us actually
	 * writing something to the appropriate registers.
	 */
	data->resume = 1;

	return result;

err_freqfree:
	kfree(data->freq_table);
err_unreg:
	acpi_processor_unregister_performance(perf, cpu);
err_free:
	kfree(data);
	per_cpu(acfreq_data, cpu) = NULL;

	return result;
}
static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_cpu_exit\n");

	if (data) {
		cpufreq_frequency_table_put_attr(policy->cpu);
		per_cpu(acfreq_data, policy->cpu) = NULL;
		acpi_processor_unregister_performance(data->acpi_data,
						      policy->cpu);
		kfree(data->freq_table);
		kfree(data);
	}

	return 0;
}
static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
{
	struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu);

	pr_debug("acpi_cpufreq_resume\n");

	data->resume = 1;

	return 0;
}
static struct freq_attr *acpi_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,	/* this is a placeholder for cpb, do not remove */
	NULL,
};
static struct cpufreq_driver acpi_cpufreq_driver = {
	.verify		= acpi_cpufreq_verify,
	.target		= acpi_cpufreq_target,
	.bios_limit	= acpi_processor_get_bios_limit,
	.init		= acpi_cpufreq_cpu_init,
	.exit		= acpi_cpufreq_cpu_exit,
	.resume		= acpi_cpufreq_resume,
	.name		= "acpi-cpufreq",
	.owner		= THIS_MODULE,
	.attr		= acpi_cpufreq_attr,
};
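
/*
 * If the CPU advertises boost (AMD CPB or Intel IDA), sync the
 * boost-disable MSRs across all online CPUs and hook CPU hotplug;
 * otherwise expose the global "boost" file read-only.
 */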
static void __init acpi_cpufreq_boost_init(void)
{
	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
		msrs = msrs_alloc();

		if (!msrs)
			return;

		boost_supported = true;
		boost_enabled = boost_state(0);

		get_online_cpus();

		/* Force all MSRs to the same value */
		boost_set_msrs(boost_enabled, cpu_online_mask);

		register_cpu_notifier(&boost_nb);

		put_online_cpus();
	} else
		global_boost.attr.mode = 0444;

	/* We create the boost file in any case, though for systems without
	 * hardware support it will be read-only and hardwired to return 0.
	 */
	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
		pr_warn(PFX "could not register global boost sysfs file\n");
	else
		pr_debug("registered global boost sysfs file\n");
}
static void __exit acpi_cpufreq_boost_exit(void)
{
	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));

	if (msrs) {
		unregister_cpu_notifier(&boost_nb);

		msrs_free(msrs);
		msrs = NULL;
	}
}
static int __init acpi_cpufreq_init(void)
{
	int ret;

	if (acpi_disabled)
		return 0;

	pr_debug("acpi_cpufreq_init\n");

	ret = acpi_cpufreq_early_init();
	if (ret)
		return ret;

#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
	/* this is a sysfs file with a strange name and an even stranger
	 * semantic - per CPU instantiation, but system global effect.
	 * Let's enable it only on AMD CPUs for compatibility reasons and
	 * only if configured. This is considered legacy code, which
	 * will probably be removed at some point in the future.
	 */
	if (check_amd_hwpstate_cpu(0)) {
		struct freq_attr **iter;

		pr_debug("adding sysfs entry for cpb\n");

		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
			;

		/* make sure there is a terminator behind it */
		if (iter[1] == NULL)
			*iter = &cpb;
	}
#endif

	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
	if (ret)
		free_acpi_perf_data();
	else
		acpi_cpufreq_boost_init();

	return ret;
}
static void __exit acpi_cpufreq_exit(void)
{
	pr_debug("acpi_cpufreq_exit\n");

	acpi_cpufreq_boost_exit();

	cpufreq_unregister_driver(&acpi_cpufreq_driver);

	free_acpi_perf_data();
}
module_param(acpi_pstate_strict, uint, 0644);
MODULE_PARM_DESC(acpi_pstate_strict,
	"value 0 or non-zero. non-zero -> strict ACPI checks are "
	"performed during frequency changes.");
late_initcall(acpi_cpufreq_init);
module_exit(acpi_cpufreq_exit);
static const struct x86_cpu_id acpi_cpufreq_ids[] = {
	X86_FEATURE_MATCH(X86_FEATURE_ACPI),
	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, acpi_cpufreq_ids);

MODULE_ALIAS("acpi");