2 * turbostat -- show CPU frequency and C-state residency
3 * on modern Intel turbo-capable processors.
5 * Copyright (c) 2012 Intel Corporation.
6 * Len Brown <len.brown@intel.com>
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms and conditions of the GNU General Public License,
10 * version 2, as published by the Free Software Foundation.
12 * This program is distributed in the hope it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc.,
19 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
26 #include <sys/types.h>
29 #include <sys/resource.h>
39 char *proc_stat
= "/proc/stat";
40 unsigned int interval_sec
= 5; /* set with -i interval_sec */
41 unsigned int verbose
; /* set with -v */
42 unsigned int rapl_verbose
; /* set with -R */
43 unsigned int thermal_verbose
; /* set with -T */
44 unsigned int summary_only
; /* set with -s */
47 unsigned int do_nhm_cstates
;
48 unsigned int do_snb_cstates
;
49 unsigned int do_c8_c9_c10
;
50 unsigned int has_aperf
;
52 unsigned int units
= 1000000000; /* Ghz etc */
53 unsigned int genuine_intel
;
54 unsigned int has_invariant_tsc
;
55 unsigned int do_nehalem_platform_info
;
56 unsigned int do_nehalem_turbo_ratio_limit
;
57 unsigned int do_ivt_turbo_ratio_limit
;
58 unsigned int extra_msr_offset32
;
59 unsigned int extra_msr_offset64
;
60 unsigned int extra_delta_offset32
;
61 unsigned int extra_delta_offset64
;
64 unsigned int show_pkg
;
65 unsigned int show_core
;
66 unsigned int show_cpu
;
67 unsigned int show_pkg_only
;
68 unsigned int show_core_only
;
69 char *output_buffer
, *outp
;
73 unsigned int tcc_activation_temp
;
74 unsigned int tcc_activation_temp_override
;
75 double rapl_power_units
, rapl_energy_units
, rapl_time_units
;
76 double rapl_joule_counter_range
;
78 #define RAPL_PKG (1 << 0)
79 #define RAPL_CORES (1 << 1)
80 #define RAPL_GFX (1 << 2)
81 #define RAPL_DRAM (1 << 3)
82 #define RAPL_PKG_PERF_STATUS (1 << 4)
83 #define RAPL_DRAM_PERF_STATUS (1 << 5)
84 #define TJMAX_DEFAULT 100
86 #define MAX(a, b) ((a) > (b) ? (a) : (b))
88 int aperf_mperf_unstable
;
92 cpu_set_t
*cpu_present_set
, *cpu_affinity_set
;
93 size_t cpu_present_setsize
, cpu_affinity_setsize
;
96 unsigned long long tsc
;
97 unsigned long long aperf
;
98 unsigned long long mperf
;
99 unsigned long long c1
; /* derived */
100 unsigned long long extra_msr64
;
101 unsigned long long extra_delta64
;
102 unsigned long long extra_msr32
;
103 unsigned long long extra_delta32
;
104 unsigned int smi_count
;
107 #define CPU_IS_FIRST_THREAD_IN_CORE 0x2
108 #define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
109 } *thread_even
, *thread_odd
;
112 unsigned long long c3
;
113 unsigned long long c6
;
114 unsigned long long c7
;
115 unsigned int core_temp_c
;
116 unsigned int core_id
;
117 } *core_even
, *core_odd
;
120 unsigned long long pc2
;
121 unsigned long long pc3
;
122 unsigned long long pc6
;
123 unsigned long long pc7
;
124 unsigned long long pc8
;
125 unsigned long long pc9
;
126 unsigned long long pc10
;
127 unsigned int package_id
;
128 unsigned int energy_pkg
; /* MSR_PKG_ENERGY_STATUS */
129 unsigned int energy_dram
; /* MSR_DRAM_ENERGY_STATUS */
130 unsigned int energy_cores
; /* MSR_PP0_ENERGY_STATUS */
131 unsigned int energy_gfx
; /* MSR_PP1_ENERGY_STATUS */
132 unsigned int rapl_pkg_perf_status
; /* MSR_PKG_PERF_STATUS */
133 unsigned int rapl_dram_perf_status
; /* MSR_DRAM_PERF_STATUS */
134 unsigned int pkg_temp_c
;
136 } *package_even
, *package_odd
;
138 #define ODD_COUNTERS thread_odd, core_odd, package_odd
139 #define EVEN_COUNTERS thread_even, core_even, package_even
141 #define GET_THREAD(thread_base, thread_no, core_no, pkg_no) \
142 (thread_base + (pkg_no) * topo.num_cores_per_pkg * \
143 topo.num_threads_per_core + \
144 (core_no) * topo.num_threads_per_core + (thread_no))
145 #define GET_CORE(core_base, core_no, pkg_no) \
146 (core_base + (pkg_no) * topo.num_cores_per_pkg + (core_no))
147 #define GET_PKG(pkg_base, pkg_no) (pkg_base + pkg_no)
149 struct system_summary
{
150 struct thread_data threads
;
151 struct core_data cores
;
152 struct pkg_data packages
;
161 int num_cores_per_pkg
;
162 int num_threads_per_core
;
165 struct timeval tv_even
, tv_odd
, tv_delta
;
167 void setup_all_buffers(void);
169 int cpu_is_not_present(int cpu
)
171 return !CPU_ISSET_S(cpu
, cpu_present_setsize
, cpu_present_set
);
174 * run func(thread, core, package) in topology order
175 * skip non-present cpus
178 int for_all_cpus(int (func
)(struct thread_data
*, struct core_data
*, struct pkg_data
*),
179 struct thread_data
*thread_base
, struct core_data
*core_base
, struct pkg_data
*pkg_base
)
181 int retval
, pkg_no
, core_no
, thread_no
;
183 for (pkg_no
= 0; pkg_no
< topo
.num_packages
; ++pkg_no
) {
184 for (core_no
= 0; core_no
< topo
.num_cores_per_pkg
; ++core_no
) {
185 for (thread_no
= 0; thread_no
<
186 topo
.num_threads_per_core
; ++thread_no
) {
187 struct thread_data
*t
;
191 t
= GET_THREAD(thread_base
, thread_no
, core_no
, pkg_no
);
193 if (cpu_is_not_present(t
->cpu_id
))
196 c
= GET_CORE(core_base
, core_no
, pkg_no
);
197 p
= GET_PKG(pkg_base
, pkg_no
);
199 retval
= func(t
, c
, p
);
208 int cpu_migrate(int cpu
)
210 CPU_ZERO_S(cpu_affinity_setsize
, cpu_affinity_set
);
211 CPU_SET_S(cpu
, cpu_affinity_setsize
, cpu_affinity_set
);
212 if (sched_setaffinity(0, cpu_affinity_setsize
, cpu_affinity_set
) == -1)
/*
 * Read the 8-byte MSR at @offset on @cpu via the msr(4) character
 * device /dev/cpu/N/msr (needs the "msr" kernel module loaded).
 *
 * Returns 0 on success, -1 on open or short/failed read.
 */
int get_msr(int cpu, off_t offset, unsigned long long *msr)
{
	ssize_t retval;
	char pathname[32];
	int fd;

	sprintf(pathname, "/dev/cpu/%d/msr", cpu);
	fd = open(pathname, O_RDONLY);
	if (fd < 0)
		return -1;

	retval = pread(fd, msr, sizeof *msr, offset);
	close(fd);

	if (retval != sizeof *msr) {
		/* off_t has no portable printf conversion; %zx was wrong (off_t is signed) */
		fprintf(stderr, "%s offset 0x%llx read failed\n",
			pathname, (unsigned long long)offset);
		return -1;
	}

	return 0;
}
240 void print_header(void)
243 outp
+= sprintf(outp
, "pk");
245 outp
+= sprintf(outp
, " ");
247 outp
+= sprintf(outp
, "cor");
249 outp
+= sprintf(outp
, " CPU");
250 if (show_pkg
|| show_core
|| show_cpu
)
251 outp
+= sprintf(outp
, " ");
253 outp
+= sprintf(outp
, " %%c0");
255 outp
+= sprintf(outp
, " GHz");
256 outp
+= sprintf(outp
, " TSC");
258 outp
+= sprintf(outp
, " SMI");
259 if (extra_delta_offset32
)
260 outp
+= sprintf(outp
, " count 0x%03X", extra_delta_offset32
);
261 if (extra_delta_offset64
)
262 outp
+= sprintf(outp
, " COUNT 0x%03X", extra_delta_offset64
);
263 if (extra_msr_offset32
)
264 outp
+= sprintf(outp
, " MSR 0x%03X", extra_msr_offset32
);
265 if (extra_msr_offset64
)
266 outp
+= sprintf(outp
, " MSR 0x%03X", extra_msr_offset64
);
268 outp
+= sprintf(outp
, " %%c1");
270 outp
+= sprintf(outp
, " %%c3");
272 outp
+= sprintf(outp
, " %%c6");
274 outp
+= sprintf(outp
, " %%c7");
277 outp
+= sprintf(outp
, " CTMP");
279 outp
+= sprintf(outp
, " PTMP");
282 outp
+= sprintf(outp
, " %%pc2");
284 outp
+= sprintf(outp
, " %%pc3");
286 outp
+= sprintf(outp
, " %%pc6");
288 outp
+= sprintf(outp
, " %%pc7");
290 outp
+= sprintf(outp
, " %%pc8");
291 outp
+= sprintf(outp
, " %%pc9");
292 outp
+= sprintf(outp
, " %%pc10");
295 if (do_rapl
& RAPL_PKG
)
296 outp
+= sprintf(outp
, " Pkg_W");
297 if (do_rapl
& RAPL_CORES
)
298 outp
+= sprintf(outp
, " Cor_W");
299 if (do_rapl
& RAPL_GFX
)
300 outp
+= sprintf(outp
, " GFX_W");
301 if (do_rapl
& RAPL_DRAM
)
302 outp
+= sprintf(outp
, " RAM_W");
303 if (do_rapl
& RAPL_PKG_PERF_STATUS
)
304 outp
+= sprintf(outp
, " PKG_%%");
305 if (do_rapl
& RAPL_DRAM_PERF_STATUS
)
306 outp
+= sprintf(outp
, " RAM_%%");
308 outp
+= sprintf(outp
, "\n");
311 int dump_counters(struct thread_data
*t
, struct core_data
*c
,
314 fprintf(stderr
, "t %p, c %p, p %p\n", t
, c
, p
);
317 fprintf(stderr
, "CPU: %d flags 0x%x\n", t
->cpu_id
, t
->flags
);
318 fprintf(stderr
, "TSC: %016llX\n", t
->tsc
);
319 fprintf(stderr
, "aperf: %016llX\n", t
->aperf
);
320 fprintf(stderr
, "mperf: %016llX\n", t
->mperf
);
321 fprintf(stderr
, "c1: %016llX\n", t
->c1
);
322 fprintf(stderr
, "msr0x%x: %08llX\n",
323 extra_delta_offset32
, t
->extra_delta32
);
324 fprintf(stderr
, "msr0x%x: %016llX\n",
325 extra_delta_offset64
, t
->extra_delta64
);
326 fprintf(stderr
, "msr0x%x: %08llX\n",
327 extra_msr_offset32
, t
->extra_msr32
);
328 fprintf(stderr
, "msr0x%x: %016llX\n",
329 extra_msr_offset64
, t
->extra_msr64
);
331 fprintf(stderr
, "SMI: %08X\n", t
->smi_count
);
335 fprintf(stderr
, "core: %d\n", c
->core_id
);
336 fprintf(stderr
, "c3: %016llX\n", c
->c3
);
337 fprintf(stderr
, "c6: %016llX\n", c
->c6
);
338 fprintf(stderr
, "c7: %016llX\n", c
->c7
);
339 fprintf(stderr
, "DTS: %dC\n", c
->core_temp_c
);
343 fprintf(stderr
, "package: %d\n", p
->package_id
);
344 fprintf(stderr
, "pc2: %016llX\n", p
->pc2
);
345 fprintf(stderr
, "pc3: %016llX\n", p
->pc3
);
346 fprintf(stderr
, "pc6: %016llX\n", p
->pc6
);
347 fprintf(stderr
, "pc7: %016llX\n", p
->pc7
);
348 fprintf(stderr
, "pc8: %016llX\n", p
->pc8
);
349 fprintf(stderr
, "pc9: %016llX\n", p
->pc9
);
350 fprintf(stderr
, "pc10: %016llX\n", p
->pc10
);
351 fprintf(stderr
, "Joules PKG: %0X\n", p
->energy_pkg
);
352 fprintf(stderr
, "Joules COR: %0X\n", p
->energy_cores
);
353 fprintf(stderr
, "Joules GFX: %0X\n", p
->energy_gfx
);
354 fprintf(stderr
, "Joules RAM: %0X\n", p
->energy_dram
);
355 fprintf(stderr
, "Throttle PKG: %0X\n", p
->rapl_pkg_perf_status
);
356 fprintf(stderr
, "Throttle RAM: %0X\n", p
->rapl_dram_perf_status
);
357 fprintf(stderr
, "PTM: %dC\n", p
->pkg_temp_c
);
363 * column formatting convention & formats
364 * package: "pk" 2 columns %2d
365 * core: "cor" 3 columns %3d
366 * CPU: "CPU" 3 columns %3d
371 * GHz: "GHz" 3 columns %3.2
372 * TSC: "TSC" 3 columns %3.2
373 * SMI: "SMI" 4 columns %4d
374 * percentage " %pc3" %6.2
375 * Perf Status percentage: %5.2
376 * "CTMP" 4 columns %4d
378 int format_counters(struct thread_data
*t
, struct core_data
*c
,
381 double interval_float
;
384 /* if showing only 1st thread in core and this isn't one, bail out */
385 if (show_core_only
&& !(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
))
388 /* if showing only 1st thread in pkg and this isn't one, bail out */
389 if (show_pkg_only
&& !(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
392 interval_float
= tv_delta
.tv_sec
+ tv_delta
.tv_usec
/1000000.0;
394 /* topo columns, print blanks on 1st (average) line */
395 if (t
== &average
.threads
) {
397 outp
+= sprintf(outp
, " ");
398 if (show_pkg
&& show_core
)
399 outp
+= sprintf(outp
, " ");
401 outp
+= sprintf(outp
, " ");
403 outp
+= sprintf(outp
, " " " ");
407 outp
+= sprintf(outp
, "%2d", p
->package_id
);
409 outp
+= sprintf(outp
, " ");
411 if (show_pkg
&& show_core
)
412 outp
+= sprintf(outp
, " ");
415 outp
+= sprintf(outp
, "%3d", c
->core_id
);
417 outp
+= sprintf(outp
, " ");
420 outp
+= sprintf(outp
, " %3d", t
->cpu_id
);
423 if (do_nhm_cstates
) {
424 if (show_pkg
|| show_core
|| show_cpu
)
425 outp
+= sprintf(outp
, " ");
427 outp
+= sprintf(outp
, "%6.2f", 100.0 * t
->mperf
/t
->tsc
);
429 outp
+= sprintf(outp
, " ****");
434 if (!aperf_mperf_unstable
) {
435 outp
+= sprintf(outp
, " %3.2f",
436 1.0 * t
->tsc
/ units
* t
->aperf
/
437 t
->mperf
/ interval_float
);
439 if (t
->aperf
> t
->tsc
|| t
->mperf
> t
->tsc
) {
440 outp
+= sprintf(outp
, " ***");
442 outp
+= sprintf(outp
, "%3.1f*",
445 t
->mperf
/ interval_float
);
451 outp
+= sprintf(outp
, "%5.2f", 1.0 * t
->tsc
/units
/interval_float
);
455 outp
+= sprintf(outp
, "%4d", t
->smi_count
);
458 if (extra_delta_offset32
)
459 outp
+= sprintf(outp
, " %11llu", t
->extra_delta32
);
462 if (extra_delta_offset64
)
463 outp
+= sprintf(outp
, " %11llu", t
->extra_delta64
);
465 if (extra_msr_offset32
)
466 outp
+= sprintf(outp
, " 0x%08llx", t
->extra_msr32
);
469 if (extra_msr_offset64
)
470 outp
+= sprintf(outp
, " 0x%016llx", t
->extra_msr64
);
472 if (do_nhm_cstates
) {
474 outp
+= sprintf(outp
, " %6.2f", 100.0 * t
->c1
/t
->tsc
);
476 outp
+= sprintf(outp
, " ****");
479 /* print per-core data only for 1st thread in core */
480 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
))
484 outp
+= sprintf(outp
, " %6.2f", 100.0 * c
->c3
/t
->tsc
);
486 outp
+= sprintf(outp
, " %6.2f", 100.0 * c
->c6
/t
->tsc
);
488 outp
+= sprintf(outp
, " %6.2f", 100.0 * c
->c7
/t
->tsc
);
491 outp
+= sprintf(outp
, " %4d", c
->core_temp_c
);
493 /* print per-package data only for 1st core in package */
494 if (!(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
498 outp
+= sprintf(outp
, " %4d", p
->pkg_temp_c
);
501 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc2
/t
->tsc
);
503 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc3
/t
->tsc
);
505 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc6
/t
->tsc
);
507 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc7
/t
->tsc
);
509 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc8
/t
->tsc
);
510 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc9
/t
->tsc
);
511 outp
+= sprintf(outp
, " %6.2f", 100.0 * p
->pc10
/t
->tsc
);
515 * If measurement interval exceeds minimum RAPL Joule Counter range,
516 * indicate that results are suspect by printing "**" in fraction place.
518 if (interval_float
< rapl_joule_counter_range
) {
526 if (do_rapl
& RAPL_PKG
)
527 outp
+= sprintf(outp
, fmt6
, p
->energy_pkg
* rapl_energy_units
/ interval_float
);
528 if (do_rapl
& RAPL_CORES
)
529 outp
+= sprintf(outp
, fmt6
, p
->energy_cores
* rapl_energy_units
/ interval_float
);
530 if (do_rapl
& RAPL_GFX
)
531 outp
+= sprintf(outp
, fmt5
, p
->energy_gfx
* rapl_energy_units
/ interval_float
);
532 if (do_rapl
& RAPL_DRAM
)
533 outp
+= sprintf(outp
, fmt5
, p
->energy_dram
* rapl_energy_units
/ interval_float
);
534 if (do_rapl
& RAPL_PKG_PERF_STATUS
)
535 outp
+= sprintf(outp
, fmt5
, 100.0 * p
->rapl_pkg_perf_status
* rapl_time_units
/ interval_float
);
536 if (do_rapl
& RAPL_DRAM_PERF_STATUS
)
537 outp
+= sprintf(outp
, fmt5
, 100.0 * p
->rapl_dram_perf_status
* rapl_time_units
/ interval_float
);
540 outp
+= sprintf(outp
, "\n");
547 fputs(output_buffer
, stdout
);
549 outp
= output_buffer
;
553 fputs(output_buffer
, stderr
);
554 outp
= output_buffer
;
556 void format_all_counters(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
560 if (!printed
|| !summary_only
)
563 if (topo
.num_cpus
> 1)
564 format_counters(&average
.threads
, &average
.cores
,
572 for_all_cpus(format_counters
, t
, c
, p
);
/*
 * 32-bit MSR counters wrap; store (new - old) modulo 2^32 into old.
 * Wrapped in do { } while (0) so the macro is a single statement
 * (safe inside un-braced if/else); arguments parenthesized.
 */
#define DELTA_WRAP32(new, old)					\
	do {							\
		if ((new) > (old)) {				\
			(old) = (new) - (old);			\
		} else {					\
			(old) = 0x100000000 + (new) - (old);	\
		}						\
	} while (0)
583 delta_package(struct pkg_data
*new, struct pkg_data
*old
)
585 old
->pc2
= new->pc2
- old
->pc2
;
586 old
->pc3
= new->pc3
- old
->pc3
;
587 old
->pc6
= new->pc6
- old
->pc6
;
588 old
->pc7
= new->pc7
- old
->pc7
;
589 old
->pc8
= new->pc8
- old
->pc8
;
590 old
->pc9
= new->pc9
- old
->pc9
;
591 old
->pc10
= new->pc10
- old
->pc10
;
592 old
->pkg_temp_c
= new->pkg_temp_c
;
594 DELTA_WRAP32(new->energy_pkg
, old
->energy_pkg
);
595 DELTA_WRAP32(new->energy_cores
, old
->energy_cores
);
596 DELTA_WRAP32(new->energy_gfx
, old
->energy_gfx
);
597 DELTA_WRAP32(new->energy_dram
, old
->energy_dram
);
598 DELTA_WRAP32(new->rapl_pkg_perf_status
, old
->rapl_pkg_perf_status
);
599 DELTA_WRAP32(new->rapl_dram_perf_status
, old
->rapl_dram_perf_status
);
603 delta_core(struct core_data
*new, struct core_data
*old
)
605 old
->c3
= new->c3
- old
->c3
;
606 old
->c6
= new->c6
- old
->c6
;
607 old
->c7
= new->c7
- old
->c7
;
608 old
->core_temp_c
= new->core_temp_c
;
615 delta_thread(struct thread_data
*new, struct thread_data
*old
,
616 struct core_data
*core_delta
)
618 old
->tsc
= new->tsc
- old
->tsc
;
620 /* check for TSC < 1 Mcycles over interval */
621 if (old
->tsc
< (1000 * 1000)) {
622 fprintf(stderr
, "Insanely slow TSC rate, TSC stops in idle?\n");
623 fprintf(stderr
, "You can disable all c-states by booting with \"idle=poll\"\n");
624 fprintf(stderr
, "or just the deep ones with \"processor.max_cstate=1\"\n");
628 old
->c1
= new->c1
- old
->c1
;
630 if ((new->aperf
> old
->aperf
) && (new->mperf
> old
->mperf
)) {
631 old
->aperf
= new->aperf
- old
->aperf
;
632 old
->mperf
= new->mperf
- old
->mperf
;
635 if (!aperf_mperf_unstable
) {
636 fprintf(stderr
, "%s: APERF or MPERF went backwards *\n", progname
);
637 fprintf(stderr
, "* Frequency results do not cover entire interval *\n");
638 fprintf(stderr
, "* fix this by running Linux-2.6.30 or later *\n");
640 aperf_mperf_unstable
= 1;
643 * mperf delta is likely a huge "positive" number
644 * can not use it for calculating c0 time
652 * As counter collection is not atomic,
653 * it is possible for mperf's non-halted cycles + idle states
654 * to exceed TSC's all cycles: show c1 = 0% in that case.
656 if ((old
->mperf
+ core_delta
->c3
+ core_delta
->c6
+ core_delta
->c7
) > old
->tsc
)
659 /* normal case, derive c1 */
660 old
->c1
= old
->tsc
- old
->mperf
- core_delta
->c3
661 - core_delta
->c6
- core_delta
->c7
;
664 if (old
->mperf
== 0) {
665 if (verbose
> 1) fprintf(stderr
, "cpu%d MPERF 0!\n", old
->cpu_id
);
666 old
->mperf
= 1; /* divide by 0 protection */
669 old
->extra_delta32
= new->extra_delta32
- old
->extra_delta32
;
670 old
->extra_delta32
&= 0xFFFFFFFF;
672 old
->extra_delta64
= new->extra_delta64
- old
->extra_delta64
;
675 * Extra MSR is just a snapshot, simply copy latest w/o subtracting
677 old
->extra_msr32
= new->extra_msr32
;
678 old
->extra_msr64
= new->extra_msr64
;
681 old
->smi_count
= new->smi_count
- old
->smi_count
;
684 int delta_cpu(struct thread_data
*t
, struct core_data
*c
,
685 struct pkg_data
*p
, struct thread_data
*t2
,
686 struct core_data
*c2
, struct pkg_data
*p2
)
688 /* calculate core delta only for 1st thread in core */
689 if (t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
)
692 /* always calculate thread delta */
693 delta_thread(t
, t2
, c2
); /* c2 is core delta */
695 /* calculate package delta only for 1st core in package */
696 if (t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
)
697 delta_package(p
, p2
);
702 void clear_counters(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
710 t
->extra_delta32
= 0;
711 t
->extra_delta64
= 0;
713 /* tells format_counters to dump all fields from this set */
714 t
->flags
= CPU_IS_FIRST_THREAD_IN_CORE
| CPU_IS_FIRST_CORE_IN_PACKAGE
;
733 p
->rapl_pkg_perf_status
= 0;
734 p
->rapl_dram_perf_status
= 0;
737 int sum_counters(struct thread_data
*t
, struct core_data
*c
,
740 average
.threads
.tsc
+= t
->tsc
;
741 average
.threads
.aperf
+= t
->aperf
;
742 average
.threads
.mperf
+= t
->mperf
;
743 average
.threads
.c1
+= t
->c1
;
745 average
.threads
.extra_delta32
+= t
->extra_delta32
;
746 average
.threads
.extra_delta64
+= t
->extra_delta64
;
748 /* sum per-core values only for 1st thread in core */
749 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
))
752 average
.cores
.c3
+= c
->c3
;
753 average
.cores
.c6
+= c
->c6
;
754 average
.cores
.c7
+= c
->c7
;
756 average
.cores
.core_temp_c
= MAX(average
.cores
.core_temp_c
, c
->core_temp_c
);
758 /* sum per-pkg values only for 1st core in pkg */
759 if (!(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
762 average
.packages
.pc2
+= p
->pc2
;
763 average
.packages
.pc3
+= p
->pc3
;
764 average
.packages
.pc6
+= p
->pc6
;
765 average
.packages
.pc7
+= p
->pc7
;
766 average
.packages
.pc8
+= p
->pc8
;
767 average
.packages
.pc9
+= p
->pc9
;
768 average
.packages
.pc10
+= p
->pc10
;
770 average
.packages
.energy_pkg
+= p
->energy_pkg
;
771 average
.packages
.energy_dram
+= p
->energy_dram
;
772 average
.packages
.energy_cores
+= p
->energy_cores
;
773 average
.packages
.energy_gfx
+= p
->energy_gfx
;
775 average
.packages
.pkg_temp_c
= MAX(average
.packages
.pkg_temp_c
, p
->pkg_temp_c
);
777 average
.packages
.rapl_pkg_perf_status
+= p
->rapl_pkg_perf_status
;
778 average
.packages
.rapl_dram_perf_status
+= p
->rapl_dram_perf_status
;
782 * sum the counters for all cpus in the system
783 * compute the weighted average
785 void compute_average(struct thread_data
*t
, struct core_data
*c
,
788 clear_counters(&average
.threads
, &average
.cores
, &average
.packages
);
790 for_all_cpus(sum_counters
, t
, c
, p
);
792 average
.threads
.tsc
/= topo
.num_cpus
;
793 average
.threads
.aperf
/= topo
.num_cpus
;
794 average
.threads
.mperf
/= topo
.num_cpus
;
795 average
.threads
.c1
/= topo
.num_cpus
;
797 average
.threads
.extra_delta32
/= topo
.num_cpus
;
798 average
.threads
.extra_delta32
&= 0xFFFFFFFF;
800 average
.threads
.extra_delta64
/= topo
.num_cpus
;
802 average
.cores
.c3
/= topo
.num_cores
;
803 average
.cores
.c6
/= topo
.num_cores
;
804 average
.cores
.c7
/= topo
.num_cores
;
806 average
.packages
.pc2
/= topo
.num_packages
;
807 average
.packages
.pc3
/= topo
.num_packages
;
808 average
.packages
.pc6
/= topo
.num_packages
;
809 average
.packages
.pc7
/= topo
.num_packages
;
811 average
.packages
.pc8
/= topo
.num_packages
;
812 average
.packages
.pc9
/= topo
.num_packages
;
813 average
.packages
.pc10
/= topo
.num_packages
;
816 static unsigned long long rdtsc(void)
818 unsigned int low
, high
;
820 asm volatile("rdtsc" : "=a" (low
), "=d" (high
));
822 return low
| ((unsigned long long)high
) << 32;
829 * acquire and record local counters for that cpu
831 int get_counters(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
834 unsigned long long msr
;
836 if (cpu_migrate(cpu
)) {
837 fprintf(stderr
, "Could not migrate to CPU %d\n", cpu
);
841 t
->tsc
= rdtsc(); /* we are running on local CPU of interest */
844 if (get_msr(cpu
, MSR_IA32_APERF
, &t
->aperf
))
846 if (get_msr(cpu
, MSR_IA32_MPERF
, &t
->mperf
))
851 if (get_msr(cpu
, MSR_SMI_COUNT
, &msr
))
853 t
->smi_count
= msr
& 0xFFFFFFFF;
855 if (extra_delta_offset32
) {
856 if (get_msr(cpu
, extra_delta_offset32
, &msr
))
858 t
->extra_delta32
= msr
& 0xFFFFFFFF;
861 if (extra_delta_offset64
)
862 if (get_msr(cpu
, extra_delta_offset64
, &t
->extra_delta64
))
865 if (extra_msr_offset32
) {
866 if (get_msr(cpu
, extra_msr_offset32
, &msr
))
868 t
->extra_msr32
= msr
& 0xFFFFFFFF;
871 if (extra_msr_offset64
)
872 if (get_msr(cpu
, extra_msr_offset64
, &t
->extra_msr64
))
875 /* collect core counters only for 1st thread in core */
876 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
))
879 if (do_nhm_cstates
) {
880 if (get_msr(cpu
, MSR_CORE_C3_RESIDENCY
, &c
->c3
))
882 if (get_msr(cpu
, MSR_CORE_C6_RESIDENCY
, &c
->c6
))
887 if (get_msr(cpu
, MSR_CORE_C7_RESIDENCY
, &c
->c7
))
891 if (get_msr(cpu
, MSR_IA32_THERM_STATUS
, &msr
))
893 c
->core_temp_c
= tcc_activation_temp
- ((msr
>> 16) & 0x7F);
897 /* collect package counters only for 1st core in package */
898 if (!(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
901 if (do_nhm_cstates
) {
902 if (get_msr(cpu
, MSR_PKG_C3_RESIDENCY
, &p
->pc3
))
904 if (get_msr(cpu
, MSR_PKG_C6_RESIDENCY
, &p
->pc6
))
907 if (do_snb_cstates
) {
908 if (get_msr(cpu
, MSR_PKG_C2_RESIDENCY
, &p
->pc2
))
910 if (get_msr(cpu
, MSR_PKG_C7_RESIDENCY
, &p
->pc7
))
914 if (get_msr(cpu
, MSR_PKG_C8_RESIDENCY
, &p
->pc8
))
916 if (get_msr(cpu
, MSR_PKG_C9_RESIDENCY
, &p
->pc9
))
918 if (get_msr(cpu
, MSR_PKG_C10_RESIDENCY
, &p
->pc10
))
921 if (do_rapl
& RAPL_PKG
) {
922 if (get_msr(cpu
, MSR_PKG_ENERGY_STATUS
, &msr
))
924 p
->energy_pkg
= msr
& 0xFFFFFFFF;
926 if (do_rapl
& RAPL_CORES
) {
927 if (get_msr(cpu
, MSR_PP0_ENERGY_STATUS
, &msr
))
929 p
->energy_cores
= msr
& 0xFFFFFFFF;
931 if (do_rapl
& RAPL_DRAM
) {
932 if (get_msr(cpu
, MSR_DRAM_ENERGY_STATUS
, &msr
))
934 p
->energy_dram
= msr
& 0xFFFFFFFF;
936 if (do_rapl
& RAPL_GFX
) {
937 if (get_msr(cpu
, MSR_PP1_ENERGY_STATUS
, &msr
))
939 p
->energy_gfx
= msr
& 0xFFFFFFFF;
941 if (do_rapl
& RAPL_PKG_PERF_STATUS
) {
942 if (get_msr(cpu
, MSR_PKG_PERF_STATUS
, &msr
))
944 p
->rapl_pkg_perf_status
= msr
& 0xFFFFFFFF;
946 if (do_rapl
& RAPL_DRAM_PERF_STATUS
) {
947 if (get_msr(cpu
, MSR_DRAM_PERF_STATUS
, &msr
))
949 p
->rapl_dram_perf_status
= msr
& 0xFFFFFFFF;
952 if (get_msr(cpu
, MSR_IA32_PACKAGE_THERM_STATUS
, &msr
))
954 p
->pkg_temp_c
= tcc_activation_temp
- ((msr
>> 16) & 0x7F);
959 void print_verbose_header(void)
961 unsigned long long msr
;
964 if (!do_nehalem_platform_info
)
967 get_msr(0, MSR_NHM_PLATFORM_INFO
, &msr
);
969 fprintf(stderr
, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr
);
971 ratio
= (msr
>> 40) & 0xFF;
972 fprintf(stderr
, "%d * %.0f = %.0f MHz max efficiency\n",
973 ratio
, bclk
, ratio
* bclk
);
975 ratio
= (msr
>> 8) & 0xFF;
976 fprintf(stderr
, "%d * %.0f = %.0f MHz TSC frequency\n",
977 ratio
, bclk
, ratio
* bclk
);
979 get_msr(0, MSR_IA32_POWER_CTL
, &msr
);
980 fprintf(stderr
, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E: %sabled)\n",
981 msr
, msr
& 0x2 ? "EN" : "DIS");
983 if (!do_ivt_turbo_ratio_limit
)
984 goto print_nhm_turbo_ratio_limits
;
986 get_msr(0, MSR_IVT_TURBO_RATIO_LIMIT
, &msr
);
988 fprintf(stderr
, "cpu0: MSR_IVT_TURBO_RATIO_LIMIT: 0x%08llx\n", msr
);
990 ratio
= (msr
>> 56) & 0xFF;
992 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 16 active cores\n",
993 ratio
, bclk
, ratio
* bclk
);
995 ratio
= (msr
>> 48) & 0xFF;
997 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 15 active cores\n",
998 ratio
, bclk
, ratio
* bclk
);
1000 ratio
= (msr
>> 40) & 0xFF;
1002 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 14 active cores\n",
1003 ratio
, bclk
, ratio
* bclk
);
1005 ratio
= (msr
>> 32) & 0xFF;
1007 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 13 active cores\n",
1008 ratio
, bclk
, ratio
* bclk
);
1010 ratio
= (msr
>> 24) & 0xFF;
1012 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 12 active cores\n",
1013 ratio
, bclk
, ratio
* bclk
);
1015 ratio
= (msr
>> 16) & 0xFF;
1017 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 11 active cores\n",
1018 ratio
, bclk
, ratio
* bclk
);
1020 ratio
= (msr
>> 8) & 0xFF;
1022 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 10 active cores\n",
1023 ratio
, bclk
, ratio
* bclk
);
1025 ratio
= (msr
>> 0) & 0xFF;
1027 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 9 active cores\n",
1028 ratio
, bclk
, ratio
* bclk
);
1030 print_nhm_turbo_ratio_limits
:
1031 get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL
, &msr
);
1033 #define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
1034 #define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
1036 fprintf(stderr
, "cpu0: MSR_NHM_SNB_PKG_CST_CFG_CTL: 0x%08llx", msr
);
1038 fprintf(stderr
, " (%s%s%s%s%slocked: pkg-cstate-limit=%d: ",
1039 (msr
& SNB_C3_AUTO_UNDEMOTE
) ? "UNdemote-C3, " : "",
1040 (msr
& SNB_C1_AUTO_UNDEMOTE
) ? "UNdemote-C1, " : "",
1041 (msr
& NHM_C3_AUTO_DEMOTE
) ? "demote-C3, " : "",
1042 (msr
& NHM_C1_AUTO_DEMOTE
) ? "demote-C1, " : "",
1043 (msr
& (1 << 15)) ? "" : "UN",
1044 (unsigned int)msr
& 7);
1049 fprintf(stderr
, "pc0");
1052 fprintf(stderr
, do_snb_cstates
? "pc2" : "pc0");
1055 fprintf(stderr
, do_snb_cstates
? "pc6-noret" : "pc3");
1058 fprintf(stderr
, "pc6");
1061 fprintf(stderr
, "pc7");
1064 fprintf(stderr
, do_snb_cstates
? "pc7s" : "invalid");
1067 fprintf(stderr
, "unlimited");
1070 fprintf(stderr
, "invalid");
1072 fprintf(stderr
, ")\n");
1074 if (!do_nehalem_turbo_ratio_limit
)
1077 get_msr(0, MSR_NHM_TURBO_RATIO_LIMIT
, &msr
);
1079 fprintf(stderr
, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr
);
1081 ratio
= (msr
>> 56) & 0xFF;
1083 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 8 active cores\n",
1084 ratio
, bclk
, ratio
* bclk
);
1086 ratio
= (msr
>> 48) & 0xFF;
1088 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 7 active cores\n",
1089 ratio
, bclk
, ratio
* bclk
);
1091 ratio
= (msr
>> 40) & 0xFF;
1093 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 6 active cores\n",
1094 ratio
, bclk
, ratio
* bclk
);
1096 ratio
= (msr
>> 32) & 0xFF;
1098 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 5 active cores\n",
1099 ratio
, bclk
, ratio
* bclk
);
1101 ratio
= (msr
>> 24) & 0xFF;
1103 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 4 active cores\n",
1104 ratio
, bclk
, ratio
* bclk
);
1106 ratio
= (msr
>> 16) & 0xFF;
1108 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 3 active cores\n",
1109 ratio
, bclk
, ratio
* bclk
);
1111 ratio
= (msr
>> 8) & 0xFF;
1113 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 2 active cores\n",
1114 ratio
, bclk
, ratio
* bclk
);
1116 ratio
= (msr
>> 0) & 0xFF;
1118 fprintf(stderr
, "%d * %.0f = %.0f MHz max turbo 1 active cores\n",
1119 ratio
, bclk
, ratio
* bclk
);
1122 void free_all_buffers(void)
1124 CPU_FREE(cpu_present_set
);
1125 cpu_present_set
= NULL
;
1126 cpu_present_set
= 0;
1128 CPU_FREE(cpu_affinity_set
);
1129 cpu_affinity_set
= NULL
;
1130 cpu_affinity_setsize
= 0;
1138 package_even
= NULL
;
1148 free(output_buffer
);
1149 output_buffer
= NULL
;
/*
 * cpu_is_first_sibling_in_core(cpu)
 * return 1 if given CPU is 1st HT sibling in the core
 */
int cpu_is_first_sibling_in_core(int cpu)
{
	char path[80];
	FILE *filep;
	int first_cpu;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/* was unchecked: comparing an uninitialized int on parse failure is UB */
	if (fscanf(filep, "%d", &first_cpu) != 1) {
		perror(path);
		exit(1);
	}
	fclose(filep);
	return (cpu == first_cpu);
}
/*
 * cpu_is_first_core_in_package(cpu)
 * return 1 if given CPU is 1st core in package
 */
int cpu_is_first_core_in_package(int cpu)
{
	char path[80];
	FILE *filep;
	int first_cpu;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/* was unchecked: comparing an uninitialized int on parse failure is UB */
	if (fscanf(filep, "%d", &first_cpu) != 1) {
		perror(path);
		exit(1);
	}
	fclose(filep);
	return (cpu == first_cpu);
}
/*
 * Return @cpu's physical package id from sysfs; exits on failure.
 */
int get_physical_package_id(int cpu)
{
	char path[80];
	FILE *filep;
	int pkg;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/physical_package_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/* was unchecked: returning an uninitialized int on parse failure is UB */
	if (fscanf(filep, "%d", &pkg) != 1) {
		perror(path);
		exit(1);
	}
	fclose(filep);
	return pkg;
}
/*
 * Return @cpu's core id from sysfs; exits on failure.
 */
int get_core_id(int cpu)
{
	char path[80];
	FILE *filep;
	int core;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/core_id", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/* was unchecked: returning an uninitialized int on parse failure is UB */
	if (fscanf(filep, "%d", &core) != 1) {
		perror(path);
		exit(1);
	}
	fclose(filep);
	return core;
}
/*
 * Count HT siblings of @cpu (1 or 2) by parsing the first entry of
 * its thread_siblings_list; exits if the sysfs file cannot be opened.
 */
int get_num_ht_siblings(int cpu)
{
	char path[80];
	FILE *filep;
	int sib1, sib2;
	int matches;
	char character;

	sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
	filep = fopen(path, "r");
	if (filep == NULL) {
		perror(path);
		exit(1);
	}
	/*
	 * file format:
	 * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
	 * otherwise 1 sibling (self).
	 */
	matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);

	fclose(filep);

	return (matches == 3) ? 2 : 1;
}
1259 * run func(thread, core, package) in topology order
1260 * skip non-present cpus
1263 int for_all_cpus_2(int (func
)(struct thread_data
*, struct core_data
*,
1264 struct pkg_data
*, struct thread_data
*, struct core_data
*,
1265 struct pkg_data
*), struct thread_data
*thread_base
,
1266 struct core_data
*core_base
, struct pkg_data
*pkg_base
,
1267 struct thread_data
*thread_base2
, struct core_data
*core_base2
,
1268 struct pkg_data
*pkg_base2
)
1270 int retval
, pkg_no
, core_no
, thread_no
;
1272 for (pkg_no
= 0; pkg_no
< topo
.num_packages
; ++pkg_no
) {
1273 for (core_no
= 0; core_no
< topo
.num_cores_per_pkg
; ++core_no
) {
1274 for (thread_no
= 0; thread_no
<
1275 topo
.num_threads_per_core
; ++thread_no
) {
1276 struct thread_data
*t
, *t2
;
1277 struct core_data
*c
, *c2
;
1278 struct pkg_data
*p
, *p2
;
1280 t
= GET_THREAD(thread_base
, thread_no
, core_no
, pkg_no
);
1282 if (cpu_is_not_present(t
->cpu_id
))
1285 t2
= GET_THREAD(thread_base2
, thread_no
, core_no
, pkg_no
);
1287 c
= GET_CORE(core_base
, core_no
, pkg_no
);
1288 c2
= GET_CORE(core_base2
, core_no
, pkg_no
);
1290 p
= GET_PKG(pkg_base
, pkg_no
);
1291 p2
= GET_PKG(pkg_base2
, pkg_no
);
1293 retval
= func(t
, c
, p
, t2
, c2
, p2
);
1303 * run func(cpu) on every cpu in /proc/stat
1304 * return max_cpu number
1306 int for_all_proc_cpus(int (func
)(int))
1312 fp
= fopen(proc_stat
, "r");
1318 retval
= fscanf(fp
, "cpu %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n");
1320 perror("/proc/stat format");
1325 retval
= fscanf(fp
, "cpu%u %*d %*d %*d %*d %*d %*d %*d %*d %*d %*d\n", &cpu_num
);
1329 retval
= func(cpu_num
);
1339 void re_initialize(void)
1342 setup_all_buffers();
1343 printf("turbostat: re-initialized with num_cpus %d\n", topo
.num_cpus
);
1349 * remember the last one seen, it will be the max
/*
 * count_cpus: for_all_proc_cpus() callback; tracks the highest cpu
 * number seen in topo.max_cpu_num.
 * NOTE(review): lines elided from this fragment — the original also
 * appears to tally topo.num_cpus and return; confirm against full source.
 */
1351 int count_cpus(int cpu
)
1353 if (topo
.max_cpu_num
< cpu
)
1354 topo
.max_cpu_num
= cpu
;
/*
 * mark_cpu_present: for_all_proc_cpus() callback; records cpu in the
 * dynamically-sized cpu_present_set bitmap (CPU_SET_S from sched.h).
 */
1359 int mark_cpu_present(int cpu
)
1361 CPU_SET_S(cpu
, cpu_present_setsize
, cpu_present_set
)
;
/*
 * turbostat_loop: periodic-display mode main loop.  Alternates between
 * the EVEN and ODD counter buffers: snapshot one set, sleep
 * interval_sec, snapshot the other, diff them with for_all_cpus_2(),
 * average and print.  A -1 from get_counters() (CPU went away) triggers
 * re-initialization via the restart path.
 * NOTE(review): labels, restart logic and several control lines are
 * elided from this fragment.
 */
1365 void turbostat_loop()
/* initial EVEN-buffer snapshot */
1373 retval
= for_all_cpus(get_counters
, EVEN_COUNTERS
)
;
1376 } else if (retval
== -1) {
1377 if (restarted
> 1) {
1384 gettimeofday(&tv_even
, (struct timezone
*)NULL
)
;
/* CPU set changed under us (hotplug): re-detect topology */
1387 if (for_all_proc_cpus(cpu_is_not_present
)) {
1391 sleep(interval_sec
)
;
/* ODD snapshot, then print ODD-EVEN delta */
1392 retval
= for_all_cpus(get_counters
, ODD_COUNTERS
)
;
1395 } else if (retval
== -1) {
1399 gettimeofday(&tv_odd
, (struct timezone
*)NULL
)
;
1400 timersub(&tv_odd
, &tv_even
, &tv_delta
)
;
1401 for_all_cpus_2(delta_cpu
, ODD_COUNTERS
, EVEN_COUNTERS
)
;
1402 compute_average(EVEN_COUNTERS
)
;
1403 format_all_counters(EVEN_COUNTERS
)
;
1405 sleep(interval_sec
)
;
/* EVEN snapshot, then print EVEN-ODD delta (roles swapped) */
1406 retval
= for_all_cpus(get_counters
, EVEN_COUNTERS
)
;
1409 } else if (retval
== -1) {
1413 gettimeofday(&tv_even
, (struct timezone
*)NULL
)
;
1414 timersub(&tv_even
, &tv_odd
, &tv_delta
)
;
1415 for_all_cpus_2(delta_cpu
, EVEN_COUNTERS
, ODD_COUNTERS
)
;
1416 compute_average(ODD_COUNTERS
)
;
1417 format_all_counters(ODD_COUNTERS
)
;
/*
 * check_dev_msr: verify the msr driver is loaded by stat()ing
 * /dev/cpu/0/msr; advise "modprobe msr" if absent.
 * NOTE(review): the failure exit line is elided from this fragment.
 */
1422 void check_dev_msr()
1426 if (stat("/dev/cpu/0/msr", &sb
)) {
1427 fprintf(stderr
, "no /dev/cpu/0/msr\n");
1428 fprintf(stderr
, "Try \"# modprobe msr\"\n");
/*
 * check_super_user: MSR access requires root; complain if getuid() != 0.
 * NOTE(review): the exit after the message is elided from this fragment.
 */
1433 void check_super_user()
1435 if (getuid() != 0) {
1436 fprintf(stderr
, "must be root\n");
/*
 * has_nehalem_turbo_ratio_limit: model check for MSR_NHM_TURBO_RATIO_LIMIT
 * support.  NHM/WSM client, SNB, IVB and HSW models share one outcome;
 * the EX parts (0x2E/0x2F) are listed separately.
 * NOTE(review): the return statements for each group are elided here.
 */
1441 int has_nehalem_turbo_ratio_limit(unsigned int family
, unsigned int model
)
1450 case 0x1A: /* Core i7, Xeon 5500 series - Bloomfield, Gainstown NHM-EP */
1451 case 0x1E: /* Core i7 and i5 Processor - Clarksfield, Lynnfield, Jasper Forest */
1452 case 0x1F: /* Core i7 and i5 Processor - Nehalem */
1453 case 0x25: /* Westmere Client - Clarkdale, Arrandale */
1454 case 0x2C: /* Westmere EP - Gulftown */
1455 case 0x2A: /* SNB */
1456 case 0x2D: /* SNB Xeon */
1457 case 0x3A: /* IVB */
1458 case 0x3E: /* IVB Xeon */
1459 case 0x3C: /* HSW */
1460 case 0x3F: /* HSW */
1461 case 0x45: /* HSW */
1462 case 0x46: /* HSW */
1464 case 0x2E: /* Nehalem-EX Xeon - Beckton */
1465 case 0x2F: /* Westmere-EX Xeon - Eagleton */
/*
 * has_ivt_turbo_ratio_limit: only IVB Xeon (model 0x3E) has the
 * secondary MSR_IVT_TURBO_RATIO_LIMIT (ratios for >8 cores).
 */
1470 int has_ivt_turbo_ratio_limit(unsigned int family
, unsigned int model
)
1479 case 0x3E: /* IVB Xeon */
1488 * Decode the ENERGY_PERF_BIAS MSR
/*
 * print_epb: for_all_cpus() callback.  Prints one line per package
 * decoding MSR_IA32_ENERGY_PERF_BIAS bits [2:0] into a policy string.
 * Migrates to the target CPU first, since rdmsr is per-CPU.
 */
1490 int print_epb(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
1492 unsigned long long msr
;
1501 /* EPB is per-package */
/* only act on the first thread of the first core of each package */
1502 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
) || !(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
1505 if (cpu_migrate(cpu
)) {
1506 fprintf(stderr
, "Could not migrate to CPU %d\n", cpu
)
;
1510 if (get_msr(cpu
, MSR_IA32_ENERGY_PERF_BIAS
, &msr
))
/* low 3 bits select the energy/performance policy hint */
1513 switch (msr
& 0x7) {
1514 case ENERGY_PERF_BIAS_PERFORMANCE
:
1515 epb_string
= "performance";
1517 case ENERGY_PERF_BIAS_NORMAL
:
1518 epb_string
= "balanced";
1520 case ENERGY_PERF_BIAS_POWERSAVE
:
1521 epb_string
= "powersave";
/* any other encoding is a user/firmware-chosen custom value */
1524 epb_string
= "custom";
1527 fprintf(stderr
, "cpu%d: MSR_IA32_ENERGY_PERF_BIAS: 0x%08llx (%s)\n", cpu
, msr
, epb_string
)
;
1532 #define RAPL_POWER_GRANULARITY 0x7FFF /* 15 bit power granularity */
1533 #define RAPL_TIME_GRANULARITY 0x3F /* 6 bit time granularity */
/*
 * rapl_probe: decide which RAPL domains this model exposes (do_rapl
 * bitmask), then read MSR_RAPL_POWER_UNIT on package 0 to learn the
 * power/energy/time unit scales, and use the package TDP to estimate
 * how long the 32-bit Joule counter takes to wrap.
 */
1540 void rapl_probe(unsigned int family
, unsigned int model
)
1542 unsigned long long msr
;
/* client parts: package, cores and graphics domains */
1554 case 0x3C: /* HSW */
1555 case 0x3F: /* HSW */
1556 case 0x45: /* HSW */
1557 case 0x46: /* HSW */
1558 do_rapl
= RAPL_PKG
| RAPL_CORES
| RAPL_GFX
;
/* server parts: DRAM domain and perf-status counters instead of GFX */
1562 do_rapl
= RAPL_PKG
| RAPL_CORES
| RAPL_DRAM
| RAPL_PKG_PERF_STATUS
| RAPL_DRAM_PERF_STATUS
;
1568 /* units on package 0, verify later other packages match */
1569 if (get_msr(0, MSR_RAPL_POWER_UNIT
, &msr
))
/* units are encoded as negative powers of two: 1/2^field */
1572 rapl_power_units
= 1.0 / (1 << (msr
& 0xF));
1573 rapl_energy_units
= 1.0 / (1 << (msr
>> 8 & 0x1F));
1574 rapl_time_units
= 1.0 / (1 << (msr
>> 16 & 0xF));
1576 /* get TDP to determine energy counter range */
1577 if (get_msr(0, MSR_PKG_POWER_INFO
, &msr
))
1580 tdp
= ((msr
>> 0) & RAPL_POWER_GRANULARITY
) * rapl_power_units
;
/* worst case: counter wraps fastest when drawing full TDP */
1582 rapl_joule_counter_range
= 0xFFFFFFFF * rapl_energy_units
/ tdp
;
1585 fprintf(stderr
, "RAPL: %.0f sec. Joule Counter Range\n", rapl_joule_counter_range
)
;
/*
 * print_thermal: for_all_cpus() callback.  Dumps the package (PTM) and
 * per-core (DTS) thermal status/interrupt MSRs.  Temperatures are
 * stored as degrees BELOW tcc_activation_temp, hence the subtraction
 * when printing degrees C.
 */
1590 int print_thermal(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
1592 unsigned long long msr
;
1596 if (!(do_dts
|| do_ptm
))
1601 /* DTS is per-core, no need to print for each thread */
1602 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
))
1605 if (cpu_migrate(cpu
)) {
1606 fprintf(stderr
, "Could not migrate to CPU %d\n", cpu
)
;
/* package-level thermal (PTM): first core in package only */
1610 if (do_ptm
&& (t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
)) {
1611 if (get_msr(cpu
, MSR_IA32_PACKAGE_THERM_STATUS
, &msr
))
/* bits 22:16 = digital readout, degrees below TCC activation */
1614 dts
= (msr
>> 16) & 0x7F;
1615 fprintf(stderr
, "cpu%d: MSR_IA32_PACKAGE_THERM_STATUS: 0x%08llx (%d C)\n",
1616 cpu
, msr
, tcc_activation_temp
- dts
)
;
1619 if (get_msr(cpu
, MSR_IA32_PACKAGE_THERM_INTERRUPT
, &msr
))
/* two programmable thermal-interrupt thresholds */
1622 dts
= (msr
>> 16) & 0x7F;
1623 dts2
= (msr
>> 8) & 0x7F;
1624 fprintf(stderr
, "cpu%d: MSR_IA32_PACKAGE_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
1625 cpu
, msr
, tcc_activation_temp
- dts
, tcc_activation_temp
- dts2
)
;
/* per-core DTS path */
1631 unsigned int resolution
;
1633 if (get_msr(cpu
, MSR_IA32_THERM_STATUS
, &msr
))
1636 dts
= (msr
>> 16) & 0x7F;
/* bits 30:27 = resolution of the reading, in degrees C */
1637 resolution
= (msr
>> 27) & 0xF;
1638 fprintf(stderr
, "cpu%d: MSR_IA32_THERM_STATUS: 0x%08llx (%d C +/- %d)\n",
1639 cpu
, msr
, tcc_activation_temp
- dts
, resolution
)
;
1642 if (get_msr(cpu
, MSR_IA32_THERM_INTERRUPT
, &msr
))
1645 dts
= (msr
>> 16) & 0x7F;
1646 dts2
= (msr
>> 8) & 0x7F;
1647 fprintf(stderr
, "cpu%d: MSR_IA32_THERM_INTERRUPT: 0x%08llx (%d C, %d C)\n",
1648 cpu
, msr
, tcc_activation_temp
- dts
, tcc_activation_temp
- dts2
)
;
/*
 * print_power_limit_msr: decode the common RAPL power-limit layout
 * (used by PKG limit #1, DRAM, PP0 and PP1): bit 15 = enable,
 * bits 14:0 = limit in rapl_power_units, bits 23:17+22 encode the time
 * window (mantissa/exponent form), bit 16 = clamping enable.
 */
1655 void print_power_limit_msr(int cpu
, unsigned long long msr
, char *label
)
1657 fprintf(stderr
, "cpu%d: %s: %sabled (%f Watts, %f sec, clamp %sabled)\n",
1659 ((msr
>> 15) & 1) ? "EN" : "DIS",
1660 ((msr
>> 0) & 0x7FFF) * rapl_power_units
,
/* time window = (1 + y/4) * 2^z, scaled by the RAPL time unit */
1661 (1.0 + (((msr
>> 22) & 0x3)/4.0)) * (1 << ((msr
>> 17) & 0x1F)) * rapl_time_units
,
1662 (((msr
>> 16) & 1) ? "EN" : "DIS"));
/*
 * print_rapl: for_all_cpus() callback.  One report per package:
 * re-reads MSR_RAPL_POWER_UNIT locally and cross-checks it against the
 * package-0 values cached by rapl_probe(), then dumps the power-info
 * and power-limit MSRs for every domain enabled in do_rapl.
 */
1667 int print_rapl(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
1669 unsigned long long msr
;
1671 double local_rapl_power_units
, local_rapl_energy_units
, local_rapl_time_units
;
1676 /* RAPL counters are per package, so print only for 1st thread/package */
1677 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
) || !(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
1681 if (cpu_migrate(cpu
)) {
1682 fprintf(stderr
, "Could not migrate to CPU %d\n", cpu
)
;
1686 if (get_msr(cpu
, MSR_RAPL_POWER_UNIT
, &msr
))
/* same 1/2^field decode as rapl_probe() */
1689 local_rapl_power_units
= 1.0 / (1 << (msr
& 0xF));
1690 local_rapl_energy_units
= 1.0 / (1 << (msr
>> 8 & 0x1F));
1691 local_rapl_time_units
= 1.0 / (1 << (msr
>> 16 & 0xF));
/* warn if this package's units disagree with package 0 */
1693 if (local_rapl_power_units
!= rapl_power_units
)
1694 fprintf(stderr
, "cpu%d, ERROR: Power units mis-match\n", cpu
)
;
1695 if (local_rapl_energy_units
!= rapl_energy_units
)
1696 fprintf(stderr
, "cpu%d, ERROR: Energy units mis-match\n", cpu
)
;
1697 if (local_rapl_time_units
!= rapl_time_units
)
1698 fprintf(stderr
, "cpu%d, ERROR: Time units mis-match\n", cpu
)
;
1701 fprintf(stderr
, "cpu%d: MSR_RAPL_POWER_UNIT: 0x%08llx "
1702 "(%f Watts, %f Joules, %f sec.)\n", cpu
, msr
,
1703 local_rapl_power_units
, local_rapl_energy_units
, local_rapl_time_units
)
;
/* package domain: TDP info, lock bit, and the two PKG limits */
1705 if (do_rapl
& RAPL_PKG
) {
1706 if (get_msr(cpu
, MSR_PKG_POWER_INFO
, &msr
))
1710 fprintf(stderr
, "cpu%d: MSR_PKG_POWER_INFO: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
1712 ((msr
>> 0) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1713 ((msr
>> 16) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1714 ((msr
>> 32) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1715 ((msr
>> 48) & RAPL_TIME_GRANULARITY
) * rapl_time_units
)
;
1717 if (get_msr(cpu
, MSR_PKG_POWER_LIMIT
, &msr
))
/* bit 63 = lock: once set, limits cannot change until reset */
1720 fprintf(stderr
, "cpu%d: MSR_PKG_POWER_LIMIT: 0x%08llx (%slocked)\n",
1721 cpu
, msr
, (msr
>> 63) & 1 ? "": "UN");
1723 print_power_limit_msr(cpu
, msr
, "PKG Limit #1");
/* PKG limit #2 lives in the upper 32 bits, same field layout */
1724 fprintf(stderr
, "cpu%d: PKG Limit #2: %sabled (%f Watts, %f* sec, clamp %sabled)\n",
1726 ((msr
>> 47) & 1) ? "EN" : "DIS",
1727 ((msr
>> 32) & 0x7FFF) * rapl_power_units
,
1728 (1.0 + (((msr
>> 54) & 0x3)/4.0)) * (1 << ((msr
>> 49) & 0x1F)) * rapl_time_units
,
1729 ((msr
>> 48) & 1) ? "EN" : "DIS");
/* DRAM domain (server parts) */
1732 if (do_rapl
& RAPL_DRAM
) {
1733 if (get_msr(cpu
, MSR_DRAM_POWER_INFO
, &msr
))
1737 fprintf(stderr
, "cpu%d: MSR_DRAM_POWER_INFO,: 0x%08llx (%.0f W TDP, RAPL %.0f - %.0f W, %f sec.)\n",
1739 ((msr
>> 0) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1740 ((msr
>> 16) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1741 ((msr
>> 32) & RAPL_POWER_GRANULARITY
) * rapl_power_units
,
1742 ((msr
>> 48) & RAPL_TIME_GRANULARITY
) * rapl_time_units
)
;
1745 if (get_msr(cpu
, MSR_DRAM_POWER_LIMIT
, &msr
))
1747 fprintf(stderr
, "cpu%d: MSR_DRAM_POWER_LIMIT: 0x%08llx (%slocked)\n",
1748 cpu
, msr
, (msr
>> 31) & 1 ? "": "UN");
1750 print_power_limit_msr(cpu
, msr
, "DRAM Limit");
/* PP0 = core domain */
1752 if (do_rapl
& RAPL_CORES
) {
1754 if (get_msr(cpu
, MSR_PP0_POLICY
, &msr
))
1757 fprintf(stderr
, "cpu%d: MSR_PP0_POLICY: %lld\n", cpu
, msr
& 0xF);
1759 if (get_msr(cpu
, MSR_PP0_POWER_LIMIT
, &msr
))
1761 fprintf(stderr
, "cpu%d: MSR_PP0_POWER_LIMIT: 0x%08llx (%slocked)\n",
1762 cpu
, msr
, (msr
>> 31) & 1 ? "": "UN");
1763 print_power_limit_msr(cpu
, msr
, "Cores Limit");
/* PP1 = graphics domain (client parts) */
1766 if (do_rapl
& RAPL_GFX
) {
1768 if (get_msr(cpu
, MSR_PP1_POLICY
, &msr
))
1771 fprintf(stderr
, "cpu%d: MSR_PP1_POLICY: %lld\n", cpu
, msr
& 0xF);
1773 if (get_msr(cpu
, MSR_PP1_POWER_LIMIT
, &msr
))
1775 fprintf(stderr
, "cpu%d: MSR_PP1_POWER_LIMIT: 0x%08llx (%slocked)\n",
1776 cpu
, msr
, (msr
>> 31) & 1 ? "": "UN");
1777 print_power_limit_msr(cpu
, msr
, "GFX Limit");
/*
 * is_snb: true for SandyBridge-and-later models (SNB/IVB/HSW families
 * listed below), used to gate SNB-style C-state counters and 100MHz bclk.
 * NOTE(review): earlier case labels and returns are elided here.
 */
1784 int is_snb(unsigned int family
, unsigned int model
)
1792 case 0x3A: /* IVB */
1793 case 0x3E: /* IVB Xeon */
1794 case 0x3C: /* HSW */
1795 case 0x3F: /* HSW */
1796 case 0x45: /* HSW */
1797 case 0x46: /* HSW */
/*
 * has_c8_c9_c10: model check for the deep C8/C9/C10 package residency
 * counters.  Body elided from this fragment; only the signature is visible.
 */
1803 int has_c8_c9_c10(unsigned int family
, unsigned int model
)
/*
 * discover_bclk: base-clock in MHz — SNB and newer use 100 MHz
 * (the non-SNB branch is elided from this fragment).
 */
1816 double discover_bclk(unsigned int family
, unsigned int model
)
1818 if (is_snb(family
, model
))
1825 * MSR_IA32_TEMPERATURE_TARGET indicates the temperature where
1826 * the Thermal Control Circuit (TCC) activates.
1827 * This is usually equal to tjMax.
1829 * Older processors do not have this MSR, so there we guess,
1830 * but also allow cmdline over-ride with -T.
1832 * Several MSR temperature values are in units of degrees-C
1833 * below this value, including the Digital Thermal Sensor (DTS),
1834 * Package Thermal Management Sensor (PTM), and thermal event thresholds.
/*
 * set_temperature_target: per-package for_all_cpus() callback that
 * establishes tcc_activation_temp, in priority order: the -T command
 * line override, then MSR_IA32_TEMPERATURE_TARGET (sanity-checked to
 * 85..120 C), else the TJMAX_DEFAULT guess.
 */
1836 int set_temperature_target(struct thread_data
*t
, struct core_data
*c
, struct pkg_data
*p
)
1838 unsigned long long msr
;
1839 unsigned int target_c_local
;
1842 /* tcc_activation_temp is used only for dts or ptm */
1843 if (!(do_dts
|| do_ptm
))
1846 /* this is a per-package concept */
1847 if (!(t
->flags
& CPU_IS_FIRST_THREAD_IN_CORE
) || !(t
->flags
& CPU_IS_FIRST_CORE_IN_PACKAGE
))
1851 if (cpu_migrate(cpu
)) {
1852 fprintf(stderr
, "Could not migrate to CPU %d\n", cpu
)
;
/* -T on the command line wins over everything */
1856 if (tcc_activation_temp_override
!= 0) {
1857 tcc_activation_temp
= tcc_activation_temp_override
;
1858 fprintf(stderr
, "cpu%d: Using cmdline TCC Target (%d C)\n",
1859 cpu
, tcc_activation_temp
)
;
1863 /* Temperature Target MSR is Nehalem and newer only */
1864 if (!do_nehalem_platform_info
)
1867 if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET
, &msr
))
/* bits 23:16 = TCC activation temperature in degrees C */
1870 target_c_local
= (msr
>> 16) & 0x7F;
1873 fprintf(stderr
, "cpu%d: MSR_IA32_TEMPERATURE_TARGET: 0x%08llx (%d C)\n",
1874 cpu
, msr
, target_c_local
)
;
/* implausible values fall through to the tjMax guess below */
1876 if (target_c_local
< 85 || target_c_local
> 120)
1879 tcc_activation_temp
= target_c_local
;
1884 tcc_activation_temp
= TJMAX_DEFAULT
;
1885 fprintf(stderr
, "cpu%d: Guessing tjMax %d C, Please use -T to specify\n",
1886 cpu
, tcc_activation_temp
)
;
/*
 * NOTE(review): the function header is elided from this fragment;
 * presumably this is the body of check_cpuid() — confirm against the
 * full source.  It probes CPUID for vendor, family/model/stepping,
 * MSR support, invariant TSC, APERF/MPERF, DTS/PTM and EPB, then sets
 * the global feature flags accordingly.
 */
1892 unsigned int eax
, ebx
, ecx
, edx
, max_level
;
1893 unsigned int fms
, family
, model
, stepping
;
1895 eax
= ebx
= ecx
= edx
= 0;
/* CPUID leaf 0: max leaf in EAX, vendor string in EBX/EDX/ECX */
1897 asm("cpuid" : "=a" (max_level
), "=b" (ebx
), "=c" (ecx
), "=d" (edx
) : "a" (0));
/* "GenuineIntel" spelled as the three little-endian register words */
1899 if (ebx
== 0x756e6547 && edx
== 0x49656e69 && ecx
== 0x6c65746e)
1903 fprintf(stderr
, "CPUID(0): %.4s%.4s%.4s ",
1904 (char *)&ebx
, (char *)&edx
, (char *)&ecx
)
;
/* leaf 1: family/model/stepping packed into EAX ("fms") */
1906 asm("cpuid" : "=a" (fms
), "=c" (ecx
), "=d" (edx
) : "a" (1) : "ebx");
1907 family
= (fms
>> 8) & 0xf;
1908 model
= (fms
>> 4) & 0xf;
1909 stepping
= fms
& 0xf;
/* extended model bits apply only for family 6 and 0xf */
1910 if (family
== 6 || family
== 0xf)
1911 model
+= ((fms
>> 16) & 0xf) << 4;
1914 fprintf(stderr
, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
1915 max_level
, family
, model
, stepping
, family
, model
, stepping
)
;
/* leaf 1 EDX bit 5 = MSR instructions supported */
1917 if (!(edx
& (1 << 5))) {
1918 fprintf(stderr
, "CPUID: no MSR\n");
1923 * check max extended function levels of CPUID.
1924 * This is needed to check for invariant TSC.
1925 * This check is valid for both Intel and AMD.
1927 ebx
= ecx
= edx
= 0;
1928 asm("cpuid" : "=a" (max_level
), "=b" (ebx
), "=c" (ecx
), "=d" (edx
) : "a" (0x80000000));
1930 if (max_level
< 0x80000007) {
1931 fprintf(stderr
, "CPUID: no invariant TSC (max_level 0x%x)\n", max_level
)
;
1936 * Non-Stop TSC is advertised by CPUID.EAX=0x80000007: EDX.bit8
1937 * this check is valid for both Intel and AMD
1939 asm("cpuid" : "=a" (eax
), "=b" (ebx
), "=c" (ecx
), "=d" (edx
) : "a" (0x80000007));
1940 has_invariant_tsc
= edx
& (1 << 8);
1942 if (!has_invariant_tsc
) {
1943 fprintf(stderr
, "No invariant TSC\n");
1948 * APERF/MPERF is advertised by CPUID.EAX=0x6: ECX.bit0
1949 * this check is valid for both Intel and AMD
1952 asm("cpuid" : "=a" (eax
), "=b" (ebx
), "=c" (ecx
), "=d" (edx
) : "a" (0x6));
1953 has_aperf
= ecx
& (1 << 0);
/* leaf 6 EAX: bit 0 = DTS, bit 6 = package thermal (PTM) */
1954 do_dts
= eax
& (1 << 0);
1955 do_ptm
= eax
& (1 << 6);
1956 has_epb
= ecx
& (1 << 3);
1959 fprintf(stderr
, "CPUID(6): %s%s%s%s\n",
1960 has_aperf
? "APERF" : "No APERF!",
1961 do_dts
? ", DTS" : "",
1962 do_ptm
? ", PTM": "",
1963 has_epb
? ", EPB": "");
/* derive the global capability flags used by the rest of the tool */
1968 do_nehalem_platform_info
= genuine_intel
&& has_invariant_tsc
;
1969 do_nhm_cstates
= genuine_intel
; /* all Intel w/ non-stop TSC have NHM counters */
1970 do_smi
= do_nhm_cstates
;
1971 do_snb_cstates
= is_snb(family
, model
)
;
1972 do_c8_c9_c10
= has_c8_c9_c10(family
, model
)
;
1973 bclk
= discover_bclk(family
, model
)
;
1975 do_nehalem_turbo_ratio_limit
= has_nehalem_turbo_ratio_limit(family
, model
)
;
1976 do_ivt_turbo_ratio_limit
= has_ivt_turbo_ratio_limit(family
, model
)
;
1977 rapl_probe(family
, model
)
;
/*
 * NOTE(review): fragment of the usage() message (function header and
 * the exit path are elided); lists the supported option letters.
 */
1985 fprintf(stderr
, "%s: [-v][-R][-T][-p|-P|-S][-c MSR# | -s]][-C MSR#][-m MSR#][-M MSR#][-i interval_sec | command ...]\n",
1992 * in /dev/cpu/ return success for names that are numbers
1993 * ie. filter out ".", "..", "microcode".
/*
 * dir_filter: scandir() filter — accept directory entries whose names
 * start with a digit (the per-cpu directories under /dev/cpu/).
 */
1995 int dir_filter(const struct dirent
*dirp
)
1997 if (isdigit(dirp
->d_name
[0]))
/*
 * open_dev_cpu_msr: body elided from this fragment; only the signature
 * (taking an unused dummy argument) is visible.
 */
2003 int open_dev_cpu_msr(int dummy1
)
/*
 * topology_probe: discover the cpu topology from /proc/stat and sysfs.
 * Fills topo.{num_cpus, max_cpu_num, num_cores_per_pkg, num_packages,
 * num_threads_per_core}, and allocates/initializes cpu_present_set and
 * cpu_affinity_set sized for max_cpu_num+1 cpus.
 */
2008 void topology_probe()
2011 int max_core_id
= 0;
2012 int max_package_id
= 0;
2013 int max_siblings
= 0;
/* scratch per-cpu record: core id + physical package id */
2014 struct cpu_topology
{
2016 int physical_package_id
;
2019 /* Initialize num_cpus, max_cpu_num */
2021 topo
.max_cpu_num
= 0;
2022 for_all_proc_cpus(count_cpus
)
;
2023 if (!summary_only
&& topo
.num_cpus
> 1)
2027 fprintf(stderr
, "num_cpus %d max_cpu_num %d\n", topo
.num_cpus
, topo
.max_cpu_num
)
;
/* indexed by cpu number, so size by max_cpu_num+1, not num_cpus */
2029 cpus
= calloc(1, (topo
.max_cpu_num
+ 1) * sizeof(struct cpu_topology
));
2031 perror("calloc cpus");
2036 * Allocate and initialize cpu_present_set
2038 cpu_present_set
= CPU_ALLOC((topo
.max_cpu_num
+ 1));
2039 if (cpu_present_set
== NULL
) {
2040 perror("CPU_ALLOC");
2043 cpu_present_setsize
= CPU_ALLOC_SIZE((topo
.max_cpu_num
+ 1));
2044 CPU_ZERO_S(cpu_present_setsize
, cpu_present_set
)
;
2045 for_all_proc_cpus(mark_cpu_present
)
;
2048 * Allocate and initialize cpu_affinity_set
2050 cpu_affinity_set
= CPU_ALLOC((topo
.max_cpu_num
+ 1));
2051 if (cpu_affinity_set
== NULL
) {
2052 perror("CPU_ALLOC");
2055 cpu_affinity_setsize
= CPU_ALLOC_SIZE((topo
.max_cpu_num
+ 1));
2056 CPU_ZERO_S(cpu_affinity_setsize
, cpu_affinity_set
)
;
2061 * find max_core_id, max_package_id
2063 for (i
= 0; i
<= topo
.max_cpu_num
; ++i
) {
2066 if (cpu_is_not_present(i
)) {
2068 fprintf(stderr
, "cpu%d NOT PRESENT\n", i
)
;
2071 cpus
[i
].core_id
= get_core_id(i
)
;
2072 if (cpus
[i
].core_id
> max_core_id
)
2073 max_core_id
= cpus
[i
].core_id
;
2075 cpus
[i
].physical_package_id
= get_physical_package_id(i
)
;
2076 if (cpus
[i
].physical_package_id
> max_package_id
)
2077 max_package_id
= cpus
[i
].physical_package_id
;
2079 siblings
= get_num_ht_siblings(i
)
;
2080 if (siblings
> max_siblings
)
2081 max_siblings
= siblings
;
2083 fprintf(stderr
, "cpu %d pkg %d core %d\n",
2084 i
, cpus
[i
].physical_package_id
, cpus
[i
].core_id
)
;
/* ids are 0-based, so counts are max id + 1 */
2086 topo
.num_cores_per_pkg
= max_core_id
+ 1;
2088 fprintf(stderr
, "max_core_id %d, sizing for %d cores per package\n",
2089 max_core_id
, topo
.num_cores_per_pkg
)
;
2090 if (!summary_only
&& topo
.num_cores_per_pkg
> 1)
2093 topo
.num_packages
= max_package_id
+ 1;
2095 fprintf(stderr
, "max_package_id %d, sizing for %d packages\n",
2096 max_package_id
, topo
.num_packages
)
;
2097 if (!summary_only
&& topo
.num_packages
> 1)
2100 topo
.num_threads_per_core
= max_siblings
;
2102 fprintf(stderr
, "max_siblings %d\n", max_siblings
)
;
/*
 * allocate_counters: calloc one thread_data per topology slot, one
 * core_data per (core, pkg), and one pkg_data per package.  Thread
 * cpu_id and core core_id are set to -1 so unpopulated slots are
 * recognizable as "not present"; package ids are just the index.
 * NOTE(review): the return type line and calloc-failure path are
 * elided from this fragment (the perror at the end suggests a shared
 * error label).
 */
2108 allocate_counters(struct thread_data
**t
, struct core_data
**c
, struct pkg_data
**p
)
2112 *t
= calloc(topo
.num_threads_per_core
* topo
.num_cores_per_pkg
*
2113 topo
.num_packages
, sizeof(struct thread_data
));
2117 for (i
= 0; i
< topo
.num_threads_per_core
*
2118 topo
.num_cores_per_pkg
* topo
.num_packages
; i
++)
/* -1 = no cpu assigned yet; init_counter() fills real ids later */
2119 (*t
)[i
].cpu_id
= -1;
2121 *c
= calloc(topo
.num_cores_per_pkg
* topo
.num_packages
,
2122 sizeof(struct core_data
));
2126 for (i
= 0; i
< topo
.num_cores_per_pkg
* topo
.num_packages
; i
++)
2127 (*c
)[i
].core_id
= -1;
2129 *p
= calloc(topo
.num_packages
, sizeof(struct pkg_data
));
2133 for (i
= 0; i
< topo
.num_packages
; i
++)
2134 (*p
)[i
].package_id
= i
;
2138 perror("calloc counters");
2144 * set cpu_id, core_num, pkg_num
2145 * set FIRST_THREAD_IN_CORE and FIRST_CORE_IN_PACKAGE
2147 * increment topo.num_cores when 1st core in pkg seen
/*
 * init_counter: bind one logical cpu into a counter array: locate its
 * (thread, core, pkg) slots and record the ids, flagging the first
 * thread of each core (and first core of each package) so per-core and
 * per-package work runs exactly once.
 */
2149 void init_counter(struct thread_data
*thread_base
, struct core_data
*core_base
,
2150 struct pkg_data
*pkg_base
, int thread_num
, int core_num
,
2151 int pkg_num
, int cpu_id
)
2153 struct thread_data
*t
;
2154 struct core_data
*c
;
2157 t
= GET_THREAD(thread_base
, thread_num
, core_num
, pkg_num
)
;
2158 c
= GET_CORE(core_base
, core_num
, pkg_num
)
;
2159 p
= GET_PKG(pkg_base
, pkg_num
)
;
/* thread 0 of a core is the representative for per-core state */
2162 if (thread_num
== 0) {
2163 t
->flags
|= CPU_IS_FIRST_THREAD_IN_CORE
;
2164 if (cpu_is_first_core_in_package(cpu_id
))
2165 t
->flags
|= CPU_IS_FIRST_CORE_IN_PACKAGE
;
2168 c
->core_id
= core_num
;
2169 p
->package_id
= pkg_num
;
/*
 * initialize_counters: for_all_proc_cpus() callback; resolve cpu_id's
 * package/core/thread coordinates from sysfs and register it in both
 * the EVEN and ODD counter arrays.
 * NOTE(review): the thread-id derivation inside the first-sibling
 * check is elided from this fragment.
 */
2173 int initialize_counters(int cpu_id
)
2175 int my_thread_id
, my_core_id
, my_package_id
;
2177 my_package_id
= get_physical_package_id(cpu_id
)
;
2178 my_core_id
= get_core_id(cpu_id
)
;
2180 if (cpu_is_first_sibling_in_core(cpu_id
)) {
/* register in both snapshot arrays so either can be "current" */
2187 init_counter(EVEN_COUNTERS
, my_thread_id
, my_core_id
, my_package_id
, cpu_id
)
;
2188 init_counter(ODD_COUNTERS
, my_thread_id
, my_core_id
, my_package_id
, cpu_id
)
;
/*
 * allocate_output_buffer: one formatting buffer for the whole report;
 * sized at 256 bytes per cpu plus one header's worth.  outp is the
 * running write cursor used by the formatters.
 */
2192 void allocate_output_buffer()
2194 output_buffer
= calloc(1, (1 + topo
.num_cpus
) * 256);
2195 outp
= output_buffer
;
/*
 * setup_all_buffers: (re)build everything that depends on the current
 * cpu topology — both counter snapshot arrays, the output buffer, and
 * the per-cpu slot assignments.  Also called from re_initialize()
 * after hotplug.
 */
2202 void setup_all_buffers(void)
2205 allocate_counters(&thread_even
, &core_even
, &package_even
)
;
2206 allocate_counters(&thread_odd
, &core_odd
, &package_odd
)
;
2207 allocate_output_buffer();
2208 for_all_proc_cpus(initialize_counters
)
;
/*
 * turbostat_init: one-time startup — build buffers, print the verbose
 * header, and dump the per-package EPB/RAPL/thermal state.  Note
 * set_temperature_target() must run before print_thermal() so
 * tcc_activation_temp is established first.
 */
2210 void turbostat_init()
2217 setup_all_buffers();
2220 print_verbose_header();
2223 for_all_cpus(print_epb
, ODD_COUNTERS
)
;
2226 for_all_cpus(print_rapl
, ODD_COUNTERS
)
;
2228 for_all_cpus(set_temperature_target
, ODD_COUNTERS
)
;
2231 for_all_cpus(print_thermal
, ODD_COUNTERS
)
;
/*
 * fork_it: "turbostat command" mode — snapshot EVEN counters, fork and
 * exec the command, wait for it (ignoring INT/QUIT so the child gets
 * them), snapshot ODD counters, then print the delta over the child's
 * runtime.
 * NOTE(review): the fork() call itself and several error/exit lines
 * are elided from this fragment.
 */
2234 int fork_it(char **argv
)
2239 status
= for_all_cpus(get_counters
, EVEN_COUNTERS
)
;
2242 /* clear affinity side-effect of get_counters() */
2243 sched_setaffinity(0, cpu_present_setsize
, cpu_present_set
)
;
2244 gettimeofday(&tv_even
, (struct timezone
*)NULL
)
;
/* child branch: replace ourselves with the command */
2249 execvp(argv
[0], argv
)
;
2253 if (child_pid
== -1) {
/* parent: let the child own the terminal signals while we wait */
2258 signal(SIGINT
, SIG_IGN
)
;
2259 signal(SIGQUIT
, SIG_IGN
)
;
2260 if (waitpid(child_pid
, &status
, 0) == -1) {
2266 * n.b. fork_it() does not check for errors from for_all_cpus()
2267 * because re-starting is problematic when forking
2269 for_all_cpus(get_counters
, ODD_COUNTERS
)
;
2270 gettimeofday(&tv_odd
, (struct timezone
*)NULL
)
;
2271 timersub(&tv_odd
, &tv_even
, &tv_delta
)
;
2272 for_all_cpus_2(delta_cpu
, ODD_COUNTERS
, EVEN_COUNTERS
)
;
2273 compute_average(EVEN_COUNTERS
)
;
2274 format_all_counters(EVEN_COUNTERS
)
;
2277 fprintf(stderr
, "%.6f sec\n", tv_delta
.tv_sec
+ tv_delta
.tv_usec
/1000000.0);
/*
 * cmdline: getopt loop for the options advertised in usage().
 * The MSR-offset options (-c/-C/-m/-M) parse hex; -i and -T parse
 * decimal via atoi.
 * NOTE(review): most case labels and the simple flag-setting arms are
 * elided from this fragment.
 */
2282 void cmdline(int argc
, char **argv
)
/* leading '+' stops getopt at the first non-option (the command) */
2288 while ((opt
= getopt(argc
, argv
, "+pPSvi:sc:sC:m:M:RT:")) != -1) {
2303 interval_sec
= atoi(optarg
)
;
2306 sscanf(optarg
, "%x", &extra_delta_offset32
)
;
2309 sscanf(optarg
, "%x", &extra_delta_offset64
)
;
2312 sscanf(optarg
, "%x", &extra_msr_offset32
)
;
2315 sscanf(optarg
, "%x", &extra_msr_offset64
)
;
2321 tcc_activation_temp_override
= atoi(optarg
)
;
2329 int main(int argc
, char **argv
)
2331 cmdline(argc
, argv
);
2334 fprintf(stderr
, "turbostat v3.4 April 17, 2013"
2335 " - Len Brown <lenb@kernel.org>\n");
2340 * if any params left, it must be a command to fork
2343 return fork_it(argv
+ optind
);