perf stat: Use --big-num format by default
tools/perf/builtin-stat.c
/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ~/hackbench 10
   Time: 0.104

    Performance counter stats for '/home/mingo/hackbench':

       1255.538611  task clock ticks     #      10.143 CPU utilization factor
             54011  context switches     #       0.043 M/sec
               385  CPU migrations       #       0.000 M/sec
             17755  pagefaults           #       0.014 M/sec
        3808323185  CPU cycles           #    3033.219 M/sec
        1575111190  instructions         #    1254.530 M/sec
          17367895  cache references     #      13.833 M/sec
           7674421  cache misses         #       6.112 M/sec

    Wall-clock time elapsed:   123.786620 msecs

 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
39
40#include "perf.h"
41#include "builtin.h"
42#include "util/util.h"
43#include "util/parse-options.h"
44#include "util/parse-events.h"
45#include "util/event.h"
46#include "util/debug.h"
47#include "util/header.h"
48#include "util/cpumap.h"
49#include "util/thread.h"
50
51#include <sys/prctl.h>
52#include <math.h>
53#include <locale.h>
54
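/*
 * Default event set, used when no -e option is given: the four common
 * software events followed by six generic hardware events.
 */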
static struct perf_event_attr default_attrs[] = {

        { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
        { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
        { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
        { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_REFERENCES },
        { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CACHE_MISSES },

};

static bool system_wide = false;
static int nr_cpus = 0;
static int run_idx = 0;

static int run_count = 1;
static bool no_inherit = false;
static bool scale = true;
static bool no_aggr = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t *all_tids = NULL;
static int thread_num = 0;
static pid_t child_pid = -1;
static bool null_run = false;
static bool big_num = true;
static const char *cpu_list;


static int *fd[MAX_NR_CPUS][MAX_COUNTERS];

static int event_scaled[MAX_COUNTERS];

static struct {
        u64 val;
        u64 ena;
        u64 run;
} cpu_counts[MAX_NR_CPUS][MAX_COUNTERS];

static volatile int done = 0;

struct stats {
        double n, mean, M2;
};

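/*
 * Running mean/variance bookkeeping (Welford's online algorithm): each
 * new sample updates the mean and the sum of squared deviations from
 * it (M2) in a single pass, without storing the individual values.
 */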
static void update_stats(struct stats *stats, u64 val)
{
        double delta;

        stats->n++;
        delta = val - stats->mean;
        stats->mean += delta / stats->n;
        stats->M2 += delta*(val - stats->mean);
}

static double avg_stats(struct stats *stats)
{
        return stats->mean;
}

/*
 * http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
 *
 *       (\Sum n_i^2) - ((\Sum n_i)^2)/n
 * s^2 = -------------------------------
 *                   n - 1
 *
 * http://en.wikipedia.org/wiki/Stddev
 *
 * The std dev of the mean is related to the std dev by:
 *
 *             s
 * s_mean = -------
 *          sqrt(n)
 *
 */
static double stddev_stats(struct stats *stats)
{
        double variance = stats->M2 / (stats->n - 1);
        double variance_mean = variance / stats->n;

        return sqrt(variance_mean);
}

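/*
 * Per-counter result stats: index [0] holds the counted value,
 * [1] the time the event was enabled, [2] the time it actually ran.
 */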
struct stats event_res_stats[MAX_COUNTERS][3];
struct stats runtime_nsecs_stats[MAX_NR_CPUS];
struct stats runtime_cycles_stats[MAX_NR_CPUS];
struct stats runtime_branches_stats[MAX_NR_CPUS];
struct stats walltime_nsecs_stats;

#define MATCH_EVENT(t, c, counter)                      \
        (attrs[counter].type == PERF_TYPE_##t &&        \
         attrs[counter].config == PERF_COUNT_##c)

#define ERR_PERF_OPEN \
"counter %d, sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information."

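/*
 * Open one event: in system-wide mode one fd per monitored CPU with
 * pid == -1, otherwise one fd per thread of the target task.  Returns
 * the number of fds successfully created; EPERM/EACCES is flagged via
 * *perm_err so the caller can print a permission hint.
 */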
static int create_perf_stat_counter(int counter, bool *perm_err)
{
        struct perf_event_attr *attr = attrs + counter;
        int thread;
        int ncreated = 0;

        if (scale)
                attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
                                    PERF_FORMAT_TOTAL_TIME_RUNNING;

        if (system_wide) {
                int cpu;

                for (cpu = 0; cpu < nr_cpus; cpu++) {
                        fd[cpu][counter][0] = sys_perf_event_open(attr,
                                        -1, cpumap[cpu], -1, 0);
                        if (fd[cpu][counter][0] < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, counter,
                                        fd[cpu][counter][0], strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        } else {
                attr->inherit = !no_inherit;
                if (target_pid == -1 && target_tid == -1) {
                        attr->disabled = 1;
                        attr->enable_on_exec = 1;
                }
                for (thread = 0; thread < thread_num; thread++) {
                        fd[0][counter][thread] = sys_perf_event_open(attr,
                                all_tids[thread], -1, -1, 0);
                        if (fd[0][counter][thread] < 0) {
                                if (errno == EPERM || errno == EACCES)
                                        *perm_err = true;
                                error(ERR_PERF_OPEN, counter,
                                        fd[0][counter][thread],
                                        strerror(errno));
                        } else {
                                ++ncreated;
                        }
                }
        }

        return ncreated;
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(int counter)
{
        if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
            MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
                return 1;

        return 0;
}

/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static void read_counter_aggr(int counter)
{
        u64 count[3], single_count[3];
        int cpu;
        size_t res, nv;
        int scaled;
        int i, thread;

        count[0] = count[1] = count[2] = 0;

        nv = scale ? 3 : 1;
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                for (thread = 0; thread < thread_num; thread++) {
                        if (fd[cpu][counter][thread] < 0)
                                continue;

                        res = read(fd[cpu][counter][thread],
                                        single_count, nv * sizeof(u64));
                        assert(res == nv * sizeof(u64));

                        close(fd[cpu][counter][thread]);
                        fd[cpu][counter][thread] = -1;

                        count[0] += single_count[0];
                        if (scale) {
                                count[1] += single_count[1];
                                count[2] += single_count[2];
                        }
                }
        }

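        /*
         * If the counter ran for less time than it was enabled, it was
         * multiplexed with other events on the PMU; scale the raw count
         * by enabled/running to estimate the full-period value.  A
         * running time of 0 means it never got onto the PMU at all.
         */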
        scaled = 0;
        if (scale) {
                if (count[2] == 0) {
                        event_scaled[counter] = -1;
                        count[0] = 0;
                        return;
                }

                if (count[2] < count[1]) {
                        event_scaled[counter] = 1;
                        count[0] = (unsigned long long)
                                ((double)count[0] * count[1] / count[2] + 0.5);
                }
        }

        for (i = 0; i < 3; i++)
                update_stats(&event_res_stats[counter][i], count[i]);

        if (verbose) {
                fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
                                count[0], count[1], count[2]);
        }

        /*
         * Save the full runtime - to allow normalization during printout:
         */
        if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
                update_stats(&runtime_nsecs_stats[0], count[0]);
        if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
                update_stats(&runtime_cycles_stats[0], count[0]);
        if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
                update_stats(&runtime_branches_stats[0], count[0]);
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static void read_counter(int counter)
{
        u64 count[3];
        int cpu;
        size_t res, nv;

        count[0] = count[1] = count[2] = 0;

        nv = scale ? 3 : 1;

        for (cpu = 0; cpu < nr_cpus; cpu++) {

                if (fd[cpu][counter][0] < 0)
                        continue;

                res = read(fd[cpu][counter][0], count, nv * sizeof(u64));

                assert(res == nv * sizeof(u64));

                close(fd[cpu][counter][0]);
                fd[cpu][counter][0] = -1;

                if (scale) {
                        if (count[2] == 0) {
                                count[0] = 0;
                        } else if (count[2] < count[1]) {
                                count[0] = (unsigned long long)
                                        ((double)count[0] * count[1] / count[2] + 0.5);
                        }
                }
                cpu_counts[cpu][counter].val = count[0]; /* scaled count */
                cpu_counts[cpu][counter].ena = count[1];
                cpu_counts[cpu][counter].run = count[2];

                if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
                        update_stats(&runtime_nsecs_stats[cpu], count[0]);
                if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
                        update_stats(&runtime_cycles_stats[cpu], count[0]);
                if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
                        update_stats(&runtime_branches_stats[cpu], count[0]);
        }
}

static int run_perf_stat(int argc __used, const char **argv)
{
        unsigned long long t0, t1;
        int status = 0;
        int counter, ncreated = 0;
        int child_ready_pipe[2], go_pipe[2];
        bool perm_err = false;
        const bool forks = (argc > 0);
        char buf;

        if (!system_wide)
                nr_cpus = 1;

        if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
                perror("failed to create pipes");
                exit(1);
        }

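        /*
         * Fork the workload first but park it on a pipe read, so that
         * the counters (created with enable_on_exec set) are in place
         * before the real execvp() happens.
         */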
        if (forks) {
                if ((child_pid = fork()) < 0)
                        perror("failed to fork");

                if (!child_pid) {
                        close(child_ready_pipe[0]);
                        close(go_pipe[1]);
                        fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

                        /*
                         * Do a dummy execvp to get the PLT entry resolved,
                         * so we avoid the resolver overhead on the real
                         * execvp call.
                         */
                        execvp("", (char **)argv);

                        /*
                         * Tell the parent we're ready to go
                         */
                        close(child_ready_pipe[1]);

                        /*
                         * Wait until the parent tells us to go.
                         */
                        if (read(go_pipe[0], &buf, 1) == -1)
                                perror("unable to read pipe");

                        execvp(argv[0], (char **)argv);

                        perror(argv[0]);
                        exit(-1);
                }

                if (target_tid == -1 && target_pid == -1 && !system_wide)
                        all_tids[0] = child_pid;

                /*
                 * Wait for the child to be ready to exec.
                 */
                close(child_ready_pipe[1]);
                close(go_pipe[0]);
                if (read(child_ready_pipe[0], &buf, 1) == -1)
                        perror("unable to read pipe");
                close(child_ready_pipe[0]);
        }

        for (counter = 0; counter < nr_counters; counter++)
                ncreated += create_perf_stat_counter(counter, &perm_err);

        if (ncreated < nr_counters) {
                if (perm_err)
                        error("You may not have permission to collect %sstats.\n"
                              "\t Consider tweaking"
                              " /proc/sys/kernel/perf_event_paranoid or running as root.",
                              system_wide ? "system-wide " : "");
                /* Kill the forked child before die() exits. */
                if (child_pid != -1)
                        kill(child_pid, SIGTERM);
                die("Not all events could be opened.\n");
                return -1;
        }

        /*
         * Enable counters and exec the command:
         */
        t0 = rdclock();

        if (forks) {
                close(go_pipe[1]);
                wait(&status);
        } else {
                while (!done)
                        sleep(1);
        }

        t1 = rdclock();

        update_stats(&walltime_nsecs_stats, t1 - t0);

        if (no_aggr) {
                for (counter = 0; counter < nr_counters; counter++)
                        read_counter(counter);
        } else {
                for (counter = 0; counter < nr_counters; counter++)
                        read_counter_aggr(counter);
        }
        return WEXITSTATUS(status);
}

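/*
 * For repeated runs (-r), print the counter's relative standard
 * deviation across runs next to the average.
 */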
static void print_noise(int counter, double avg)
{
        if (run_count == 1)
                return;

        fprintf(stderr, "   ( +- %7.3f%% )",
                        100 * stddev_stats(&event_res_stats[counter][0]) / avg);
}

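/*
 * Nanosecond counters are printed as milliseconds; task-clock also
 * gets a "CPUs utilized" ratio against the measured wall-clock time.
 */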
static void nsec_printout(int cpu, int counter, double avg)
{
        double msecs = avg / 1e6;

        if (no_aggr)
                fprintf(stderr, "CPU%-4d %18.6f  %-24s",
                        cpumap[cpu], msecs, event_name(counter));
        else
                fprintf(stderr, " %18.6f  %-24s", msecs, event_name(counter));

        if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
                fprintf(stderr, " # %10.3f CPUs ",
                                avg / avg_stats(&walltime_nsecs_stats));
        }
}

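/*
 * The %' printf flag below groups digits with the locale's thousands
 * separator; it only has an effect because cmd_stat() calls
 * setlocale(LC_ALL, "").  -B/--big-num is now on by default.
 */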
static void abs_printout(int cpu, int counter, double avg)
{
        double total, ratio = 0.0;
        char cpustr[16] = { '\0', };

        if (no_aggr)
                sprintf(cpustr, "CPU%-4d", cpumap[cpu]);
        else
                cpu = 0;

        if (big_num)
                fprintf(stderr, "%s %'18.0f  %-24s",
                        cpustr, avg, event_name(counter));
        else
                fprintf(stderr, "%s %18.0f  %-24s",
                        cpustr, avg, event_name(counter));

        if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
                total = avg_stats(&runtime_cycles_stats[cpu]);

                if (total)
                        ratio = avg / total;

                fprintf(stderr, " # %10.3f IPC  ", ratio);
        } else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
                        runtime_branches_stats[cpu].n != 0) {
                total = avg_stats(&runtime_branches_stats[cpu]);

                if (total)
                        ratio = avg * 100 / total;

                fprintf(stderr, " # %10.3f %%    ", ratio);

        } else if (runtime_nsecs_stats[cpu].n != 0) {
                total = avg_stats(&runtime_nsecs_stats[cpu]);

                if (total)
                        ratio = 1000.0 * avg / total;

                fprintf(stderr, " # %10.3f M/sec", ratio);
        }
}

507
508/*
509 * Print out the results of a single counter:
510 * aggregated counts in system-wide mode
511 */
512static void print_counter_aggr(int counter)
513{
514 double avg = avg_stats(&event_res_stats[counter][0]);
515 int scaled = event_scaled[counter];
516
517 if (scaled == -1) {
518 fprintf(stderr, " %18s %-24s\n",
519 "<not counted>", event_name(counter));
520 return;
521 }
522
523 if (nsec_counter(counter))
524 nsec_printout(-1, counter, avg);
525 else
526 abs_printout(-1, counter, avg);
527
528 print_noise(counter, avg);
529
530 if (scaled) {
531 double avg_enabled, avg_running;
532
533 avg_enabled = avg_stats(&event_res_stats[counter][1]);
534 avg_running = avg_stats(&event_res_stats[counter][2]);
535
536 fprintf(stderr, " (scaled from %.2f%%)",
537 100 * avg_running / avg_enabled);
538 }
539
540 fprintf(stderr, "\n");
541}
542
543/*
544 * Print out the results of a single counter:
545 * does not use aggregated count in system-wide
546 */
547static void print_counter(int counter)
548{
549 u64 ena, run, val;
550 int cpu;
551
552 for (cpu = 0; cpu < nr_cpus; cpu++) {
553 val = cpu_counts[cpu][counter].val;
554 ena = cpu_counts[cpu][counter].ena;
555 run = cpu_counts[cpu][counter].run;
556 if (run == 0 || ena == 0) {
557 fprintf(stderr, "CPU%-4d %18s %-24s", cpumap[cpu],
558 "<not counted>", event_name(counter));
559
560 fprintf(stderr, "\n");
561 continue;
562 }
563
564 if (nsec_counter(counter))
565 nsec_printout(cpu, counter, val);
566 else
567 abs_printout(cpu, counter, val);
568
569 print_noise(counter, 1.0);
570
571 if (run != ena) {
572 fprintf(stderr, " (scaled from %.2f%%)",
573 100.0 * run / ena);
574 }
575 fprintf(stderr, "\n");
576 }
577}
578
579static void print_stat(int argc, const char **argv)
580{
581 int i, counter;
582
583 fflush(stdout);
584
585 fprintf(stderr, "\n");
586 fprintf(stderr, " Performance counter stats for ");
587 if(target_pid == -1 && target_tid == -1) {
588 fprintf(stderr, "\'%s", argv[0]);
589 for (i = 1; i < argc; i++)
590 fprintf(stderr, " %s", argv[i]);
591 } else if (target_pid != -1)
592 fprintf(stderr, "process id \'%d", target_pid);
593 else
594 fprintf(stderr, "thread id \'%d", target_tid);
595
596 fprintf(stderr, "\'");
597 if (run_count > 1)
598 fprintf(stderr, " (%d runs)", run_count);
599 fprintf(stderr, ":\n\n");
600
601 if (no_aggr) {
602 for (counter = 0; counter < nr_counters; counter++)
603 print_counter(counter);
604 } else {
605 for (counter = 0; counter < nr_counters; counter++)
606 print_counter_aggr(counter);
607 }
608
609 fprintf(stderr, "\n");
610 fprintf(stderr, " %18.9f seconds time elapsed",
611 avg_stats(&walltime_nsecs_stats)/1e9);
612 if (run_count > 1) {
613 fprintf(stderr, " ( +- %7.3f%% )",
614 100*stddev_stats(&walltime_nsecs_stats) /
615 avg_stats(&walltime_nsecs_stats));
616 }
617 fprintf(stderr, "\n\n");
618}
619
620static volatile int signr = -1;
621
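/*
 * Ctrl-C handling: when no child was forked, just end the counting
 * loop; remember the signal so sig_atexit() can re-raise it with the
 * default handler once perf stat itself exits.
 */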
static void skip_signal(int signo)
{
        if (child_pid == -1)
                done = 1;

        signr = signo;
}

static void sig_atexit(void)
{
        if (child_pid != -1)
                kill(child_pid, SIGTERM);

        if (signr == -1)
                return;

        signal(signr, SIG_DFL);
        kill(getpid(), signr);
}

641
642static const char * const stat_usage[] = {
643 "perf stat [<options>] [<command>]",
644 NULL
645};
646
static const struct option options[] = {
        OPT_CALLBACK('e', "event", NULL, "event",
                     "event selector. use 'perf list' to list available events",
                     parse_events),
        OPT_BOOLEAN('i', "no-inherit", &no_inherit,
                    "child tasks do not inherit counters"),
        OPT_INTEGER('p', "pid", &target_pid,
                    "stat events on existing process id"),
        OPT_INTEGER('t', "tid", &target_tid,
                    "stat events on existing thread id"),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_BOOLEAN('c', "scale", &scale,
                    "scale/normalize counters"),
        OPT_INCR('v', "verbose", &verbose,
                 "be more verbose (show counter open errors, etc)"),
        OPT_INTEGER('r', "repeat", &run_count,
                    "repeat command and print average + stddev (max: 100)"),
        OPT_BOOLEAN('n', "null", &null_run,
                    "null run - don't start any counters"),
        OPT_BOOLEAN('B', "big-num", &big_num,
                    "print large numbers with thousands\' separators"),
        OPT_STRING('C', "cpu", &cpu_list, "cpu",
                   "list of cpus to monitor in system-wide"),
        OPT_BOOLEAN('A', "no-aggr", &no_aggr,
                    "disable CPU count aggregation"),
        OPT_END()
};

675
676int cmd_stat(int argc, const char **argv, const char *prefix __used)
677{
678 int status;
679 int i,j;
680
681 setlocale(LC_ALL, "");
682
683 argc = parse_options(argc, argv, options, stat_usage,
684 PARSE_OPT_STOP_AT_NON_OPTION);
685 if (!argc && target_pid == -1 && target_tid == -1)
686 usage_with_options(stat_usage, options);
687 if (run_count <= 0)
688 usage_with_options(stat_usage, options);
689
690 /* no_aggr is for system-wide only */
691 if (no_aggr && !system_wide)
692 usage_with_options(stat_usage, options);
693
694 /* Set attrs and nr_counters if no event is selected and !null_run */
695 if (!null_run && !nr_counters) {
696 memcpy(attrs, default_attrs, sizeof(default_attrs));
697 nr_counters = ARRAY_SIZE(default_attrs);
698 }
699
700 if (system_wide)
701 nr_cpus = read_cpu_map(cpu_list);
702 else
703 nr_cpus = 1;
704
705 if (nr_cpus < 1)
706 usage_with_options(stat_usage, options);
707
708 if (target_pid != -1) {
709 target_tid = target_pid;
710 thread_num = find_all_tid(target_pid, &all_tids);
711 if (thread_num <= 0) {
712 fprintf(stderr, "Can't find all threads of pid %d\n",
713 target_pid);
714 usage_with_options(stat_usage, options);
715 }
716 } else {
717 all_tids=malloc(sizeof(pid_t));
718 if (!all_tids)
719 return -ENOMEM;
720
721 all_tids[0] = target_tid;
722 thread_num = 1;
723 }
724
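        /* fd[cpu][counter] is a per-thread array of event file descriptors. */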
        for (i = 0; i < MAX_NR_CPUS; i++) {
                for (j = 0; j < MAX_COUNTERS; j++) {
                        fd[i][j] = malloc(sizeof(int)*thread_num);
                        if (!fd[i][j])
                                return -ENOMEM;
                }
        }

        /*
         * We don't want to block the signals - that would cause
         * child tasks to inherit that and Ctrl-C would not work.
         * What we want is for Ctrl-C to work in the exec()-ed
         * task, but being ignored by perf stat itself:
         */
        atexit(sig_atexit);
        signal(SIGINT,  skip_signal);
        signal(SIGALRM, skip_signal);
        signal(SIGABRT, skip_signal);

        status = 0;
        for (run_idx = 0; run_idx < run_count; run_idx++) {
                if (run_count != 1 && verbose)
                        fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
                status = run_perf_stat(argc, argv);
        }

        if (status != -1)
                print_stat(argc, argv);

        return status;
}