4 #include "parse-options.h"
5 #include "parse-events.h"
/* Global table of event attributes, one slot per counter selected with -e. */
13 struct perf_event_attr attrs
[MAX_COUNTERS
];
/* debugfs mount point; presumably filled in at startup — not visible here. */
28 char debugfs_path
[MAXPATHLEN
];
/* Designated-initializer shorthands for hardware/software event table rows. */
30 #define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
31 #define CSW(x) .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_##x
/*
 * Symbolic-name table: maps a primary name and an optional alias
 * ("" = no alias) to the type/config pair built by CHW()/CSW() above.
 */
33 static struct event_symbol event_symbols
[] = {
34 { CHW(CPU_CYCLES
), "cpu-cycles", "cycles" },
35 { CHW(INSTRUCTIONS
), "instructions", "" },
36 { CHW(CACHE_REFERENCES
), "cache-references", "" },
37 { CHW(CACHE_MISSES
), "cache-misses", "" },
38 { CHW(BRANCH_INSTRUCTIONS
), "branch-instructions", "branches" },
39 { CHW(BRANCH_MISSES
), "branch-misses", "" },
40 { CHW(BUS_CYCLES
), "bus-cycles", "" },
42 { CSW(CPU_CLOCK
), "cpu-clock", "" },
43 { CSW(TASK_CLOCK
), "task-clock", "" },
44 { CSW(PAGE_FAULTS
), "page-faults", "faults" },
45 { CSW(PAGE_FAULTS_MIN
), "minor-faults", "" },
46 { CSW(PAGE_FAULTS_MAJ
), "major-faults", "" },
47 { CSW(CONTEXT_SWITCHES
), "context-switches", "cs" },
48 { CSW(CPU_MIGRATIONS
), "cpu-migrations", "migrations" },
/*
 * Extract a named bit-field from a raw event config word.
 * NOTE(review): 'config' is not parenthesized in the expansion; fine for
 * simple arguments, but an expression argument could mis-bind — confirm
 * all call sites pass plain identifiers.
 */
51 #define __PERF_EVENT_FIELD(config, name) \
52 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
/* Convenience accessors for the RAW/CONFIG/TYPE/EVENT fields. */
54 #define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
55 #define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
56 #define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
57 #define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
/* Printable names for PERF_COUNT_HW_* events, indexed by config value. */
59 static const char *hw_event_names
[] = {
/* Printable names for PERF_COUNT_SW_* events, indexed by config value. */
69 static const char *sw_event_names
[] = {
/* Cache-type spellings accepted on the command line; [0] is the canonical name. */
81 static const char *hw_cache
[][MAX_ALIASES
] = {
82 { "L1-dcache", "l1-d", "l1d", "L1-data", },
83 { "L1-icache", "l1-i", "l1i", "L1-instruction", },
85 { "dTLB", "d-tlb", "Data-TLB", },
86 { "iTLB", "i-tlb", "Instruction-TLB", },
87 { "branch", "branches", "bpu", "btb", "bpc", },
/* Cache-operation spellings; [0] is canonical, [1] is the short display form. */
90 static const char *hw_cache_op
[][MAX_ALIASES
] = {
91 { "load", "loads", "read", },
92 { "store", "stores", "write", },
93 { "prefetch", "prefetches", "speculative-read", "speculative-load", },
/* Cache-result spellings (access vs. miss). */
96 static const char *hw_cache_result
[][MAX_ALIASES
] = {
97 { "refs", "Reference", "ops", "access", },
98 { "misses", "miss", },
/* Bit-flag helpers for building the per-cache-type capability bitmap below. */
101 #define C(x) PERF_COUNT_HW_CACHE_##x
102 #define CACHE_READ (1 << C(OP_READ))
103 #define CACHE_WRITE (1 << C(OP_WRITE))
104 #define CACHE_PREFETCH (1 << C(OP_PREFETCH))
/* NOTE(review): 'x' is unparenthesized here; callers currently pass plain values. */
105 #define COP(x) (1 << x)
108 * cache operation stat
109 * L1I : Read and prefetch only
110 * ITLB and BPU : Read-only
/*
 * Which operations each cache type supports, as a CACHE_* bitmap
 * indexed by PERF_COUNT_HW_CACHE_* type.
 */
112 static unsigned long hw_cache_stat
[C(MAX
)] = {
113 [C(L1D
)] = (CACHE_READ
| CACHE_WRITE
| CACHE_PREFETCH
),
114 [C(L1I
)] = (CACHE_READ
| CACHE_PREFETCH
),
115 [C(LL
)] = (CACHE_READ
| CACHE_WRITE
| CACHE_PREFETCH
),
116 [C(DTLB
)] = (CACHE_READ
| CACHE_WRITE
| CACHE_PREFETCH
),
117 [C(ITLB
)] = (CACHE_READ
),
118 [C(BPU
)] = (CACHE_READ
),
/*
 * Iterate over every subdirectory of sys_dir, skipping "." and "..".
 * NOTE(review): readdir_r() is deprecated in modern glibc — presumably fine
 * for the era of this code, but worth confirming if it is ever modernized.
 */
121 #define for_each_subsystem(sys_dir, sys_dirent, sys_next) \
122 while (!readdir_r(sys_dir, &sys_dirent, &sys_next) && sys_next) \
123 if (sys_dirent.d_type == DT_DIR && \
124 (strcmp(sys_dirent.d_name, ".")) && \
125 (strcmp(sys_dirent.d_name, "..")))
/*
 * Probe for <debugfs>/<sys>/<evt>/id by attempting to open it read-only.
 * (The return logic is not visible in this view — presumably based on
 * whether the open succeeds; confirm against the full source.)
 */
127 static int tp_event_has_id(struct dirent
*sys_dir
, struct dirent
*evt_dir
)
129 char evt_path
[MAXPATHLEN
];
132 snprintf(evt_path
, MAXPATHLEN
, "%s/%s/%s/id", debugfs_path
,
133 sys_dir
->d_name
, evt_dir
->d_name
);
134 fd
= open(evt_path
, O_RDONLY
);
/*
 * Iterate over every event directory under a subsystem, skipping "."/".."
 * and any entry whose id file cannot be opened (see tp_event_has_id()).
 */
142 #define for_each_event(sys_dirent, evt_dir, evt_dirent, evt_next) \
143 while (!readdir_r(evt_dir, &evt_dirent, &evt_next) && evt_next) \
144 if (evt_dirent.d_type == DT_DIR && \
145 (strcmp(evt_dirent.d_name, ".")) && \
146 (strcmp(evt_dirent.d_name, "..")) && \
147 (!tp_event_has_id(&sys_dirent, &evt_dirent)))
/* Upper bound on a subsystem or event name, including the NUL. */
149 #define MAX_EVENT_LENGTH 512
/*
 * Check that @debugfs names a mounted debugfs filesystem by comparing the
 * statfs f_type against DEBUGFS_MAGIC.  Non-zero return paths are not
 * visible in this view.
 */
151 int valid_debugfs_mount(const char *debugfs
)
155 if (statfs(debugfs
, &st_fs
) < 0)
157 else if (st_fs
.f_type
!= (long) DEBUGFS_MAGIC
)
/*
 * Walk <debugfs>/tracing/events/<sys>/<evt>/id files looking for the
 * tracepoint whose id matches @config; on a match, heap-allocate and return
 * a tracepoint_path holding the system and event names.  Caller owns the
 * returned structure and its strings.
 */
162 struct tracepoint_path
*tracepoint_id_to_path(u64 config
)
164 struct tracepoint_path
*path
= NULL
;
165 DIR *sys_dir
, *evt_dir
;
166 struct dirent
*sys_next
, *evt_next
, sys_dirent
, evt_dirent
;
170 char evt_path
[MAXPATHLEN
];
172 if (valid_debugfs_mount(debugfs_path
))
175 sys_dir
= opendir(debugfs_path
);
178 sys_dir_fd
= dirfd(sys_dir
);
180 for_each_subsystem(sys_dir
, sys_dirent
, sys_next
) {
181 int dfd
= openat(sys_dir_fd
, sys_dirent
.d_name
,
182 O_RDONLY
|O_DIRECTORY
), evt_dir_fd
;
185 evt_dir
= fdopendir(dfd
);
190 evt_dir_fd
= dirfd(evt_dir
);
191 for_each_event(sys_dirent
, evt_dir
, evt_dirent
, evt_next
) {
192 snprintf(evt_path
, MAXPATHLEN
, "%s/id",
194 fd
= openat(evt_dir_fd
, evt_path
, O_RDONLY
);
197 if (read(fd
, id_buf
, sizeof(id_buf
)) < 0) {
/*
 * NOTE(review): sizeof(path) is the size of a POINTER, not of
 * struct tracepoint_path — this under-allocates; sizeof(*path)
 * looks intended.  Confirm and fix.
 */
206 path
= calloc(1, sizeof(path
));
207 path
->system
= malloc(MAX_EVENT_LENGTH
);
212 path
->name
= malloc(MAX_EVENT_LENGTH
);
/*
 * NOTE(review): strncpy does not guarantee NUL-termination; verify the
 * (not-visible) length arguments leave room for the terminator.
 */
218 strncpy(path
->system
, sys_dirent
.d_name
,
220 strncpy(path
->name
, evt_dirent
.d_name
,
/* Room for "<system>:<name>" plus separator. */
233 #define TP_PATH_LEN (MAX_EVENT_LENGTH * 2 + 1)
/*
 * Render tracepoint id @config as "system:name" ("unknown:unknown" when the
 * id cannot be resolved).  Returns a pointer into a static buffer, so the
 * result is overwritten by the next call and the function is not reentrant.
 */
234 static const char *tracepoint_id_to_name(u64 config
)
236 static char buf
[TP_PATH_LEN
];
237 struct tracepoint_path
*path
;
239 path
= tracepoint_id_to_path(config
);
241 snprintf(buf
, TP_PATH_LEN
, "%s:%s", path
->system
, path
->name
);
246 snprintf(buf
, TP_PATH_LEN
, "%s:%s", "unknown", "unknown");
251 static int is_cache_op_valid(u8 cache_type
, u8 cache_op
)
253 if (hw_cache_stat
[cache_type
] & COP(cache_op
))
254 return 1; /* valid */
256 return 0; /* invalid */
/*
 * Build a display name like "L1-dcache-load-misses" (or the short
 * "L1-dcache-loads" form) from the canonical alias tables.
 * Returns a pointer into a static buffer — overwritten on the next call,
 * not reentrant.
 * NOTE(review): unchecked sprintf into a fixed 50-byte buffer; the longest
 * alias combinations should be verified to fit (snprintf would be safer).
 */
259 static char *event_cache_name(u8 cache_type
, u8 cache_op
, u8 cache_result
)
261 static char name
[50];
264 sprintf(name
, "%s-%s-%s", hw_cache
[cache_type
][0],
265 hw_cache_op
[cache_op
][0],
266 hw_cache_result
[cache_result
][0]);
268 sprintf(name
, "%s-%s", hw_cache
[cache_type
][0],
269 hw_cache_op
[cache_op
][1]);
275 const char *event_name(int counter
)
277 u64 config
= attrs
[counter
].config
;
278 int type
= attrs
[counter
].type
;
280 return __event_name(type
, config
);
/*
 * Map a (type, config) pair to a printable event name.  Raw events are
 * formatted into a static buffer; other types are looked up in the name
 * tables or decoded as a hardware-cache triple.
 */
283 const char *__event_name(int type
, u64 config
)
287 if (type
== PERF_TYPE_RAW
) {
288 sprintf(buf
, "raw 0x%llx", config
);
293 case PERF_TYPE_HARDWARE
:
294 if (config
< PERF_COUNT_HW_MAX
)
295 return hw_event_names
[config
];
296 return "unknown-hardware";
298 case PERF_TYPE_HW_CACHE
: {
299 u8 cache_type
, cache_op
, cache_result
;
/* config layout: type in bits 0-7, op in 8-15, result in 16-23. */
301 cache_type
= (config
>> 0) & 0xff;
/*
 * NOTE(review): '>' lets cache_type == PERF_COUNT_HW_CACHE_MAX through,
 * but hw_cache[] has exactly MAX rows — '>=' looks intended.  The same
 * off-by-one applies to the op and result checks below.  Confirm.
 */
302 if (cache_type
> PERF_COUNT_HW_CACHE_MAX
)
303 return "unknown-ext-hardware-cache-type";
305 cache_op
= (config
>> 8) & 0xff;
306 if (cache_op
> PERF_COUNT_HW_CACHE_OP_MAX
)
307 return "unknown-ext-hardware-cache-op";
309 cache_result
= (config
>> 16) & 0xff;
310 if (cache_result
> PERF_COUNT_HW_CACHE_RESULT_MAX
)
311 return "unknown-ext-hardware-cache-result";
313 if (!is_cache_op_valid(cache_type
, cache_op
))
314 return "invalid-cache";
316 return event_cache_name(cache_type
, cache_op
, cache_result
);
319 case PERF_TYPE_SOFTWARE
:
320 if (config
< PERF_COUNT_SW_MAX
)
321 return sw_event_names
[config
];
322 return "unknown-software";
324 case PERF_TYPE_TRACEPOINT
:
325 return tracepoint_id_to_name(config
);
/*
 * Find the longest case-insensitive alias in @names that is a prefix of
 * *str.  The advance of *str past the match is not visible in this view —
 * presumably done on success; confirm against the full source.
 */
334 static int parse_aliases(const char **str
, const char *names
[][MAX_ALIASES
], int size
)
339 for (i
= 0; i
< size
; i
++) {
340 for (j
= 0; j
< MAX_ALIASES
&& names
[i
][j
]; j
++) {
341 n
= strlen(names
[i
][j
]);
342 if (n
> longest
&& !strncasecmp(*str
, names
[i
][j
], n
))
/*
 * Parse a generic hardware-cache event spec like "L1-dcache-load-misses":
 * a mandatory cache type, then up to two optional '-'-separated parts (an
 * op and a result, in either order), with defaults of OP_READ and
 * RESULT_ACCESS for whatever is omitted.
 */
354 static enum event_result
355 parse_generic_hw_event(const char **str
, struct perf_event_attr
*attr
)
357 const char *s
= *str
;
358 int cache_type
= -1, cache_op
= -1, cache_result
= -1;
360 cache_type
= parse_aliases(&s
, hw_cache
, PERF_COUNT_HW_CACHE_MAX
);
362 * No fallback - if we cannot get a clear cache type
365 if (cache_type
== -1)
/* Consume up to two optional '-'-separated op/result components. */
368 while ((cache_op
== -1 || cache_result
== -1) && *s
== '-') {
371 if (cache_op
== -1) {
372 cache_op
= parse_aliases(&s
, hw_cache_op
,
373 PERF_COUNT_HW_CACHE_OP_MAX
);
375 if (!is_cache_op_valid(cache_type
, cache_op
))
381 if (cache_result
== -1) {
382 cache_result
= parse_aliases(&s
, hw_cache_result
,
383 PERF_COUNT_HW_CACHE_RESULT_MAX
);
384 if (cache_result
>= 0)
389 * Can't parse this as a cache op or result, so back up
397 * Fall back to reads:
400 cache_op
= PERF_COUNT_HW_CACHE_OP_READ
;
403 * Fall back to accesses:
405 if (cache_result
== -1)
406 cache_result
= PERF_COUNT_HW_CACHE_RESULT_ACCESS
;
/* Pack the triple into the same layout __event_name() decodes. */
408 attr
->config
= cache_type
| (cache_op
<< 8) | (cache_result
<< 16);
409 attr
->type
= PERF_TYPE_HW_CACHE
;
/*
 * Resolve one sys:event tracepoint: honor the optional "record" flag by
 * enabling raw/time/cpu sampling, read the event id from debugfs, and fill
 * in @attr.  On success *strp is advanced past the event name.
 */
415 static enum event_result
416 parse_single_tracepoint_event(char *sys_name
,
417 const char *evt_name
,
418 unsigned int evt_length
,
420 struct perf_event_attr
*attr
,
423 char evt_path
[MAXPATHLEN
];
/*
 * NOTE(review): comparing strlen(flags) characters means ANY prefix of
 * "record" (e.g. "r", or an empty flags string) matches — confirm this
 * loose match is intended.
 */
429 if (!strncmp(flags
, "record", strlen(flags
))) {
430 attr
->sample_type
|= PERF_SAMPLE_RAW
;
431 attr
->sample_type
|= PERF_SAMPLE_TIME
;
432 attr
->sample_type
|= PERF_SAMPLE_CPU
;
436 snprintf(evt_path
, MAXPATHLEN
, "%s/%s/%s/id", debugfs_path
,
439 fd
= open(evt_path
, O_RDONLY
);
443 if (read(fd
, id_buf
, sizeof(id_buf
)) < 0) {
451 attr
->type
= PERF_TYPE_TRACEPOINT
;
452 *strp
= evt_name
+ evt_length
;
457 /* sys + ':' + event + ':' + flags */
458 #define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
/*
 * Expand "sys:*" into every event of the subsystem: walk the subsystem's
 * debugfs directory and recursively feed each "sys:event[:flags]" string
 * back through parse_events().
 */
459 static enum event_result
460 parse_subsystem_tracepoint_event(char *sys_name
, char *flags
)
462 char evt_path
[MAXPATHLEN
];
463 struct dirent
*evt_ent
;
466 snprintf(evt_path
, MAXPATHLEN
, "%s/%s", debugfs_path
, sys_name
);
467 evt_dir
= opendir(evt_path
);
470 perror("Can't open event dir");
474 while ((evt_ent
= readdir(evt_dir
))) {
475 char event_opt
[MAX_EVOPT_LEN
+ 1];
477 unsigned int rem
= MAX_EVOPT_LEN
;
/* Skip directory housekeeping and the control files. */
479 if (!strcmp(evt_ent
->d_name
, ".")
480 || !strcmp(evt_ent
->d_name
, "..")
481 || !strcmp(evt_ent
->d_name
, "enable")
482 || !strcmp(evt_ent
->d_name
, "filter"))
485 len
= snprintf(event_opt
, MAX_EVOPT_LEN
, "%s:%s", sys_name
,
/* Only append ":flags" when the remaining space can hold it. */
492 if (rem
< strlen(flags
) + 1)
495 strcat(event_opt
, ":");
496 strcat(event_opt
, flags
);
499 if (parse_events(NULL
, event_opt
, 0))
503 return EVT_HANDLED_ALL
;
/*
 * Parse "sys:event[:flags]" from *strp.  "sys:*" fans out to every event
 * in the subsystem; otherwise a single tracepoint is resolved.
 */
507 static enum event_result
parse_tracepoint_event(const char **strp
,
508 struct perf_event_attr
*attr
)
510 const char *evt_name
;
512 char sys_name
[MAX_EVENT_LENGTH
];
513 unsigned int sys_length
, evt_length
;
515 if (valid_debugfs_mount(debugfs_path
))
518 evt_name
= strchr(*strp
, ':');
522 sys_length
= evt_name
- *strp
;
523 if (sys_length
>= MAX_EVENT_LENGTH
)
/* strncpy copies exactly sys_length bytes; terminated explicitly below. */
526 strncpy(sys_name
, *strp
, sys_length
);
527 sys_name
[sys_length
] = '\0';
528 evt_name
= evt_name
+ 1;
530 flags
= strchr(evt_name
, ':');
/*
 * NOTE(review): when flags are present, evt_name is strndup()ed; no
 * matching free() is visible in this view — possible leak, confirm.
 */
533 evt_name
= strndup(evt_name
, flags
- evt_name
);
537 evt_length
= strlen(evt_name
);
538 if (evt_length
>= MAX_EVENT_LENGTH
)
541 if (!strcmp(evt_name
, "*")) {
542 *strp
= evt_name
+ evt_length
;
543 return parse_subsystem_tracepoint_event(sys_name
, flags
);
545 return parse_single_tracepoint_event(sys_name
, evt_name
,
/*
 * Test whether @str begins with event_symbols[i]'s symbol or alias;
 * the value returned on a match (presumably the matched length) is not
 * visible in this view.
 */
550 static int check_events(const char *str
, unsigned int i
)
554 n
= strlen(event_symbols
[i
].symbol
);
555 if (!strncmp(str
, event_symbols
[i
].symbol
, n
))
558 n
= strlen(event_symbols
[i
].alias
);
560 if (!strncmp(str
, event_symbols
[i
].alias
, n
))
/*
 * Try each entry of event_symbols[] against *strp; on a match, copy the
 * entry's type/config into @attr.
 */
565 static enum event_result
566 parse_symbolic_event(const char **strp
, struct perf_event_attr
*attr
)
568 const char *str
= *strp
;
572 for (i
= 0; i
< ARRAY_SIZE(event_symbols
); i
++) {
573 n
= check_events(str
, i
);
575 attr
->type
= event_symbols
[i
].type
;
576 attr
->config
= event_symbols
[i
].config
;
/*
 * Parse a raw event spec ("r" followed by a hex config value) into
 * PERF_TYPE_RAW + the decoded config.
 */
584 static enum event_result
585 parse_raw_event(const char **strp
, struct perf_event_attr
*attr
)
587 const char *str
= *strp
;
593 n
= hex2u64(str
+ 1, &config
);
596 attr
->type
= PERF_TYPE_RAW
;
597 attr
->config
= config
;
/*
 * Parse a "<type>:<config>" numeric event spec, where type must be a
 * valid PERF_TYPE_* value below PERF_TYPE_MAX.
 */
603 static enum event_result
604 parse_numeric_event(const char **strp
, struct perf_event_attr
*attr
)
606 const char *str
= *strp
;
611 type
= strtoul(str
, &endp
, 0);
612 if (endp
> str
&& type
< PERF_TYPE_MAX
&& *endp
== ':') {
614 config
= strtoul(str
, &endp
, 0);
617 attr
->config
= config
;
/*
 * Parse trailing u/k/h modifiers, which restrict the event to user,
 * kernel, or hypervisor context by setting the corresponding exclude_*
 * bits (eu/ek/eh start at 1 = excluded, and are presumably cleared by the
 * matching modifier in lines not visible here).
 */
625 static enum event_result
626 parse_event_modifier(const char **strp
, struct perf_event_attr
*attr
)
628 const char *str
= *strp
;
629 int eu
= 1, ek
= 1, eh
= 1;
636 else if (*str
== 'k')
638 else if (*str
== 'h')
/* Only apply when at least one modifier character was consumed. */
644 if (str
>= *strp
+ 2) {
646 attr
->exclude_user
= eu
;
647 attr
->exclude_kernel
= ek
;
648 attr
->exclude_hv
= eh
;
655 * Each event can have multiple symbolic names.
656 * Symbolic names are (almost) exactly matched.
/*
 * Try each parser in turn — tracepoint, raw, numeric, symbolic, generic
 * hardware-cache — stopping at the first that does not fail, then apply
 * any trailing u/k/h modifiers.
 */
658 static enum event_result
659 parse_event_symbols(const char **str
, struct perf_event_attr
*attr
)
661 enum event_result ret
;
663 ret
= parse_tracepoint_event(str
, attr
);
664 if (ret
!= EVT_FAILED
)
667 ret
= parse_raw_event(str
, attr
);
668 if (ret
!= EVT_FAILED
)
671 ret
= parse_numeric_event(str
, attr
);
672 if (ret
!= EVT_FAILED
)
675 ret
= parse_symbolic_event(str
, attr
);
676 if (ret
!= EVT_FAILED
)
679 ret
= parse_generic_hw_event(str
, attr
);
680 if (ret
!= EVT_FAILED
)
686 parse_event_modifier(str
, attr
);
/*
 * Read the numeric id of tracepoint @orgname from debugfs and record it in
 * the perf header.
 * NOTE(review): the debugfs mount point is hard-coded here instead of using
 * the debugfs_path global used elsewhere in this file — inconsistent if
 * debugfs is mounted somewhere else.  Also an unchecked sprintf; a long
 * orgname could overflow filename[PATH_MAX] (snprintf would be safer).
 */
691 static void store_event_type(const char *orgname
)
693 char filename
[PATH_MAX
], *c
;
697 sprintf(filename
, "/sys/kernel/debug/tracing/events/%s/id", orgname
);
698 c
= strchr(filename
, ':');
702 file
= fopen(filename
, "r");
705 if (fscanf(file
, "%i", &id
) < 1)
706 die("cannot store event ID");
708 perf_header__push_event(id
, orgname
);
/*
 * Option-callback entry point: parse one or more comma/space-separated
 * event specs from @str, appending each resulting perf_event_attr to the
 * global attrs[] table (up to MAX_COUNTERS).
 */
712 int parse_events(const struct option
*opt __used
, const char *str
, int unset __used
)
714 struct perf_event_attr attr
;
715 enum event_result ret
;
/* A ':' marks a tracepoint spec; record its id in the perf header. */
717 if (strchr(str
, ':'))
718 store_event_type(str
);
721 if (nr_counters
== MAX_COUNTERS
)
724 memset(&attr
, 0, sizeof(attr
));
725 ret
= parse_event_symbols(&str
, &attr
);
726 if (ret
== EVT_FAILED
)
/* The spec must end exactly at NUL, ',' or whitespace. */
729 if (!(*str
== 0 || *str
== ',' || isspace(*str
)))
/* EVT_HANDLED_ALL means the counters were already stored recursively. */
732 if (ret
!= EVT_HANDLED_ALL
) {
733 attrs
[nr_counters
] = attr
;
741 while (isspace(*str
))
/* Human-readable category labels, indexed by PERF_TYPE_* + 1 (0 = unknown). */
748 static const char * const event_type_descriptors
[] = {
753 "Hardware cache event",
757 * Print the events from <debugfs_mount_point>/tracing/events
760 static void print_tracepoint_events(void)
762 DIR *sys_dir
, *evt_dir
;
763 struct dirent
*sys_next
, *evt_next
, sys_dirent
, evt_dirent
;
765 char evt_path
[MAXPATHLEN
];
767 if (valid_debugfs_mount(debugfs_path
))
770 sys_dir
= opendir(debugfs_path
);
773 sys_dir_fd
= dirfd(sys_dir
);
/* Walk every subsystem directory, then every event below it. */
775 for_each_subsystem(sys_dir
, sys_dirent
, sys_next
) {
776 int dfd
= openat(sys_dir_fd
, sys_dirent
.d_name
,
777 O_RDONLY
|O_DIRECTORY
), evt_dir_fd
;
780 evt_dir
= fdopendir(dfd
);
785 evt_dir_fd
= dirfd(evt_dir
);
786 for_each_event(sys_dirent
, evt_dir
, evt_dirent
, evt_next
) {
787 snprintf(evt_path
, MAXPATHLEN
, "%s:%s",
788 sys_dirent
.d_name
, evt_dirent
.d_name
);
/* +1 matches the descriptor table's PERF_TYPE_* + 1 indexing. */
789 fprintf(stderr
, " %-42s [%s]\n", evt_path
,
790 event_type_descriptors
[PERF_TYPE_TRACEPOINT
+1]);
800 * Print the help text for the event symbols:
802 void print_events(void)
804 struct event_symbol
*syms
= event_symbols
;
805 unsigned int i
, type
, op
, prev_type
= -1;
808 fprintf(stderr
, "\n");
809 fprintf(stderr
, "List of pre-defined events (to be used in -e):\n");
/* First pass: the static symbol table, grouped by descriptor category. */
811 for (i
= 0; i
< ARRAY_SIZE(event_symbols
); i
++, syms
++) {
812 type
= syms
->type
+ 1;
813 if (type
>= ARRAY_SIZE(event_type_descriptors
))
/* Blank line between categories. */
816 if (type
!= prev_type
)
817 fprintf(stderr
, "\n");
819 if (strlen(syms
->alias
))
820 sprintf(name
, "%s OR %s", syms
->symbol
, syms
->alias
);
822 strcpy(name
, syms
->symbol
);
823 fprintf(stderr
, " %-42s [%s]\n", name
,
824 event_type_descriptors
[type
]);
829 fprintf(stderr
, "\n");
/* Second pass: enumerate every valid hardware-cache type/op/result triple. */
830 for (type
= 0; type
< PERF_COUNT_HW_CACHE_MAX
; type
++) {
831 for (op
= 0; op
< PERF_COUNT_HW_CACHE_OP_MAX
; op
++) {
832 /* skip invalid cache type */
833 if (!is_cache_op_valid(type
, op
))
836 for (i
= 0; i
< PERF_COUNT_HW_CACHE_RESULT_MAX
; i
++) {
837 fprintf(stderr
, " %-42s [%s]\n",
838 event_cache_name(type
, op
, i
),
/* NOTE(review): magic index 4 — presumably "Hardware cache event"; a named constant would be clearer. */
839 event_type_descriptors
[4]);
844 fprintf(stderr
, "\n");
845 fprintf(stderr
, " %-42s [raw hardware event descriptor]\n",
847 fprintf(stderr
, "\n");
849 print_tracepoint_events();