/* tools/perf/util/header.c */
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <byteswap.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/kernel.h>

#include "evlist.h"
#include "evsel.h"
#include "util.h"
#include "header.h"
#include "../perf.h"
#include "trace-event.h"
#include "session.h"
#include "symbol.h"
#include "debug.h"

static bool no_buildid_cache = false;

static int event_count;
static struct perf_trace_event_type *events;

int perf_header__push_event(u64 id, const char *name)
{
	if (strlen(name) > MAX_EVENT_NAME)
		pr_warning("Event %s will be truncated\n", name);

	if (!events) {
		events = malloc(sizeof(struct perf_trace_event_type));
		if (events == NULL)
			return -ENOMEM;
	} else {
		struct perf_trace_event_type *nevents;

		nevents = realloc(events, (event_count + 1) * sizeof(*events));
		if (nevents == NULL)
			return -ENOMEM;
		events = nevents;
	}
	memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
	events[event_count].event_id = id;
	strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
	event_count++;
	return 0;
}

char *perf_header__find_event(u64 id)
{
	int i;
	for (i = 0 ; i < event_count; i++) {
		if (events[i].event_id == id)
			return events[i].name;
	}
	return NULL;
}

static const char *__perf_magic = "PERFFILE";

#define PERF_MAGIC (*(u64 *)__perf_magic)

struct perf_file_attr {
	struct perf_event_attr	attr;
	struct perf_file_section	ids;
};

void perf_header__set_feat(struct perf_header *header, int feat)
{
	set_bit(feat, header->adds_features);
}

void perf_header__clear_feat(struct perf_header *header, int feat)
{
	clear_bit(feat, header->adds_features);
}

bool perf_header__has_feat(const struct perf_header *header, int feat)
{
	return test_bit(feat, header->adds_features);
}

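/*
 * Low-level write helper: keep retrying on short writes until the whole
 * buffer has been flushed, returning 0 on success or -errno on failure.
 */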
static int do_write(int fd, const void *buf, size_t size)
{
	while (size) {
		int ret = write(fd, buf, size);

		if (ret < 0)
			return -errno;

		size -= ret;
		buf += ret;
	}

	return 0;
}

#define NAME_ALIGN 64

static int write_padded(int fd, const void *bf, size_t count,
			size_t count_aligned)
{
	static const char zero_buf[NAME_ALIGN];
	int err = do_write(fd, bf, count);

	if (!err)
		err = do_write(fd, zero_buf, count_aligned - count);

	return err;
}

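/*
 * Iterate over a dso list, visiting only entries that carry a build id.
 * The trailing else makes the loop body attach to this construct just like
 * a plain list_for_each_entry() statement.
 */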
#define dsos__for_each_with_build_id(pos, head)	\
	list_for_each_entry(pos, head, node)		\
		if (!pos->has_build_id)			\
			continue;			\
		else

static int __dsos__write_buildid_table(struct list_head *head, pid_t pid,
				       u16 misc, int fd)
{
	struct dso *pos;

	dsos__for_each_with_build_id(pos, head) {
		int err;
		struct build_id_event b;
		size_t len;

		if (!pos->hit)
			continue;
		len = pos->long_name_len + 1;
		len = ALIGN(len, NAME_ALIGN);
		memset(&b, 0, sizeof(b));
		memcpy(&b.build_id, pos->build_id, sizeof(pos->build_id));
		b.pid = pid;
		b.header.misc = misc;
		b.header.size = sizeof(b) + len;
		err = do_write(fd, &b, sizeof(b));
		if (err < 0)
			return err;
		err = write_padded(fd, pos->long_name,
				   pos->long_name_len + 1, len);
		if (err < 0)
			return err;
	}

	return 0;
}

static int machine__write_buildid_table(struct machine *machine, int fd)
{
	int err;
	u16 kmisc = PERF_RECORD_MISC_KERNEL,
	    umisc = PERF_RECORD_MISC_USER;

	if (!machine__is_host(machine)) {
		kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
		umisc = PERF_RECORD_MISC_GUEST_USER;
	}

	err = __dsos__write_buildid_table(&machine->kernel_dsos, machine->pid,
					  kmisc, fd);
	if (err == 0)
		err = __dsos__write_buildid_table(&machine->user_dsos,
						  machine->pid, umisc, fd);
	return err;
}

static int dsos__write_buildid_table(struct perf_header *header, int fd)
{
	struct perf_session *session = container_of(header,
			struct perf_session, header);
	struct rb_node *nd;
	int err = machine__write_buildid_table(&session->host_machine, fd);

	if (err)
		return err;

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		err = machine__write_buildid_table(pos, fd);
		if (err)
			break;
	}
	return err;
}

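/*
 * Build-id cache layout: each object is hard-linked (or copied) to
 * <debugdir>/<object path>/<sbuild_id>, and a relative symlink is created
 * under <debugdir>/.build-id/<first two hex chars>/<remaining hex chars>
 * pointing back at it.  kallsyms is special-cased since "[kernel.kallsyms]"
 * has no backing file; its contents are copied from /proc/kallsyms.
 */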
int build_id_cache__add_s(const char *sbuild_id, const char *debugdir,
			  const char *name, bool is_kallsyms)
{
	const size_t size = PATH_MAX;
	char *realname, *filename = malloc(size),
	     *linkname = malloc(size), *targetname;
	int len, err = -1;

	if (is_kallsyms)
		realname = (char *)name;
	else
		realname = realpath(name, NULL);

	if (realname == NULL || filename == NULL || linkname == NULL)
		goto out_free;

	len = snprintf(filename, size, "%s%s%s",
		       debugdir, is_kallsyms ? "/" : "", realname);
	if (mkdir_p(filename, 0755))
		goto out_free;

	/* filename is heap-allocated, so use the allocation size, not sizeof(char *) */
	snprintf(filename + len, size - len, "/%s", sbuild_id);

	if (access(filename, F_OK)) {
		if (is_kallsyms) {
			if (copyfile("/proc/kallsyms", filename))
				goto out_free;
		} else if (link(realname, filename) && copyfile(name, filename))
			goto out_free;
	}

	len = snprintf(linkname, size, "%s/.build-id/%.2s",
		       debugdir, sbuild_id);

	if (access(linkname, X_OK) && mkdir_p(linkname, 0755))
		goto out_free;

	snprintf(linkname + len, size - len, "/%s", sbuild_id + 2);
	targetname = filename + strlen(debugdir) - 5;
	memcpy(targetname, "../..", 5);

	if (symlink(targetname, linkname) == 0)
		err = 0;
out_free:
	if (!is_kallsyms)
		free(realname);
	free(filename);
	free(linkname);
	return err;
}

static int build_id_cache__add_b(const u8 *build_id, size_t build_id_size,
				 const char *name, const char *debugdir,
				 bool is_kallsyms)
{
	char sbuild_id[BUILD_ID_SIZE * 2 + 1];

	build_id__sprintf(build_id, build_id_size, sbuild_id);

	return build_id_cache__add_s(sbuild_id, debugdir, name, is_kallsyms);
}

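/*
 * Removal is the reverse operation: unlink the .build-id/xx/yyyy... symlink,
 * then resolve its relative target against the .build-id directory and
 * unlink the cached copy it pointed to.
 */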
int build_id_cache__remove_s(const char *sbuild_id, const char *debugdir)
{
	const size_t size = PATH_MAX;
	char *filename = malloc(size),
	     *linkname = malloc(size);
	int err = -1;

	if (filename == NULL || linkname == NULL)
		goto out_free;

	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, sbuild_id + 2);

	if (access(linkname, F_OK))
		goto out_free;

	if (readlink(linkname, filename, size) < 0)
		goto out_free;

	if (unlink(linkname))
		goto out_free;

	/*
	 * Since the link is relative, we must make it absolute:
	 */
	snprintf(linkname, size, "%s/.build-id/%.2s/%s",
		 debugdir, sbuild_id, filename);

	if (unlink(linkname))
		goto out_free;

	err = 0;
out_free:
	free(filename);
	free(linkname);
	return err;
}

static int dso__cache_build_id(struct dso *dso, const char *debugdir)
{
	bool is_kallsyms = dso->kernel && dso->long_name[0] != '/';

	return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id),
				     dso->long_name, debugdir, is_kallsyms);
}

static int __dsos__cache_build_ids(struct list_head *head, const char *debugdir)
{
	struct dso *pos;
	int err = 0;

	dsos__for_each_with_build_id(pos, head)
		if (dso__cache_build_id(pos, debugdir))
			err = -1;

	return err;
}

static int machine__cache_build_ids(struct machine *machine, const char *debugdir)
{
	int ret = __dsos__cache_build_ids(&machine->kernel_dsos, debugdir);
	ret |= __dsos__cache_build_ids(&machine->user_dsos, debugdir);
	return ret;
}

static int perf_session__cache_build_ids(struct perf_session *session)
{
	struct rb_node *nd;
	int ret;
	char debugdir[PATH_MAX];

	snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);

	if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
		return -1;

	ret = machine__cache_build_ids(&session->host_machine, debugdir);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__cache_build_ids(pos, debugdir);
	}
	return ret ? -1 : 0;
}

static bool machine__read_build_ids(struct machine *machine, bool with_hits)
{
	bool ret = __dsos__read_build_ids(&machine->kernel_dsos, with_hits);
	ret |= __dsos__read_build_ids(&machine->user_dsos, with_hits);
	return ret;
}

static bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
{
	struct rb_node *nd;
	bool ret = machine__read_build_ids(&session->host_machine, with_hits);

	for (nd = rb_first(&session->machines); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);
		ret |= machine__read_build_ids(pos, with_hits);
	}

	return ret;
}

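/*
 * The additional header features are laid out right after the data section:
 * first a table of perf_file_section entries (one per feature bit set in
 * adds_features), then each feature's payload.  Space for the table is
 * reserved up front and the table itself is written last, once every
 * offset and size is known.
 */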
static int perf_header__adds_write(struct perf_header *header,
				   struct perf_evlist *evlist, int fd)
{
	int nr_sections;
	struct perf_session *session;
	struct perf_file_section *feat_sec;
	int sec_size;
	u64 sec_start;
	int idx = 0, err;

	session = container_of(header, struct perf_session, header);

	if (perf_header__has_feat(header, HEADER_BUILD_ID) &&
	    !perf_session__read_build_ids(session, true))
		perf_header__clear_feat(header, HEADER_BUILD_ID);

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (feat_sec == NULL)
		return -ENOMEM;

	sec_size = sizeof(*feat_sec) * nr_sections;

	sec_start = header->data_offset + header->data_size;
	lseek(fd, sec_start + sec_size, SEEK_SET);

	if (perf_header__has_feat(header, HEADER_TRACE_INFO)) {
		struct perf_file_section *trace_sec;

		trace_sec = &feat_sec[idx++];

		/* Write trace info */
		trace_sec->offset = lseek(fd, 0, SEEK_CUR);
		read_tracing_data(fd, &evlist->entries);
		trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
	}

	if (perf_header__has_feat(header, HEADER_BUILD_ID)) {
		struct perf_file_section *buildid_sec;

		buildid_sec = &feat_sec[idx++];

		/* Write build-ids */
		buildid_sec->offset = lseek(fd, 0, SEEK_CUR);
		err = dsos__write_buildid_table(header, fd);
		if (err < 0) {
			pr_debug("failed to write buildid table\n");
			goto out_free;
		}
		buildid_sec->size = lseek(fd, 0, SEEK_CUR) -
				    buildid_sec->offset;
		if (!no_buildid_cache)
			perf_session__cache_build_ids(session);
	}

	lseek(fd, sec_start, SEEK_SET);
	err = do_write(fd, feat_sec, sec_size);
	if (err < 0)
		pr_debug("failed to write feature section\n");
out_free:
	free(feat_sec);
	return err;
}

int perf_header__write_pipe(int fd)
{
	struct perf_pipe_file_header f_header;
	int err;

	f_header = (struct perf_pipe_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
	};

	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf pipe header\n");
		return err;
	}

	return 0;
}

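/*
 * On-disk layout written here: the file header, the per-event id arrays,
 * the attribute table, the optional event-type table, the sample data and,
 * when called at exit, the additional feature sections.  The file header
 * itself is written last since it records the offsets and sizes of
 * everything else.
 */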
int perf_session__write_header(struct perf_session *session,
			       struct perf_evlist *evlist,
			       int fd, bool at_exit)
{
	struct perf_file_header f_header;
	struct perf_file_attr	f_attr;
	struct perf_header *header = &session->header;
	struct perf_evsel *attr, *pair = NULL;
	int err;

	lseek(fd, sizeof(f_header), SEEK_SET);

	if (session->evlist != evlist)
		pair = list_entry(session->evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(attr, &evlist->entries, node) {
		attr->id_offset = lseek(fd, 0, SEEK_CUR);
		err = do_write(fd, attr->id, attr->ids * sizeof(u64));
		if (err < 0) {
out_err_write:
			pr_debug("failed to write perf header\n");
			return err;
		}
		if (session->evlist != evlist) {
			err = do_write(fd, pair->id, pair->ids * sizeof(u64));
			if (err < 0)
				goto out_err_write;
			attr->ids += pair->ids;
			pair = list_entry(pair->node.next, struct perf_evsel, node);
		}
	}

	header->attr_offset = lseek(fd, 0, SEEK_CUR);

	list_for_each_entry(attr, &evlist->entries, node) {
		f_attr = (struct perf_file_attr){
			.attr = attr->attr,
			.ids  = {
				.offset = attr->id_offset,
				.size   = attr->ids * sizeof(u64),
			}
		};
		err = do_write(fd, &f_attr, sizeof(f_attr));
		if (err < 0) {
			pr_debug("failed to write perf header attribute\n");
			return err;
		}
	}

	header->event_offset = lseek(fd, 0, SEEK_CUR);
	header->event_size = event_count * sizeof(struct perf_trace_event_type);
	if (events) {
		err = do_write(fd, events, header->event_size);
		if (err < 0) {
			pr_debug("failed to write perf header events\n");
			return err;
		}
	}

	header->data_offset = lseek(fd, 0, SEEK_CUR);

	if (at_exit) {
		err = perf_header__adds_write(header, evlist, fd);
		if (err < 0)
			return err;
	}

	f_header = (struct perf_file_header){
		.magic	   = PERF_MAGIC,
		.size	   = sizeof(f_header),
		.attr_size = sizeof(f_attr),
		.attrs = {
			.offset = header->attr_offset,
			.size   = evlist->nr_entries * sizeof(f_attr),
		},
		.data = {
			.offset = header->data_offset,
			.size	= header->data_size,
		},
		.event_types = {
			.offset = header->event_offset,
			.size	= header->event_size,
		},
	};

	memcpy(&f_header.adds_features, &header->adds_features, sizeof(header->adds_features));

	lseek(fd, 0, SEEK_SET);
	err = do_write(fd, &f_header, sizeof(f_header));
	if (err < 0) {
		pr_debug("failed to write perf header\n");
		return err;
	}
	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	header->frozen = 1;
	return 0;
}

static int perf_header__getbuffer64(struct perf_header *header,
				    int fd, void *buf, size_t size)
{
	if (readn(fd, buf, size) <= 0)
		return -1;

	if (header->needs_swap)
		mem_bswap_64(buf, size);

	return 0;
}

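/*
 * Walk the feature section table that follows the data section and hand
 * each section whose feature bit is set to the supplied callback.
 */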
int perf_header__process_sections(struct perf_header *header, int fd,
				  int (*process)(struct perf_file_section *section,
						 struct perf_header *ph,
						 int feat, int fd))
{
	struct perf_file_section *feat_sec;
	int nr_sections;
	int sec_size;
	int idx = 0;
	int err = -1, feat = 1;

	nr_sections = bitmap_weight(header->adds_features, HEADER_FEAT_BITS);
	if (!nr_sections)
		return 0;

	feat_sec = calloc(sizeof(*feat_sec), nr_sections);
	if (!feat_sec)
		return -1;

	sec_size = sizeof(*feat_sec) * nr_sections;

	lseek(fd, header->data_offset + header->data_size, SEEK_SET);

	if (perf_header__getbuffer64(header, fd, feat_sec, sec_size))
		goto out_free;

	err = 0;
	while (idx < nr_sections && feat < HEADER_LAST_FEATURE) {
		if (perf_header__has_feat(header, feat)) {
			struct perf_file_section *sec = &feat_sec[idx++];

			err = process(sec, header, feat, fd);
			if (err < 0)
				break;
		}
		++feat;
	}
out_free:
	free(feat_sec);
	return err;
}

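/*
 * A perf.data file may come from a machine of the opposite byte order, so
 * cross-endian input is detected heuristically: if attr_size does not match
 * but its byte-swapped value does, the header is swapped up to adds_features
 * and needs_swap is recorded for the rest of the session.
 */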
int perf_file_header__read(struct perf_file_header *header,
			   struct perf_header *ph, int fd)
{
	lseek(fd, 0, SEEK_SET);

	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (header->attr_size != sizeof(struct perf_file_attr)) {
		u64 attr_size = bswap_64(header->attr_size);

		if (attr_size != sizeof(struct perf_file_attr))
			return -1;

		mem_bswap_64(header, offsetof(struct perf_file_header,
					      adds_features));
		ph->needs_swap = true;
	}

	if (header->size != sizeof(*header)) {
		/* Support the previous format */
		if (header->size == offsetof(typeof(*header), adds_features))
			bitmap_zero(header->adds_features, HEADER_FEAT_BITS);
		else
			return -1;
	}

	memcpy(&ph->adds_features, &header->adds_features,
	       sizeof(ph->adds_features));
	/*
	 * FIXME: hack that assumes that if we need swap, the perf.data file
	 * may be coming from an arch with a different word size, ergo a
	 * different DEFINE_BITMAP format; investigate more later, but for now
	 * it's mostly safe to assume that we have a build-id section. Trace
	 * files probably have several other issues in this realm anyway...
	 */
	if (ph->needs_swap) {
		memset(&ph->adds_features, 0, sizeof(ph->adds_features));
		perf_header__set_feat(ph, HEADER_BUILD_ID);
	}

	ph->event_offset = header->event_types.offset;
	ph->event_size	 = header->event_types.size;
	ph->data_offset	 = header->data.offset;
	ph->data_size	 = header->data.size;
	return 0;
}

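/*
 * Attach a build id read from the header to the right dso: the cpumode bits
 * in header.misc select the machine's kernel or user dso list, and names in
 * square brackets (e.g. "[kernel.kallsyms]") mark the dso as a kernel one.
 */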
static int __event_process_build_id(struct build_id_event *bev,
				    char *filename,
				    struct perf_session *session)
{
	int err = -1;
	struct list_head *head;
	struct machine *machine;
	u16 misc;
	struct dso *dso;
	enum dso_kernel_type dso_type;

	machine = perf_session__findnew_machine(session, bev->pid);
	if (!machine)
		goto out;

	misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

	switch (misc) {
	case PERF_RECORD_MISC_KERNEL:
		dso_type = DSO_TYPE_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		dso_type = DSO_TYPE_GUEST_KERNEL;
		head = &machine->kernel_dsos;
		break;
	case PERF_RECORD_MISC_USER:
	case PERF_RECORD_MISC_GUEST_USER:
		dso_type = DSO_TYPE_USER;
		head = &machine->user_dsos;
		break;
	default:
		goto out;
	}

	dso = __dsos__findnew(head, filename);
	if (dso != NULL) {
		char sbuild_id[BUILD_ID_SIZE * 2 + 1];

		dso__set_build_id(dso, &bev->build_id);

		if (filename[0] == '[')
			dso->kernel = dso_type;

		build_id__sprintf(dso->build_id, sizeof(dso->build_id),
				  sbuild_id);
		pr_debug("build id event received for %s: %s\n",
			 dso->long_name, sbuild_id);
	}

	err = 0;
out:
	return err;
}

static int perf_header__read_build_ids(struct perf_header *header,
				       int input, u64 offset, u64 size)
{
	struct perf_session *session = container_of(header, struct perf_session, header);
	struct build_id_event bev;
	char filename[PATH_MAX];
	u64 limit = offset + size;
	int err = -1;

	while (offset < limit) {
		ssize_t len;

		if (read(input, &bev, sizeof(bev)) != sizeof(bev))
			goto out;

		if (header->needs_swap)
			perf_event_header__bswap(&bev.header);

		len = bev.header.size - sizeof(bev);
		if (read(input, filename, len) != len)
			goto out;

		__event_process_build_id(&bev, filename, session);

		offset += bev.header.size;
	}
	err = 0;
out:
	return err;
}

static int perf_file_section__process(struct perf_file_section *section,
				      struct perf_header *ph,
				      int feat, int fd)
{
	if (lseek(fd, section->offset, SEEK_SET) == (off_t)-1) {
		pr_debug("Failed to lseek to %" PRIu64 " offset for feature "
			 "%d, continuing...\n", section->offset, feat);
		return 0;
	}

	switch (feat) {
	case HEADER_TRACE_INFO:
		trace_report(fd, false);
		break;

	case HEADER_BUILD_ID:
		if (perf_header__read_build_ids(ph, fd, section->offset, section->size))
			pr_debug("Failed to read buildids, continuing...\n");
		break;
	default:
		pr_debug("unknown feature %d, continuing...\n", feat);
	}

	return 0;
}

static int perf_file_header__read_pipe(struct perf_pipe_file_header *header,
				       struct perf_header *ph, int fd,
				       bool repipe)
{
	if (readn(fd, header, sizeof(*header)) <= 0 ||
	    memcmp(&header->magic, __perf_magic, sizeof(header->magic)))
		return -1;

	if (repipe && do_write(STDOUT_FILENO, header, sizeof(*header)) < 0)
		return -1;

	if (header->size != sizeof(*header)) {
		u64 size = bswap_64(header->size);

		if (size != sizeof(*header))
			return -1;

		ph->needs_swap = true;
	}

	return 0;
}

static int perf_header__read_pipe(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_pipe_file_header f_header;

	if (perf_file_header__read_pipe(&f_header, header, fd,
					session->repipe) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	session->fd = fd;

	return 0;
}

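/*
 * Read the file header, rebuild the evlist from the attribute table (adding
 * the sample ids that go with each attr), pull in any stored event types and
 * then process the feature sections.  Pipe-mode input only carries the small
 * pipe header, so it takes the perf_header__read_pipe() path instead.
 */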
int perf_session__read_header(struct perf_session *session, int fd)
{
	struct perf_header *header = &session->header;
	struct perf_file_header	f_header;
	struct perf_file_attr	f_attr;
	u64			f_id;
	int nr_attrs, nr_ids, i, j;

	session->evlist = perf_evlist__new(NULL, NULL);
	if (session->evlist == NULL)
		return -ENOMEM;

	if (session->fd_pipe)
		return perf_header__read_pipe(session, fd);

	if (perf_file_header__read(&f_header, header, fd) < 0) {
		pr_debug("incompatible file format\n");
		return -EINVAL;
	}

	nr_attrs = f_header.attrs.size / sizeof(f_attr);
	lseek(fd, f_header.attrs.offset, SEEK_SET);

	for (i = 0; i < nr_attrs; i++) {
		struct perf_evsel *evsel;
		off_t tmp;

		if (perf_header__getbuffer64(header, fd, &f_attr, sizeof(f_attr)))
			goto out_errno;

		tmp = lseek(fd, 0, SEEK_CUR);
		evsel = perf_evsel__new(&f_attr.attr, i);

		if (evsel == NULL)
			goto out_delete_evlist;
		/*
		 * Do it before so that if perf_evsel__alloc_id fails, this
		 * entry gets purged too at perf_evlist__delete().
		 */
		perf_evlist__add(session->evlist, evsel);

		nr_ids = f_attr.ids.size / sizeof(u64);
		/*
		 * We don't have the cpu and thread maps on the header, so
		 * for allocating the perf_sample_id table we fake 1 cpu and
		 * hattr->ids threads.
		 */
		if (perf_evsel__alloc_id(evsel, 1, nr_ids))
			goto out_delete_evlist;

		lseek(fd, f_attr.ids.offset, SEEK_SET);

		for (j = 0; j < nr_ids; j++) {
			if (perf_header__getbuffer64(header, fd, &f_id, sizeof(f_id)))
				goto out_errno;

			perf_evlist__id_add(session->evlist, evsel, 0, j, f_id);
		}

		lseek(fd, tmp, SEEK_SET);
	}

	if (f_header.event_types.size) {
		lseek(fd, f_header.event_types.offset, SEEK_SET);
		events = malloc(f_header.event_types.size);
		if (events == NULL)
			return -ENOMEM;
		if (perf_header__getbuffer64(header, fd, events,
					     f_header.event_types.size))
			goto out_errno;
		event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
	}

	perf_header__process_sections(header, fd, perf_file_section__process);

	lseek(fd, header->data_offset, SEEK_SET);

	header->frozen = 1;
	return 0;
out_errno:
	return -errno;

out_delete_evlist:
	perf_evlist__delete(session->evlist);
	session->evlist = NULL;
	return -ENOMEM;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;
	u64 type = 0;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (!type)
			type = pos->attr.sample_type;
		else if (type != pos->attr.sample_type)
			die("non matching sample_type");
	}

	return type;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	bool value = false, first = true;
	struct perf_evsel *pos;

	list_for_each_entry(pos, &evlist->entries, node) {
		if (first) {
			value = pos->attr.sample_id_all;
			first = false;
		} else if (value != pos->attr.sample_id_all)
			die("non matching sample_id_all");
	}

	return value;
}

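/*
 * When there is no file header to carry the attributes (e.g. pipe mode),
 * they are synthesized as PERF_RECORD_HEADER_ATTR events: the perf_event_attr
 * is followed by its sample ids, with header.size covering both.
 */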
int perf_event__synthesize_attr(struct perf_event_attr *attr, u16 ids, u64 *id,
				perf_event__handler_t process,
				struct perf_session *session)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = malloc(size);

	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = size;

	err = process(ev, NULL, session);

	free(ev);

	return err;
}

int perf_session__synthesize_attrs(struct perf_session *session,
				   perf_event__handler_t process)
{
	struct perf_evsel *attr;
	int err = 0;

	list_for_each_entry(attr, &session->evlist->entries, node) {
		err = perf_event__synthesize_attr(&attr->attr, attr->ids,
						  attr->id, process, session);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_attr(union perf_event *event,
			     struct perf_session *session)
{
	unsigned int i, ids, n_ids;
	struct perf_evsel *evsel;

	if (session->evlist == NULL) {
		session->evlist = perf_evlist__new(NULL, NULL);
		if (session->evlist == NULL)
			return -ENOMEM;
	}

	evsel = perf_evsel__new(&event->attr.attr,
				session->evlist->nr_entries);
	if (evsel == NULL)
		return -ENOMEM;

	perf_evlist__add(session->evlist, evsel);

	ids = event->header.size;
	ids -= (void *)&event->attr.id - (void *)event;
	n_ids = ids / sizeof(u64);
	/*
	 * We don't have the cpu and thread maps on the header, so
	 * for allocating the perf_sample_id table we fake 1 cpu and
	 * hattr->ids threads.
	 */
	if (perf_evsel__alloc_id(evsel, 1, n_ids))
		return -ENOMEM;

	for (i = 0; i < n_ids; i++) {
		perf_evlist__id_add(session->evlist, evsel, 0, i,
				    event->attr.id[i]);
	}

	perf_session__update_sample_type(session);

	return 0;
}

int perf_event__synthesize_event_type(u64 event_id, char *name,
				      perf_event__handler_t process,
				      struct perf_session *session)
{
	union perf_event ev;
	size_t size = 0;
	int err = 0;

	memset(&ev, 0, sizeof(ev));

	ev.event_type.event_type.event_id = event_id;
	memset(ev.event_type.event_type.name, 0, MAX_EVENT_NAME);
	strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);

	ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
	size = strlen(name);
	size = ALIGN(size, sizeof(u64));
	ev.event_type.header.size = sizeof(ev.event_type) -
		(sizeof(ev.event_type.event_type.name) - size);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__synthesize_event_types(perf_event__handler_t process,
				       struct perf_session *session)
{
	struct perf_trace_event_type *type;
	int i, err = 0;

	for (i = 0; i < event_count; i++) {
		type = &events[i];

		err = perf_event__synthesize_event_type(type->event_id,
							type->name, process,
							session);
		if (err) {
			pr_debug("failed to create perf header event type\n");
			return err;
		}
	}

	return err;
}

int perf_event__process_event_type(union perf_event *event,
				   struct perf_session *session __unused)
{
	if (perf_header__push_event(event->event_type.event_type.event_id,
				    event->event_type.event_type.name) < 0)
		return -ENOMEM;

	return 0;
}

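/*
 * In an event stream the tracing data cannot be referenced through a feature
 * section, so a PERF_RECORD_HEADER_TRACING_DATA event announces it instead;
 * the event's size field holds the u64-aligned length of the tracing data
 * that is written right after it.
 */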
int perf_event__synthesize_tracing_data(int fd, struct perf_evlist *evlist,
					perf_event__handler_t process,
					struct perf_session *session __unused)
{
	union perf_event ev;
	ssize_t size = 0, aligned_size = 0, padding;
	int err __used = 0;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = read_tracing_data_size(fd, &evlist->entries);
	if (size <= 0)
		return size;
	aligned_size = ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(&ev, NULL, session);

	err = read_tracing_data(fd, &evlist->entries);
	write_padded(fd, NULL, 0, padding);

	return aligned_size;
}

int perf_event__process_tracing_data(union perf_event *event,
				     struct perf_session *session)
{
	ssize_t size_read, padding, size = event->tracing_data.size;
	off_t offset = lseek(session->fd, 0, SEEK_CUR);
	char buf[BUFSIZ];

	/* setup for reading amidst mmap */
	lseek(session->fd, offset + sizeof(struct tracing_data_event),
	      SEEK_SET);

	size_read = trace_report(session->fd, session->repipe);

	padding = ALIGN(size_read, sizeof(u64)) - size_read;

	if (read(session->fd, buf, padding) < 0)
		die("reading input file");
	if (session->repipe) {
		int retw = write(STDOUT_FILENO, buf, padding);
		if (retw <= 0 || retw != padding)
			die("repiping tracing data padding");
	}

	if (size_read + padding != size)
		die("tracing data size mismatch");

	return size_read + padding;
}

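/*
 * Build ids can also travel in the event stream as PERF_RECORD_HEADER_BUILD_ID
 * records, one per dso that was actually hit; the record layout matches what
 * __dsos__write_buildid_table() puts into the HEADER_BUILD_ID feature section.
 */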
int perf_event__synthesize_build_id(struct dso *pos, u16 misc,
				    perf_event__handler_t process,
				    struct machine *machine,
				    struct perf_session *session)
{
	union perf_event ev;
	size_t len;
	int err = 0;

	if (!pos->hit)
		return err;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = ALIGN(len, NAME_ALIGN);
	memcpy(&ev.build_id.build_id, pos->build_id, sizeof(pos->build_id));
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	err = process(&ev, NULL, session);

	return err;
}

int perf_event__process_build_id(union perf_event *event,
				 struct perf_session *session)
{
	__event_process_build_id(&event->build_id,
				 event->build_id.filename,
				 session);
	return 0;
}

void disable_buildid_cache(void)
{
	no_buildid_cache = true;
}