perf tools: Use __maybe_unused for unused variables
tools/perf/util/hist.c
#include "annotate.h"
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};

struct callchain_param	callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order	= ORDER_CALLEE
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

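/*
 * Grow the column to @len if that is wider than the current width;
 * returns true when the column width actually changed.
 */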
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

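/*
 * An unresolved address is printed as a raw hex number, i.e.
 * BITS_PER_LONG / 4 hex digits. Only widen the DSO column for that case
 * when the user has not pinned column widths or lists on the command line.
 */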
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

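/*
 * Update the column widths needed to print one hist_entry: symbol, comm,
 * thread, DSO and, when branch stacks were recorded, the from/to columns.
 */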
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	u16 len;

	if (h->ms.sym)
		hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4);
	else
		hists__set_unres_dso_col_len(hists, HISTC_DSO);

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->branch_info) {
		int symlen;
		/*
		 * +4 accounts for the '[x] ' priv level info
		 * +2 accounts for the 0x prefix on raw addresses
		 */
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

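/*
 * Attribute the sample period to the right kernel/user/guest bucket,
 * based on the cpumode bits from the sample's PERF_RECORD_MISC flags.
 */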
static void hist_entry__add_cpumode_period(struct hist_entry *he,
					   unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he->period_guest_us += period;
		break;
	default:
		break;
	}
}

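/*
 * Exponential decay used by live browsers such as 'perf top': each pass
 * keeps 7/8 of the accumulated period, so stale entries fade out over time.
 */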
static void hist_entry__decay(struct hist_entry *he)
{
	he->period = (he->period * 7) / 8;
	he->nr_events = (he->nr_events * 7) / 8;
}

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->period;

	if (prev_period == 0)
		return true;

	hist_entry__decay(he);

	if (!he->filtered)
		hists->stats.total_period -= prev_period - he->period;

	return he->period == 0;
}

static void __hists__decay_entries(struct hists *hists, bool zap_user,
				   bool zap_kernel, bool threaded)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse || threaded)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			hist_entry__free(n);
			--hists->nr_entries;
		}
	}
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, false);
}

void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	return __hists__decay_entries(hists, zap_user, zap_kernel, true);
}

/*
 * histogram, sorted on item, collects periods
 */

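/*
 * The callchain root lives in a tail allocation right behind the hist_entry
 * itself, so a single malloc() covers both when callchains are in use.
 */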
static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
	size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
	struct hist_entry *he = malloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;
		he->nr_events = 1;
		if (he->ms.map)
			he->ms.map->referenced = true;
		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);
	}

	return he;
}

static void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered) {
		hists__calc_col_len(hists, h);
		++hists->nr_entries;
		hists->stats.total_period += h->period;
	}
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

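/*
 * Insert @entry into the current input rbtree (hists->entries_in), keyed by
 * hist_entry__cmp(). An existing matching entry just accumulates the period;
 * otherwise a new hist_entry is allocated. Runs under hists->lock so the
 * threaded code can rotate the input trees safely.
 */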
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 u64 period)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int cmp;

	pthread_mutex_lock(&hists->lock);

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__cmp(entry, he);

		if (!cmp) {
			he->period += period;
			++he->nr_events;

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry);
	if (!he)
		goto out_unlock;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	hist_entry__add_cpumode_period(he, al->cpumode, period);
out_unlock:
	pthread_mutex_unlock(&hists->lock);
	return he;
}

struct hist_entry *__hists__add_branch_entry(struct hists *self,
					     struct addr_location *al,
					     struct symbol *sym_parent,
					     struct branch_info *bi,
					     u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= bi->to.map,
			.sym	= bi->to.sym,
		},
		.cpu	= al->cpu,
		.ip	= bi->to.addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
		.branch_info = bi,
	};

	return add_hist_entry(self, &entry, al, period);
}

struct hist_entry *__hists__add_entry(struct hists *self,
				      struct addr_location *al,
				      struct symbol *sym_parent, u64 period)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	= al->cpu,
		.ip	= al->addr,
		.level	= al->level,
		.period	= period,
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent),
	};

	return add_hist_entry(self, &entry, al, period);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		cmp = se->se_cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

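/*
 * Like hist_entry__cmp(), but prefer each sort key's se_collapse method
 * when it provides one, falling back to se_cmp otherwise.
 */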
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct sort_entry *se;
	int64_t cmp = 0;

	list_for_each_entry(se, &hist_entry__sort_list, list) {
		int64_t (*f)(struct hist_entry *, struct hist_entry *);

		f = se->se_collapse ?: se->se_cmp;

		cmp = f(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	free(he);
}

/*
 * collapse the histogram
 */

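/*
 * Insert @he into the collapsed tree; if an equivalent entry already exists
 * there, merge periods and callchains into it, free @he and return false so
 * the caller knows the entry was absorbed rather than inserted.
 */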
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			iter->period += he->period;
			iter->nr_events += he->nr_events;
			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

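/*
 * Hand the current input tree to the resort code and flip new insertions
 * over to the other root in entries_in_array, so that collection can
 * continue while this batch is being collapsed.
 */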
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

static void __hists__collapse_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse && !threaded)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
	}
}

void hists__collapse_resort(struct hists *hists)
{
	return __hists__collapse_resort(hists, false);
}

void hists__collapse_resort_threaded(struct hists *hists)
{
	return __hists__collapse_resort(hists, true);
}

/*
 * reverse the map, sort on period.
 */

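/*
 * Entries with a bigger period go to the left, so an in-order walk of
 * hists->entries yields the output rows in descending period order.
 */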
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (he->period > iter->period)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

static void __hists__output_resort(struct hists *hists, bool threaded)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse || threaded)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists->nr_entries = 0;
	hists->stats.total_period = 0;
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_nr_entries(hists, n);
	}
}

void hists__output_resort(struct hists *hists)
{
	return __hists__output_resort(hists, false);
}

void hists__output_resort_threaded(struct hists *hists)
{
	return __hists__output_resort(hists, true);
}

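/*
 * Clear one filter bit; only when no filter bits remain set does the entry
 * become visible again and get added back into the totals and column widths.
 */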
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	++hists->nr_entries;
	if (h->ms.unfolded)
		hists->nr_entries += h->nr_rows;
	h->row_offset = 0;
	hists->stats.total_period += h->period;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->nr_events;

	hists__calc_col_len(hists, h);
}

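/*
 * The hists__filter_entry_by_*() helpers set the matching filter bit and
 * return true when the entry should be hidden; the hists__filter_by_*()
 * walkers below then rebuild the visible totals from scratch.
 */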
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->nr_entries = hists->stats.total_period = 0;
	hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
	return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
	return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

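/* nr_events[0] is the running total across all record types. */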
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	++hists->stats.nr_events[0];
	++hists->stats.nr_events[type];
}