#include "util/evsel.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"
#include "util/tool.h"

#include "util/parse-options.h"
#include "util/trace-event.h"

#include "util/debug.h"

#include <linux/rbtree.h>

struct alloc_stat;
typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);

static const char	*input_name;

static int		alloc_flag;
static int		caller_flag;

static int		alloc_lines = -1;
static int		caller_lines = -1;

static bool		raw_ip;

static char		default_sort_order[] = "frag,hit,bytes";

static int		*cpunode_map;
static int		max_cpu_num;

/* Per-pointer / per-callsite accounting node; field types inferred from their use below. */
struct alloc_stat {
	u64	call_site;
	u64	ptr;
	u64	bytes_req;
	u64	bytes_alloc;

	u32	hit;
	u32	pingpong;

	short	alloc_cpu;

	struct rb_node node;
};

static struct rb_root root_alloc_stat;
static struct rb_root root_alloc_sorted;
static struct rb_root root_caller_stat;
static struct rb_root root_caller_sorted;

static unsigned long total_requested, total_allocated;
static unsigned long nr_allocs, nr_cross_allocs;

#define PATH_SYS_NODE	"/sys/devices/system/node"

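/*
 * Read the highest possible CPU number from sysfs and allocate the
 * cpu -> NUMA node lookup table used later to spot cross-node allocations.
 */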
static int init_cpunode_map(void)
{
	FILE *fp;
	int i, err = -1;

	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
	if (!fp) {
		max_cpu_num = 4096;
		return 0;
	}

	if (fscanf(fp, "%d", &max_cpu_num) < 1) {
		pr_err("Failed to read 'kernel_max' from sysfs");
		goto out_close;
	}

	max_cpu_num++;

	cpunode_map = calloc(max_cpu_num, sizeof(int));
	if (!cpunode_map) {
		pr_err("%s: calloc failed\n", __func__);
		goto out_close;
	}

	for (i = 0; i < max_cpu_num; i++)
		cpunode_map[i] = -1;

	err = 0;
out_close:
	fclose(fp);
	return err;
}

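/*
 * Walk the /sys/devices/system/node/nodeN directories and record, for every
 * cpuM symlink found there, which NUMA node that CPU belongs to.
 */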
static int setup_cpunode_map(void)
{
	struct dirent *dent1, *dent2;
	DIR *dir1, *dir2;
	unsigned int cpu, mem;
	char buf[PATH_MAX];

	if (init_cpunode_map())
		return -1;

	dir1 = opendir(PATH_SYS_NODE);
	if (!dir1)
		return -1;

	while ((dent1 = readdir(dir1)) != NULL) {
		if (dent1->d_type != DT_DIR ||
		    sscanf(dent1->d_name, "node%u", &mem) < 1)
			continue;

		snprintf(buf, PATH_MAX, "%s/%s", PATH_SYS_NODE, dent1->d_name);
		dir2 = opendir(buf);
		if (!dir2)
			continue;
		while ((dent2 = readdir(dir2)) != NULL) {
			if (dent2->d_type != DT_LNK ||
			    sscanf(dent2->d_name, "cpu%u", &cpu) < 1)
				continue;
			cpunode_map[cpu] = mem;
		}
		closedir(dir2);
	}
	closedir(dir1);
	return 0;
}

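/*
 * Account one allocation in the rbtree keyed by pointer value: existing
 * entries are updated in place, new ones are inserted in ptr order.
 */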
static int insert_alloc_stat(unsigned long call_site, unsigned long ptr,
			     int bytes_req, int bytes_alloc, int cpu)
{
	struct rb_node **node = &root_alloc_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (ptr > data->ptr)
			node = &(*node)->rb_right;
		else if (ptr < data->ptr)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->ptr == ptr) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->ptr = ptr;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_alloc_stat);
	}
	data->call_site = call_site;
	data->alloc_cpu = cpu;
	return 0;
}

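/*
 * Same accounting as insert_alloc_stat(), but keyed by call site so that
 * per-caller statistics can be reported.
 */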
static int insert_caller_stat(unsigned long call_site,
			      int bytes_req, int bytes_alloc)
{
	struct rb_node **node = &root_caller_stat.rb_node;
	struct rb_node *parent = NULL;
	struct alloc_stat *data = NULL;

	while (*node) {
		parent = *node;
		data = rb_entry(*node, struct alloc_stat, node);

		if (call_site > data->call_site)
			node = &(*node)->rb_right;
		else if (call_site < data->call_site)
			node = &(*node)->rb_left;
		else
			break;
	}

	if (data && data->call_site == call_site) {
		data->hit++;
		data->bytes_req += bytes_req;
		data->bytes_alloc += bytes_alloc;
	} else {
		data = malloc(sizeof(*data));
		if (!data) {
			pr_err("%s: malloc failed\n", __func__);
			return -1;
		}
		data->call_site = call_site;
		data->pingpong = 0;
		data->hit = 1;
		data->bytes_req = bytes_req;
		data->bytes_alloc = bytes_alloc;

		rb_link_node(&data->node, parent, node);
		rb_insert_color(&data->node, &root_caller_stat);
	}

	return 0;
}

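/*
 * Handle the kmalloc/kmem_cache_alloc tracepoints: pull the raw fields out of
 * the sample and update both the per-pointer and per-callsite trees.  When
 * @node is set, the allocating CPU's node is compared with the node recorded
 * in the event to count cross-node allocations.
 */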
static int perf_evsel__process_alloc_event(struct perf_evsel *evsel,
					   struct perf_sample *sample, int node)
{
	struct event_format *event = evsel->tp_format;
	void *data = sample->raw_data;
	unsigned long call_site;
	unsigned long ptr;
	int bytes_req, cpu = sample->cpu;
	int bytes_alloc;

	ptr = raw_field_value(event, "ptr", data);
	call_site = raw_field_value(event, "call_site", data);
	bytes_req = raw_field_value(event, "bytes_req", data);
	bytes_alloc = raw_field_value(event, "bytes_alloc", data);

	if (insert_alloc_stat(call_site, ptr, bytes_req, bytes_alloc, cpu) ||
	    insert_caller_stat(call_site, bytes_req, bytes_alloc))
		return -1;

	total_requested += bytes_req;
	total_allocated += bytes_alloc;

	if (node) {
		int node1, node2;

		node1 = cpunode_map[cpu];
		node2 = raw_field_value(event, "node", data);
		if (node1 != node2)
			nr_cross_allocs++;
	}
	nr_allocs++;
	return 0;
}

static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);

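/*
 * Look up an alloc_stat entry in one of the trees, reusing a sort comparator
 * (ptr_cmp or callsite_cmp) to drive the binary search.
 */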
static struct alloc_stat *search_alloc_stat(unsigned long ptr,
					    unsigned long call_site,
					    struct rb_root *root,
					    sort_fn_t sort_fn)
{
	struct rb_node *node = root->rb_node;
	struct alloc_stat key = { .ptr = ptr, .call_site = call_site };

	while (node) {
		struct alloc_stat *data;
		int cmp;

		data = rb_entry(node, struct alloc_stat, node);
		cmp = sort_fn(&key, data);

		if (cmp < 0)
			node = node->rb_left;
		else if (cmp > 0)
			node = node->rb_right;
		else
			return data;
	}
	return NULL;
}

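/*
 * Handle the kfree/kmem_cache_free tracepoints: a free issued on a CPU other
 * than the allocating one is counted as a "ping-pong" against both the
 * pointer entry and its call site.
 */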
static int perf_evsel__process_free_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	unsigned long ptr = raw_field_value(evsel->tp_format, "ptr",
					    sample->raw_data);
	struct alloc_stat *s_alloc, *s_caller;

	s_alloc = search_alloc_stat(ptr, 0, &root_alloc_stat, ptr_cmp);
	if (!s_alloc)
		return 0;

	if ((short)sample->cpu != s_alloc->alloc_cpu) {
		s_alloc->pingpong++;

		s_caller = search_alloc_stat(0, s_alloc->call_site,
					     &root_caller_stat, callsite_cmp);
		if (!s_caller)
			return -1;
		s_caller->pingpong++;
	}
	s_alloc->alloc_cpu = -1;

	return 0;
}

static int perf_evsel__process_kmem_event(struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	struct event_format *event = evsel->tp_format;

	if (!strcmp(event->name, "kmalloc") ||
	    !strcmp(event->name, "kmem_cache_alloc")) {
		return perf_evsel__process_alloc_event(evsel, sample, 0);
	}

	if (!strcmp(event->name, "kmalloc_node") ||
	    !strcmp(event->name, "kmem_cache_alloc_node")) {
		return perf_evsel__process_alloc_event(evsel, sample, 1);
	}

	if (!strcmp(event->name, "kfree") ||
	    !strcmp(event->name, "kmem_cache_free")) {
		return perf_evsel__process_free_event(evsel, sample);
	}

	return 0;
}

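/*
 * Per-sample callback wired into perf_kmem below: resolve the thread for
 * bookkeeping, then dispatch on the tracepoint name.
 */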
static int process_sample_event(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, event->ip.pid);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			 event->header.type);
		return -1;
	}

	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);

	return perf_evsel__process_kmem_event(evsel, sample);
}

static struct perf_tool perf_kmem = {
	.sample			= process_sample_event,
	.comm			= perf_event__process_comm,
	.ordered_samples	= true,
};

static double fragmentation(unsigned long n_req, unsigned long n_alloc)
{
	if (n_alloc == 0)
		return 0;
	else
		return 100.0 - (100.0 * n_req / n_alloc);
}

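/*
 * Dump one of the sorted trees as a table; call sites are resolved to kernel
 * symbols unless --raw-ip was given.
 */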
static void __print_result(struct rb_root *root, struct perf_session *session,
			   int n_lines, int is_caller)
{
	struct rb_node *next;
	struct machine *machine;

	printf("%.102s\n", graph_dotted_line);
	printf(" %-34s |", is_caller ? "Callsite": "Alloc Ptr");
	printf(" Total_alloc/Per | Total_req/Per | Hit | Ping-pong | Frag\n");
	printf("%.102s\n", graph_dotted_line);

	next = rb_first(root);

	machine = perf_session__find_host_machine(session);
	if (!machine) {
		pr_err("__print_result: couldn't find kernel information\n");
		return;
	}
	while (next && n_lines--) {
		struct alloc_stat *data = rb_entry(next, struct alloc_stat,
						   node);
		struct symbol *sym = NULL;
		struct map *map;
		char buf[BUFSIZ];
		u64 addr;

		if (is_caller) {
			addr = data->call_site;
			if (!raw_ip)
				sym = machine__find_kernel_function(machine, addr, &map, NULL);
		} else
			addr = data->ptr;

		if (sym != NULL)
			snprintf(buf, sizeof(buf), "%s+%" PRIx64 "", sym->name,
				 addr - map->unmap_ip(map, sym->start));
		else
			snprintf(buf, sizeof(buf), "%#" PRIx64 "", addr);
		printf(" %-34s |", buf);

		printf(" %9llu/%-5lu | %9llu/%-5lu | %8lu | %8lu | %6.3f%%\n",
		       (unsigned long long)data->bytes_alloc,
		       (unsigned long)data->bytes_alloc / data->hit,
		       (unsigned long long)data->bytes_req,
		       (unsigned long)data->bytes_req / data->hit,
		       (unsigned long)data->hit,
		       (unsigned long)data->pingpong,
		       fragmentation(data->bytes_req, data->bytes_alloc));

		next = rb_next(next);
	}

	if (n_lines == -1)
		printf(" ... | ... | ... | ... | ... | ... \n");

	printf("%.102s\n", graph_dotted_line);
}

static void print_summary(void)
{
	printf("\nSUMMARY\n=======\n");
	printf("Total bytes requested: %lu\n", total_requested);
	printf("Total bytes allocated: %lu\n", total_allocated);
	printf("Total bytes wasted on internal fragmentation: %lu\n",
	       total_allocated - total_requested);
	printf("Internal fragmentation: %f%%\n",
	       fragmentation(total_requested, total_allocated));
	printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}

static void print_result(struct perf_session *session)
{
	if (caller_flag)
		__print_result(&root_caller_sorted, session, caller_lines, 1);
	if (alloc_flag)
		__print_result(&root_alloc_sorted, session, alloc_lines, 0);
	print_summary();
}

struct sort_dimension {
	const char		name[20];
	sort_fn_t		cmp;
	struct list_head	list;
};

static LIST_HEAD(caller_sort);
static LIST_HEAD(alloc_sort);

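/*
 * Insert @data into a sorted result tree, ordering entries by the list of
 * sort dimensions the user selected (the first key that differs decides).
 */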
static void sort_insert(struct rb_root *root, struct alloc_stat *data,
			struct list_head *sort_list)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct sort_dimension *sort;

	while (*new) {
		struct alloc_stat *this;
		int cmp = 0;

		this = rb_entry(*new, struct alloc_stat, node);
		parent = *new;

		list_for_each_entry(sort, sort_list, list) {
			cmp = sort->cmp(data, this);
			if (cmp)
				break;
		}

		if (cmp > 0)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);
}

static void __sort_result(struct rb_root *root, struct rb_root *root_sorted,
			  struct list_head *sort_list)
{
	struct rb_node *node;
	struct alloc_stat *data;

	for (;;) {
		node = rb_first(root);
		if (!node)
			break;

		rb_erase(node, root);
		data = rb_entry(node, struct alloc_stat, node);
		sort_insert(root_sorted, data, sort_list);
	}
}

static void sort_result(void)
{
	__sort_result(&root_alloc_stat, &root_alloc_sorted, &alloc_sort);
	__sort_result(&root_caller_stat, &root_caller_sorted, &caller_sort);
}

static int __cmd_kmem(void)
{
	int err = -EINVAL;
	struct perf_session *session;

	session = perf_session__new(input_name, O_RDONLY, 0, false, &perf_kmem);
	if (session == NULL)
		return -ENOMEM;

	if (perf_session__create_kernel_maps(session) < 0)
		goto out_delete;

	if (!perf_session__has_traces(session, "kmem record"))
		goto out_delete;

	setup_pager();
	err = perf_session__process_events(session, &perf_kmem);
	if (err != 0)
		goto out_delete;
	sort_result();
	print_result(session);
out_delete:
	perf_session__delete(session);
	return err;
}

static const char * const kmem_usage[] = {
	"perf kmem [<options>] {record|stat}",
	NULL
};

static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->ptr < r->ptr)
		return -1;
	else if (l->ptr > r->ptr)
		return 1;
	return 0;
}

static struct sort_dimension ptr_sort_dimension = {
	.name	= "ptr",
	.cmp	= ptr_cmp,
};

static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->call_site < r->call_site)
		return -1;
	else if (l->call_site > r->call_site)
		return 1;
	return 0;
}

static struct sort_dimension callsite_sort_dimension = {
	.name	= "callsite",
	.cmp	= callsite_cmp,
};

static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->hit < r->hit)
		return 1;
	else if (l->hit > r->hit)
		return -1;
	return 0;
}

static struct sort_dimension hit_sort_dimension = {
	.name	= "hit",
	.cmp	= hit_cmp,
};

static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->bytes_alloc < r->bytes_alloc)
		return 1;
	else if (l->bytes_alloc > r->bytes_alloc)
		return -1;
	return 0;
}

static struct sort_dimension bytes_sort_dimension = {
	.name	= "bytes",
	.cmp	= bytes_cmp,
};

static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	double x, y;

	x = fragmentation(l->bytes_req, l->bytes_alloc);
	y = fragmentation(r->bytes_req, r->bytes_alloc);

	if (x < y)
		return 1;
	else if (x > y)
		return -1;
	return 0;
}

static struct sort_dimension frag_sort_dimension = {
	.name	= "frag",
	.cmp	= frag_cmp,
};

static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
{
	if (l->pingpong < r->pingpong)
		return 1;
	else if (l->pingpong > r->pingpong)
		return -1;
	return 0;
}

static struct sort_dimension pingpong_sort_dimension = {
	.name	= "pingpong",
	.cmp	= pingpong_cmp,
};

static struct sort_dimension *avail_sorts[] = {
	&ptr_sort_dimension,
	&callsite_sort_dimension,
	&hit_sort_dimension,
	&bytes_sort_dimension,
	&frag_sort_dimension,
	&pingpong_sort_dimension,
};

#define NUM_AVAIL_SORTS	\
	(int)(sizeof(avail_sorts) / sizeof(struct sort_dimension *))

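/* Append a copy of the named sort dimension to the given sort key list. */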
static int sort_dimension__add(const char *tok, struct list_head *list)
{
	struct sort_dimension *sort;
	int i;

	for (i = 0; i < NUM_AVAIL_SORTS; i++) {
		if (!strcmp(avail_sorts[i]->name, tok)) {
			sort = malloc(sizeof(*sort));
			if (!sort) {
				pr_err("%s: malloc failed\n", __func__);
				return -1;
			}
			memcpy(sort, avail_sorts[i], sizeof(*sort));
			list_add_tail(&sort->list, list);
			return 0;
		}
	}

	return -1;
}

static int setup_sorting(struct list_head *sort_list, const char *arg)
{
	char *tok;
	char *str = strdup(arg);

	if (!str) {
		pr_err("%s: strdup failed\n", __func__);
		return -1;
	}

	while (true) {
		tok = strsep(&str, ",");
		if (!tok)
			break;
		if (sort_dimension__add(tok, sort_list) < 0) {
			error("Unknown --sort key: '%s'", tok);
			free(str);
			return -1;
		}
	}

	free(str);
	return 0;
}

static int parse_sort_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	if (!arg)
		return -1;

	if (caller_flag > alloc_flag)
		return setup_sorting(&caller_sort, arg);
	else
		return setup_sorting(&alloc_sort, arg);
}

static int parse_caller_opt(const struct option *opt __maybe_unused,
			    const char *arg __maybe_unused,
			    int unset __maybe_unused)
{
	caller_flag = (alloc_flag + 1);
	return 0;
}

static int parse_alloc_opt(const struct option *opt __maybe_unused,
			   const char *arg __maybe_unused,
			   int unset __maybe_unused)
{
	alloc_flag = (caller_flag + 1);
	return 0;
}

static int parse_line_opt(const struct option *opt __maybe_unused,
			  const char *arg, int unset __maybe_unused)
{
	int lines;

	if (!arg)
		return -1;

	lines = strtoul(arg, NULL, 10);

	if (caller_flag > alloc_flag)
		caller_lines = lines;
	else
		alloc_lines = lines;

	return 0;
}

static const struct option kmem_options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		   "input file name"),
	OPT_CALLBACK_NOOPT(0, "caller", NULL, NULL,
			   "show per-callsite statistics",
			   parse_caller_opt),
	OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
			   "show per-allocation statistics",
			   parse_alloc_opt),
	OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
		     "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
		     parse_sort_opt),
	OPT_CALLBACK('l', "line", NULL, "num",
		     "show n lines",
		     parse_line_opt),
	OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
	OPT_END()
};

/* Base 'perf record' arguments reconstructed here; the kmem:kmalloc_node/kfree
 * events complete the set consumed by the handlers above. */
static const char *record_args[] = {
	"record", "-a", "-R", "-f", "-c", "1",
	"-e", "kmem:kmalloc",
	"-e", "kmem:kmalloc_node",
	"-e", "kmem:kfree",
	"-e", "kmem:kmem_cache_alloc",
	"-e", "kmem:kmem_cache_alloc_node",
	"-e", "kmem:kmem_cache_free",
};

static int __cmd_record(int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc - 1;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = strdup(record_args[i]);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
{
	argc = parse_options(argc, argv, kmem_options, kmem_usage, 0);

	if (!argc)
		usage_with_options(kmem_usage, kmem_options);

	symbol__init();

	if (!strncmp(argv[0], "rec", 3)) {
		return __cmd_record(argc, argv);
	} else if (!strcmp(argv[0], "stat")) {
		if (setup_cpunode_map())
			return -1;

		if (list_empty(&caller_sort))
			setup_sorting(&caller_sort, default_sort_order);
		if (list_empty(&alloc_sort))
			setup_sorting(&alloc_sort, default_sort_order);

		return __cmd_kmem();
	} else
		usage_with_options(kmem_usage, kmem_options);

	return 0;
}