return head;
}
-static long events;
+static long samples;
static struct timeval last_read, this_read;
static __u64 bytes_written;
/*
* If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and screw up the events under us.
+ * the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the head, we got messed up.
*
last_read = this_read;
if (old != head)
- events++;
+ samples++;
size = head - old;
exit(EXIT_FAILURE);
}
-static void pid_synthesize_mmap_events(pid_t pid)
+static void pid_synthesize_mmap_samples(pid_t pid)
{
char filename[PATH_MAX];
FILE *fp;
fclose(fp);
}
-static void synthesize_events(void)
+static void synthesize_samples(void)
{
DIR *proc;
struct dirent dirent, *next;
continue;
pid_synthesize_comm_event(pid, 1);
- pid_synthesize_mmap_events(pid);
+ pid_synthesize_mmap_samples(pid);
}
closedir(proc);
if (pid > 0) {
pid_synthesize_comm_event(pid, 0);
- pid_synthesize_mmap_events(pid);
+ pid_synthesize_mmap_samples(pid);
}
group_fd = -1;
}
if (system_wide)
- synthesize_events();
+ synthesize_samples();
while (!done) {
- int hits = events;
+ int hits = samples;
for (i = 0; i < nr_cpu; i++) {
for (counter = 0; counter < nr_counters; counter++)
mmap_read(&mmap_array[i][counter]);
}
- if (hits == events)
+ if (hits == samples)
ret = poll(event_array, nr_poll, 100);
}
* Approximate RIP event size: 24 bytes.
*/
fprintf(stderr,
- "[ perf record: Captured and wrote %.3f MB %s (~%lld events) ]\n",
+ "[ perf record: Captured and wrote %.3f MB %s (~%lld samples) ]\n",
(double)bytes_written / 1024.0 / 1024.0,
output_name,
bytes_written / 24);
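
A quick sanity check of the estimate printed above, using only the arithmetic already in the hunk: the message divides bytes_written by the ~24-byte approximate RIP record size from the comment, so a run that reports 2.4 MB written (about 2,516,582 bytes) will print roughly 2,516,582 / 24 ≈ 105,000 samples. This is only a rough figure; it assumes the output is dominated by those minimal ~24-byte records, and records of other sizes would presumably skew it.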
return weight;
}
-static long events;
-static long userspace_events;
+static long samples;
+static long userspace_samples;
static const char CONSOLE_CLEAR[] = "\e[H\e[2J";
static void __list_insert_active_sym(struct sym_entry *syme)
{
int printed = 0, j;
int counter;
- float events_per_sec = events/delay_secs;
- float kevents_per_sec = (events-userspace_events)/delay_secs;
- float sum_kevents = 0.0;
+ float samples_per_sec = samples/delay_secs;
+ float ksamples_per_sec = (samples-userspace_samples)/delay_secs;
+ float sum_ksamples = 0.0;
struct sym_entry *syme, *n;
struct rb_root tmp = RB_ROOT;
struct rb_node *nd;
- events = userspace_events = 0;
+ samples = userspace_samples = 0;
/* Sort the active symbols */
pthread_mutex_lock(&active_symbols_lock);
if (syme->snap_count != 0) {
syme->weight = sym_weight(syme);
rb_insert_active_sym(&tmp, syme);
- sum_kevents += syme->snap_count;
+ sum_ksamples += syme->snap_count;
for (j = 0; j < nr_counters; j++)
syme->count[j] = zero ? 0 : syme->count[j] * 7 / 8;
printf(
"------------------------------------------------------------------------------\n");
printf( " PerfTop:%8.0f irqs/sec kernel:%4.1f%% [",
- events_per_sec,
- 100.0 - (100.0*((events_per_sec-kevents_per_sec)/events_per_sec)));
+ samples_per_sec,
+ 100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
if (nr_counters == 1) {
printf("%d", event_count[0]);
printf("------------------------------------------------------------------------------\n\n");
if (nr_counters == 1)
- printf(" events pcnt");
+ printf(" samples pcnt");
else
- printf(" weight events pcnt");
+ printf(" weight samples pcnt");
printf(" RIP kernel function\n"
- " ______ ______ _____ ________________ _______________\n\n"
+ " ______ _______ _____ ________________ _______________\n\n"
);
for (nd = rb_first(&tmp); nd; nd = rb_next(nd)) {
if (++printed > print_entries || syme->snap_count < count_filter)
continue;
- pcnt = 100.0 - (100.0 * ((sum_kevents - syme->snap_count) /
- sum_kevents));
+ pcnt = 100.0 - (100.0 * ((sum_ksamples - syme->snap_count) /
+ sum_ksamples));
/*
* We color high-overhead entries in red, low-overhead
color = PERF_COLOR_GREEN;
if (nr_counters == 1)
- printf("%19.2f - ", syme->weight);
+ printf("%20.2f - ", syme->weight);
else
- printf("%8.1f %10ld - ", syme->weight, syme->snap_count);
+ printf("%9.1f %10ld - ", syme->weight, syme->snap_count);
color_fprintf(stdout, color, "%4.1f%%", pcnt);
printf(" - %016llx : %s\n", sym->start, sym->name);
return 1;
syme = dso__sym_priv(self, sym);
- /* Tag events to be skipped. */
+ /* Tag samples to be skipped. */
if (!strcmp("default_idle", name) ||
!strcmp("cpu_idle", name) ||
!strcmp("enter_idle", name) ||
}
}
- events--;
+ samples--;
}
static void process_event(uint64_t ip, int counter)
{
- events++;
+ samples++;
if (ip < min_ip || ip > max_ip) {
- userspace_events++;
+ userspace_samples++;
return;
}
/*
* If we're further behind than half the buffer, there's a chance
- * the writer will bite our tail and screw up the events under us.
+ * the writer will bite our tail and mess up the samples under us.
*
* If we somehow ended up ahead of the head, we got messed up.
*
}
while (1) {
- int hits = events;
+ int hits = samples;
for (i = 0; i < nr_cpus; i++) {
for (counter = 0; counter < nr_counters; counter++)
mmap_read(&mmap_array[i][counter]);
}
- if (hits == events)
+ if (hits == samples)
ret = poll(event_array, nr_poll, 100);
}
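
For readers skimming the two loops above: perf record and perf top end on the same consumer pattern. Below is a minimal, self-contained C sketch of that drain-then-poll idea, not the perf source itself; drain_ring(), NR_RINGS, poll_fds and done are hypothetical stand-ins for mmap_read(), the nr_cpu x nr_counters mmap_array, event_array and the real exit condition.

#include <poll.h>
#include <signal.h>

#define NR_RINGS 4

static long samples;                 /* incremented once per record drained */
static struct pollfd poll_fds[NR_RINGS];
static volatile sig_atomic_t done;   /* set from a signal handler, as in perf record */

static void drain_ring(int ring)
{
	/*
	 * Stub: the real code walks the mmap'ed data area from its last
	 * read offset up to the kernel's head and bumps 'samples' for
	 * every record it consumes.
	 */
	(void)ring;
}

void event_loop(void)
{
	while (!done) {
		long hits = samples;
		int i;

		/* Non-blocking pass over every ring buffer. */
		for (i = 0; i < NR_RINGS; i++)
			drain_ring(i);

		/* Nothing new arrived during the pass: block for up to 100ms. */
		if (hits == samples)
			poll(poll_fds, NR_RINGS, 100);
	}
}

The hits == samples comparison is what keeps the loop from sleeping while records are still queued: poll() is only reached after a full pass over all buffers consumed nothing.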