#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1)
#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask))
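/*
 * Worked example (sketch, not part of the patch): ALIGN() rounds x up
 * to the next multiple of a power-of-two boundary 'a' by adding a-1
 * and clearing the low bits:
 *
 *	ALIGN(4096, 4096) == 4096
 *	ALIGN(4097, 4096) == 8192	(4097 + 4095, then & ~4095)
 *
 * The (typeof(x)) cast keeps the mask as wide as x, so a 64-bit x is
 * not truncated by a 32-bit 'a'.
 */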
-static long default_interval = 100000;
-static long event_count[MAX_COUNTERS];
-
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+
+static long default_interval = 100000;
+
static int nr_cpus = 0;
static unsigned int page_size;
static unsigned int mmap_pages = 128;
static int force = 0;
static int append_file = 0;
-const unsigned int default_count[] = {
- 1000000,
- 1000000,
- 10000,
- 10000,
- 1000000,
- 10000,
-};
+static long samples;
+static struct timeval last_read;
+static struct timeval this_read;
+
+static __u64 bytes_written;
+
+static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
+
+static int nr_poll;
+static int nr_cpu;
+
+struct mmap_event {
+ struct perf_event_header header;
+ __u32 pid;
+ __u32 tid;
+ __u64 start;
+ __u64 len;
+ __u64 pgoff;
+ char filename[PATH_MAX];
+};
+
+struct comm_event {
+ struct perf_event_header header;
+ __u32 pid;
+ __u32 tid;
+ char comm[16];
+};
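/*
 * Sketch (assumption, mirroring what pid_synthesize_comm_event() does
 * further below): only the used part of ->comm needs to go out, so the
 * record size can be trimmed to an 8-byte multiple with ALIGN():
 *
 *	size = ALIGN(strlen(comm_ev.comm) + 1, sizeof(__u64));
 *	comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
 */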
+
struct mmap_data {
- int counter;
- void *base;
- unsigned int mask;
- unsigned int prev;
+ int counter;
+ void *base;
+ unsigned int mask;
+ unsigned int prev;
};
+static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
+
static unsigned int mmap_read_head(struct mmap_data *md)
{
 struct perf_counter_mmap_page *pc = md->base;
 unsigned int head;

 head = pc->data_head;
 rmb();

 return head;
}
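/*
 * Sketch (assumption, condensing the mmap_read() body the diff elides):
 * ->prev remembers how far the buffer was consumed last time, so the
 * bytes ready to be flushed are head - prev, wrapped via ->mask:
 *
 *	unsigned int old = md->prev;
 *	unsigned char *data = md->base + page_size;
 *	unsigned long size = head - old;
 *
 *	...write out the range [old & md->mask, head & md->mask)...
 *	md->prev = head;
 */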
-static long samples;
-static struct timeval last_read, this_read;
-
-static __u64 bytes_written;
-
static void mmap_read(struct mmap_data *md)
{
unsigned int head = mmap_read_head(md);
done = 1;
}
-static struct pollfd event_array[MAX_NR_CPUS * MAX_COUNTERS];
-static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int nr_poll;
-static int nr_cpu;
-
-struct mmap_event {
- struct perf_event_header header;
- __u32 pid;
- __u32 tid;
- __u64 start;
- __u64 len;
- __u64 pgoff;
- char filename[PATH_MAX];
-};
-
-struct comm_event {
- struct perf_event_header header;
- __u32 pid;
- __u32 tid;
- char comm[16];
-};
-
static void pid_synthesize_comm_event(pid_t pid, int full)
{
struct comm_event comm_ev;
static void create_counter(int counter, int cpu, pid_t pid)
{
- struct perf_counter_attr attr;
+ struct perf_counter_attr *attr = attrs + counter;
int track = 1;
- memset(&attr, 0, sizeof(attr));
- attr.config = event_id[counter];
- attr.sample_period = event_count[counter];
- attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
+ attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_PERIOD;
if (freq) {
- attr.freq = 1;
- attr.sample_freq = freq;
+ attr->freq = 1;
+ attr->sample_freq = freq;
}
- attr.mmap = track;
- attr.comm = track;
- attr.inherit = (cpu < 0) && inherit;
+ attr->mmap = track;
+ attr->comm = track;
+ attr->inherit = (cpu < 0) && inherit;
track = 0; /* only the first counter needs these */
- fd[nr_cpu][counter] = sys_perf_counter_open(&attr, pid, cpu, group_fd, 0);
+ fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
if (fd[nr_cpu][counter] < 0) {
int err = errno;
if (!argc && target_pid == -1 && !system_wide)
usage_with_options(record_usage, options);
- if (!nr_counters) {
+ if (!nr_counters)
nr_counters = 1;
- event_id[0] = 0;
- }
for (counter = 0; counter < nr_counters; counter++) {
- if (event_count[counter])
+ if (attrs[counter].sample_period)
continue;
- event_count[counter] = default_interval;
+ attrs[counter].sample_period = default_interval;
}
return __cmd_record(argc, argv);
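/*
 * Why the old event_id[0] = 0 assignment could go away (sketch):
 * attrs[] is a static array, hence zero-initialized, and a zeroed
 * entry already reads as .type = PERF_TYPE_HARDWARE (0) with
 * .config = PERF_COUNT_CPU_CYCLES (0). With no -e option the single
 * default counter therefore ends up as:
 *
 *	attrs[0].type          == PERF_TYPE_HARDWARE
 *	attrs[0].config        == PERF_COUNT_CPU_CYCLES
 *	attrs[0].sample_period == 100000	(default_interval)
 */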
#include <sys/prctl.h>
-static int system_wide = 0;
-static int inherit = 1;
+static struct perf_counter_attr default_attrs[MAX_COUNTERS] = {
-static __u64 default_event_id[MAX_COUNTERS] = {
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_TASK_CLOCK },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CONTEXT_SWITCHES },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_CPU_MIGRATIONS },
+ { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_PAGE_FAULTS },
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_INSTRUCTIONS },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_REFERENCES },
+ { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CACHE_MISSES },
};
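/*
 * For comparison (sketch): the removed EID() helper packed both halves
 * into a single __u64:
 *
 *	EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)
 *	  == ((__u64)PERF_TYPE_SOFTWARE << PERF_COUNTER_TYPE_SHIFT)
 *	     | PERF_COUNT_TASK_CLOCK
 *
 * The designated initializers above fill two independent attr fields
 * instead, so nothing needs to shift or mask to read either half back.
 */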
-static int default_interval = 100000;
-static int event_count[MAX_COUNTERS];
+static int system_wide = 0;
+static int inherit = 1;
+
static int fd[MAX_NR_CPUS][MAX_COUNTERS];
static int target_pid = -1;
static void create_perfstat_counter(int counter)
{
- struct perf_counter_attr attr;
-
- memset(&attr, 0, sizeof(attr));
- attr.config = event_id[counter];
- attr.sample_type = 0;
- attr.exclude_kernel = event_mask[counter] & EVENT_MASK_KERNEL;
- attr.exclude_user = event_mask[counter] & EVENT_MASK_USER;
+ struct perf_counter_attr *attr = attrs + counter;
if (scale)
- attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
- PERF_FORMAT_TOTAL_TIME_RUNNING;
+ attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
+ PERF_FORMAT_TOTAL_TIME_RUNNING;
if (system_wide) {
int cpu;
for (cpu = 0; cpu < nr_cpus; cpu ++) {
- fd[cpu][counter] = sys_perf_counter_open(&attr, -1, cpu, -1, 0);
+ fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
if (fd[cpu][counter] < 0) {
printf("perfstat error: syscall returned with %d (%s)\n",
fd[cpu][counter], strerror(errno));
}
}
} else {
- attr.inherit = inherit;
- attr.disabled = 1;
+ attr->inherit = inherit;
+ attr->disabled = 1;
- fd[0][counter] = sys_perf_counter_open(&attr, 0, -1, -1, 0);
+ fd[0][counter] = sys_perf_counter_open(attr, 0, -1, -1, 0);
if (fd[0][counter] < 0) {
printf("perfstat error: syscall returned with %d (%s)\n",
fd[0][counter], strerror(errno));
*/
static inline int nsec_counter(int counter)
{
- if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK))
+ if (attrs[counter].type != PERF_TYPE_SOFTWARE)
+ return 0;
+
+ if (attrs[counter].config == PERF_COUNT_CPU_CLOCK)
return 1;
- if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
+
+ if (attrs[counter].config == PERF_COUNT_TASK_CLOCK)
return 1;
return 0;
/*
* Save the full runtime - to allow normalization during printout:
*/
- if (event_id[counter] == EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK))
+ if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
+ attrs[counter].config == PERF_COUNT_TASK_CLOCK)
runtime_nsecs = count[0];
}
fprintf(stderr, " %14.6f %-20s",
msecs, event_name(counter));
- if (event_id[counter] ==
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK)) {
+ if (attrs[counter].type == PERF_TYPE_SOFTWARE &&
+ attrs[counter].config == PERF_COUNT_TASK_CLOCK) {
fprintf(stderr, " # %11.3f CPU utilization factor",
(double)count[0] / (double)walltime_nsecs);
static const struct option options[] = {
OPT_CALLBACK('e', "event", NULL, "event",
events_help_msg, parse_events),
- OPT_INTEGER('c', "count", &default_interval,
- "event period to sample"),
OPT_BOOLEAN('i', "inherit", &inherit,
"child tasks inherit counters"),
OPT_INTEGER('p', "pid", &target_pid,
int cmd_stat(int argc, const char **argv, const char *prefix)
{
- int counter;
-
page_size = sysconf(_SC_PAGE_SIZE);
create_events_help(events_help_msg);
- memcpy(event_id, default_event_id, sizeof(default_event_id));
+
+ memcpy(attrs, default_attrs, sizeof(attrs));
argc = parse_options(argc, argv, options, stat_usage, 0);
if (!argc)
usage_with_options(stat_usage, options);
- if (!nr_counters) {
+ if (!nr_counters)
nr_counters = 8;
- }
-
- for (counter = 0; counter < nr_counters; counter++) {
- if (event_count[counter])
- continue;
- event_count[counter] = default_interval;
- }
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
assert(nr_cpus <= MAX_NR_CPUS);
assert(nr_cpus >= 0);
#include <linux/unistd.h>
#include <linux/types.h>
-static int system_wide = 0;
+static int fd[MAX_NR_CPUS][MAX_COUNTERS];
-static __u64 default_event_id[MAX_COUNTERS] = {
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS),
- EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS),
+static int system_wide = 0;
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES),
- EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES),
-};
-static int default_interval = 100000;
-static int event_count[MAX_COUNTERS];
-static int fd[MAX_NR_CPUS][MAX_COUNTERS];
+static int default_interval = 100000;
static __u64 count_filter = 5;
static int print_entries = 15;
static int zero;
static int dump_symtab;
-static const unsigned int default_count[] = {
- 1000000,
- 1000000,
- 10000,
- 10000,
- 1000000,
- 10000,
-};
-
/*
* Symbols
*/
struct sym_entry *sym_filter_entry;
-struct dso *kernel_dso;
+struct dso *kernel_dso;
/*
* Symbols will be added here in record_ip and will get out
100.0 - (100.0*((samples_per_sec-ksamples_per_sec)/samples_per_sec)));
if (nr_counters == 1) {
- printf("%d", event_count[0]);
+ printf("%Ld", attrs[0].sample_period);
if (freq)
printf("Hz ");
else
}
struct mmap_data {
- int counter;
- void *base;
- unsigned int mask;
- unsigned int prev;
+ int counter;
+ void *base;
+ unsigned int mask;
+ unsigned int prev;
};
static unsigned int mmap_read_head(struct mmap_data *md)
static int __cmd_top(void)
{
- struct perf_counter_attr attr;
+ struct perf_counter_attr *attr;
pthread_t thread;
int i, counter, group_fd, nr_poll = 0;
unsigned int cpu;
if (target_pid == -1 && profile_cpu == -1)
cpu = i;
- memset(&attr, 0, sizeof(attr));
- attr.config = event_id[counter];
- attr.sample_period = event_count[counter];
- attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
- attr.freq = freq;
+ attr = attrs + counter;
+
+ attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
+ attr->freq = freq;

- fd[i][counter] = sys_perf_counter_open(&attr, target_pid, cpu, group_fd, 0);
+ fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
if (fd[i][counter] < 0) {
int err = errno;
page_size = sysconf(_SC_PAGE_SIZE);
create_events_help(events_help_msg);
- memcpy(event_id, default_event_id, sizeof(default_event_id));
argc = parse_options(argc, argv, options, top_usage, 0);
if (argc)
profile_cpu = -1;
}
- if (!nr_counters) {
+ if (!nr_counters)
nr_counters = 1;
- event_id[0] = 0;
- }
if (delay_secs < 1)
delay_secs = 1;
+ parse_symbols();
+
+ /*
+ * Fill in the ones not specifically initialized via -c:
+ */
for (counter = 0; counter < nr_counters; counter++) {
- if (event_count[counter])
+ if (attrs[counter].sample_period)
continue;
- event_count[counter] = default_interval;
+ attrs[counter].sample_period = default_interval;
}
nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
if (target_pid != -1 || profile_cpu != -1)
nr_cpus = 1;
- parse_symbols();
-
return __cmd_top();
}
#define MAX_COUNTERS 256
#define MAX_NR_CPUS 256
-#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
-
#endif
#include "exec_cmd.h"
#include "string.h"
-int nr_counters;
+int nr_counters;
-__u64 event_id[MAX_COUNTERS] = { };
-int event_mask[MAX_COUNTERS];
+struct perf_counter_attr attrs[MAX_COUNTERS];
struct event_symbol {
- __u64 event;
- char *symbol;
+ __u8 type;
+ __u64 config;
+ char *symbol;
};
+#define C(x, y) .type = PERF_TYPE_##x, .config = PERF_COUNT_##y
+
static struct event_symbol event_symbols[] = {
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cpu-cycles", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CPU_CYCLES), "cycles", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_INSTRUCTIONS), "instructions", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_REFERENCES), "cache-references", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_CACHE_MISSES), "cache-misses", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branch-instructions", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_INSTRUCTIONS), "branches", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BRANCH_MISSES), "branch-misses", },
- {EID(PERF_TYPE_HARDWARE, PERF_COUNT_BUS_CYCLES), "bus-cycles", },
-
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_CLOCK), "cpu-clock", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_TASK_CLOCK), "task-clock", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "page-faults", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS), "faults", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MIN), "minor-faults", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_PAGE_FAULTS_MAJ), "major-faults", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "context-switches", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CONTEXT_SWITCHES), "cs", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "cpu-migrations", },
- {EID(PERF_TYPE_SOFTWARE, PERF_COUNT_CPU_MIGRATIONS), "migrations", },
+ { C(HARDWARE, CPU_CYCLES), "cpu-cycles", },
+ { C(HARDWARE, CPU_CYCLES), "cycles", },
+ { C(HARDWARE, INSTRUCTIONS), "instructions", },
+ { C(HARDWARE, CACHE_REFERENCES), "cache-references", },
+ { C(HARDWARE, CACHE_MISSES), "cache-misses", },
+ { C(HARDWARE, BRANCH_INSTRUCTIONS), "branch-instructions", },
+ { C(HARDWARE, BRANCH_INSTRUCTIONS), "branches", },
+ { C(HARDWARE, BRANCH_MISSES), "branch-misses", },
+ { C(HARDWARE, BUS_CYCLES), "bus-cycles", },
+
+ { C(SOFTWARE, CPU_CLOCK), "cpu-clock", },
+ { C(SOFTWARE, TASK_CLOCK), "task-clock", },
+ { C(SOFTWARE, PAGE_FAULTS), "page-faults", },
+ { C(SOFTWARE, PAGE_FAULTS), "faults", },
+ { C(SOFTWARE, PAGE_FAULTS_MIN), "minor-faults", },
+ { C(SOFTWARE, PAGE_FAULTS_MAJ), "major-faults", },
+ { C(SOFTWARE, CONTEXT_SWITCHES), "context-switches", },
+ { C(SOFTWARE, CONTEXT_SWITCHES), "cs", },
+ { C(SOFTWARE, CPU_MIGRATIONS), "cpu-migrations", },
+ { C(SOFTWARE, CPU_MIGRATIONS), "migrations", },
};
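/*
 * The C() macro mixes designated and positional initializers; the
 * first entry above expands to (sketch):
 *
 *	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_CPU_CYCLES,
 *	  "cpu-cycles", }
 *
 * where the trailing string continues positionally into ->symbol, the
 * member that follows ->config in struct event_symbol.
 */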
#define __PERF_COUNTER_FIELD(config, name) \
"major faults",
};
-char *event_name(int ctr)
+char *event_name(int counter)
{
- __u64 config = event_id[ctr];
- int type = PERF_COUNTER_TYPE(config);
- int id = PERF_COUNTER_ID(config);
+ __u64 config = attrs[counter].config;
+ int type = attrs[counter].type;
static char buf[32];
- if (PERF_COUNTER_RAW(config)) {
- sprintf(buf, "raw 0x%llx", PERF_COUNTER_CONFIG(config));
+ if (attrs[counter].type == PERF_TYPE_RAW) {
+ sprintf(buf, "raw 0x%llx", config);
return buf;
}
switch (type) {
case PERF_TYPE_HARDWARE:
- if (id < PERF_HW_EVENTS_MAX)
- return hw_event_names[id];
+ if (config < PERF_HW_EVENTS_MAX)
+ return hw_event_names[config];
return "unknown-hardware";
case PERF_TYPE_SOFTWARE:
- if (id < PERF_SW_EVENTS_MAX)
- return sw_event_names[id];
+ if (config < PERF_SW_EVENTS_MAX)
+ return sw_event_names[config];
return "unknown-software";
default:
* Each event can have multiple symbolic names.
* Symbolic names are (almost) exactly matched.
*/
-static __u64 match_event_symbols(const char *str)
+static int match_event_symbols(const char *str, struct perf_counter_attr *attr)
{
__u64 config, id;
int type;
unsigned int i;
const char *sep, *pstr;
- if (str[0] == 'r' && hex2u64(str + 1, &config) > 0)
- return config | PERF_COUNTER_RAW_MASK;
+ if (str[0] == 'r' && hex2u64(str + 1, &config) > 0) {
+ attr->type = PERF_TYPE_RAW;
+ attr->config = config;
+
+ return 0;
+ }
pstr = str;
sep = strchr(pstr, ':');
if (sep) {
pstr = sep + 1;
if (strchr(pstr, 'k'))
- event_mask[nr_counters] |= EVENT_MASK_USER;
+ attr->exclude_user = 1;
if (strchr(pstr, 'u'))
- event_mask[nr_counters] |= EVENT_MASK_KERNEL;
+ attr->exclude_kernel = 1;
}
- return EID(type, id);
+ attr->type = type;
+ attr->config = id;
+
+ return 0;
}
for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
if (!strncmp(str, event_symbols[i].symbol,
- strlen(event_symbols[i].symbol)))
- return event_symbols[i].event;
+ strlen(event_symbols[i].symbol))) {
+
+ attr->type = event_symbols[i].type;
+ attr->config = event_symbols[i].config;
+
+ return 0;
+ }
}
- return ~0ULL;
+ return -EINVAL;
}
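/*
 * Example (sketch) of what the reworked parser produces:
 *
 *	match_event_symbols("cycles", &attr)
 *	  -> attr.type = PERF_TYPE_HARDWARE, attr.config = PERF_COUNT_CPU_CYCLES
 *	match_event_symbols("r1a8", &attr)
 *	  -> attr.type = PERF_TYPE_RAW, attr.config = 0x1a8
 *
 * Unknown strings now report -EINVAL instead of the old ~0ULL
 * sentinel, which was itself a representable raw config value.
 */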
int parse_events(const struct option *opt, const char *str, int unset)
{
- __u64 config;
+ struct perf_counter_attr attr;
+ int ret;
+ memset(&attr, 0, sizeof(attr));
again:
if (nr_counters == MAX_COUNTERS)
return -1;
- config = match_event_symbols(str);
- if (config == ~0ULL)
- return -1;
+ ret = match_event_symbols(str, &attr);
+ if (ret < 0)
+ return ret;
- event_id[nr_counters] = config;
+ attrs[nr_counters] = attr;
nr_counters++;
str = strstr(str, ",");
{
unsigned int i;
char *str;
- __u64 e;
str = events_help_msg;
for (i = 0; i < ARRAY_SIZE(event_symbols); i++) {
int type, id;
- e = event_symbols[i].event;
- type = PERF_COUNTER_TYPE(e);
- id = PERF_COUNTER_ID(e);
+ type = event_symbols[i].type;
+ id = event_symbols[i].config;
if (i)
str += sprintf(str, "|");
str += sprintf(str, "|rNNN]");
}
-
* Parse symbolic events/counts passed in as options:
*/
-extern int nr_counters;
-extern __u64 event_id[MAX_COUNTERS];
-extern int event_mask[MAX_COUNTERS];
+extern int nr_counters;
-#define EVENT_MASK_KERNEL 1
-#define EVENT_MASK_USER 2
+extern struct perf_counter_attr attrs[MAX_COUNTERS];
extern char *event_name(int ctr);
if (!ppmu)
return ERR_PTR(-ENXIO);
- if (!perf_event_raw(&counter->attr)) {
- ev = perf_event_id(&counter->attr);
+ if (counter->attr.type != PERF_TYPE_RAW) {
+ ev = counter->attr.config;
if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
return ERR_PTR(-EOPNOTSUPP);
ev = ppmu->generic_events[ev];
} else {
- ev = perf_event_config(&counter->attr);
+ ev = counter->attr.config;
}
counter->hw.config_base = ev;
counter->hw.idx = 0;
/*
* Raw event type provide the config in the event structure
*/
- if (perf_event_raw(attr)) {
- hwc->config |= x86_pmu.raw_event(perf_event_config(attr));
+ if (attr->type == PERF_TYPE_RAW) {
+ hwc->config |= x86_pmu.raw_event(attr->config);
} else {
- if (perf_event_id(attr) >= x86_pmu.max_events)
+ if (attr->config >= x86_pmu.max_events)
return -EINVAL;
/*
* The generic map:
*/
- hwc->config |= x86_pmu.event_map(perf_event_id(attr));
+ hwc->config |= x86_pmu.event_map(attr->config);
}
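/*
 * Example (sketch): "-e r1a8" now arrives here as
 *
 *	attr->type   == PERF_TYPE_RAW
 *	attr->config == 0x1a8
 *
 * so the raw branch feeds attr->config straight into
 * x86_pmu.raw_event(), while generic ids are still bounds-checked
 * against x86_pmu.max_events and mapped via x86_pmu.event_map().
 */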
counter->destroy = hw_perf_counter_destroy;
PERF_SW_EVENTS_MAX = 7,
};
-#define __PERF_COUNTER_MASK(name) \
- (((1ULL << PERF_COUNTER_##name##_BITS) - 1) << \
- PERF_COUNTER_##name##_SHIFT)
-
-#define PERF_COUNTER_RAW_BITS 1
-#define PERF_COUNTER_RAW_SHIFT 63
-#define PERF_COUNTER_RAW_MASK __PERF_COUNTER_MASK(RAW)
-
-#define PERF_COUNTER_CONFIG_BITS 63
-#define PERF_COUNTER_CONFIG_SHIFT 0
-#define PERF_COUNTER_CONFIG_MASK __PERF_COUNTER_MASK(CONFIG)
-
-#define PERF_COUNTER_TYPE_BITS 7
-#define PERF_COUNTER_TYPE_SHIFT 56
-#define PERF_COUNTER_TYPE_MASK __PERF_COUNTER_MASK(TYPE)
-
-#define PERF_COUNTER_EVENT_BITS 56
-#define PERF_COUNTER_EVENT_SHIFT 0
-#define PERF_COUNTER_EVENT_MASK __PERF_COUNTER_MASK(EVENT)
-
/*
* Bits that can be set in attr.sample_type to request information
* in the overflow packets.
*/
struct perf_counter_attr {
/*
- * The MSB of the config word signifies if the rest contains cpu
- * specific (raw) counter configuration data, if unset, the next
- * 7 bits are an event type and the rest of the bits are the event
- * identifier.
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+ __u32 __reserved_1;
+
+ /*
+ * Type specific configuration information.
*/
__u64 config;
comm : 1, /* include comm data */
freq : 1, /* use freq, not period */
- __reserved_1 : 53;
+ __reserved_2 : 53;
__u32 wakeup_events; /* wakeup every n events */
- __u32 __reserved_2;
+ __u32 __reserved_3;
- __u64 __reserved_3;
__u64 __reserved_4;
};
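/*
 * With the split layout, userspace fills two plain fields (sketch,
 * example values only):
 *
 *	struct perf_counter_attr attr = {
 *		.type          = PERF_TYPE_HARDWARE,
 *		.config        = PERF_COUNT_INSTRUCTIONS,
 *		.sample_period = 100000,
 *	};
 *
 * rather than encoding type and id into the top bits of one config word.
 */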
/*
* struct {
- * struct perf_event_header header;
- * u32 pid, ppid;
+ * struct perf_event_header header;
+ * u32 pid, ppid;
* };
*/
PERF_EVENT_FORK = 7,
struct task_struct;
-static inline u64 perf_event_raw(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_RAW_MASK;
-}
-
-static inline u64 perf_event_config(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_CONFIG_MASK;
-}
-
-static inline u64 perf_event_type(struct perf_counter_attr *attr)
-{
- return (attr->config & PERF_COUNTER_TYPE_MASK) >>
- PERF_COUNTER_TYPE_SHIFT;
-}
-
-static inline u64 perf_event_id(struct perf_counter_attr *attr)
-{
- return attr->config & PERF_COUNTER_EVENT_MASK;
-}
-
/**
* struct hw_perf_counter - performance counter hardware details:
*/
*/
static inline int is_software_counter(struct perf_counter *counter)
{
- return !perf_event_raw(&counter->attr) &&
- perf_event_type(&counter->attr) != PERF_TYPE_HARDWARE;
+ return (counter->attr.type != PERF_TYPE_RAW) &&
+ (counter->attr.type != PERF_TYPE_HARDWARE);
}
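/*
 * Classification under the split scheme (sketch):
 *
 *	PERF_TYPE_RAW, PERF_TYPE_HARDWARE  -> hardware PMU counter
 *	every other type                   -> software counter
 *
 * so is_software_counter() reduces to two plain type compares instead
 * of unpacking the raw and type bits out of attr.config.
 */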
extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
enum perf_event_types type,
u32 event, struct pt_regs *regs)
{
- u64 event_config;
-
- event_config = ((u64) type << PERF_COUNTER_TYPE_SHIFT) | event;
-
if (!perf_swcounter_is_counting(counter))
return 0;
- if (counter->attr.config != event_config)
+ if (counter->attr.type != type)
+ return 0;
+ if (counter->attr.config != event)
return 0;
if (regs) {
* to be kernel events, and page faults are never hypervisor
* events.
*/
- switch (perf_event_id(&counter->attr)) {
+ switch (counter->attr.config) {
case PERF_COUNT_CPU_CLOCK:
pmu = &perf_ops_cpu_clock;
if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
goto done;
- if (perf_event_raw(attr)) {
+ if (attr->type == PERF_TYPE_RAW) {
pmu = hw_perf_counter_init(counter);
goto done;
}
- switch (perf_event_type(attr)) {
+ switch (attr->type) {
case PERF_TYPE_HARDWARE:
pmu = hw_perf_counter_init(counter);
break;