/*
* target-related setup
*/
- err = perf_target__validate(&kvm->opts.target);
+ err = target__validate(&kvm->opts.target);
if (err) {
- perf_target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
+ target__strerror(&kvm->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
- if (perf_target__none(&kvm->opts.target))
+ if (target__none(&kvm->opts.target))
kvm->opts.target.system_wide = true;
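This validate, warn, and default-to-system-wide pattern recurs in the builtins changed below. A minimal sketch of the flow, using only the renamed API from this patch (option parsing and the surrounding tool state are assumed):

	struct target t = { .uid = UINT_MAX };	/* UINT_MAX: no UID filter */
	char errbuf[BUFSIZ];
	int err = target__validate(&t);

	if (err) {
		/* conflicts are resolved by overriding, so only warn */
		target__strerror(&t, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}
	if (target__none(&t))		/* no pid/tid/uid/cpu requested */
		t.system_wide = true;	/* monitor the whole system instead */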
* (apart from group members) have enable_on_exec=1 set,
* so don't spoil it by prematurely enabling them.
*/
- if (!perf_target__none(&opts->target))
+ if (!target__none(&opts->target))
perf_evlist__enable(evsel_list);
/*
* die with the process and we wait for that. Thus no need to
* disable events in this case.
*/
- if (done && !disabled && !perf_target__none(&opts->target)) {
+ if (done && !disabled && !target__none(&opts->target)) {
perf_evlist__disable(evsel_list);
disabled = true;
}
argc = parse_options(argc, argv, record_options, record_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
- if (!argc && perf_target__none(&rec->opts.target))
+ if (!argc && target__none(&rec->opts.target))
usage_with_options(record_usage, record_options);
if (nr_cgroups && !rec->opts.target.system_wide) {
goto out_symbol_exit;
}
- err = perf_target__validate(&rec->opts.target);
+ err = target__validate(&rec->opts.target);
if (err) {
- perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+ target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
- err = perf_target__parse_uid(&rec->opts.target);
+ err = target__parse_uid(&rec->opts.target);
if (err) {
int saved_errno = errno;
- perf_target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
+ target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
ui__error("%s", errbuf);
err = -saved_errno;
static struct perf_evlist *evsel_list;
-static struct perf_target target = {
+static struct target target = {
.uid = UINT_MAX,
};
attr->inherit = !no_inherit;
- if (perf_target__has_cpu(&target))
+ if (target__has_cpu(&target))
return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));
- if (!perf_target__has_task(&target) &&
- perf_evsel__is_group_leader(evsel)) {
+ if (!target__has_task(&target) && perf_evsel__is_group_leader(evsel)) {
attr->disabled = 1;
if (!initial_delay)
attr->enable_on_exec = 1;
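Only group leaders of events that will measure a workload forked by perf are created disabled here: the kernel arms such an event automatically at exec(), and group members are enabled and disabled with their leader. A sketch of the resulting attribute setup (the initial_delay path above is elided):

	struct perf_event_attr attr = { .type = PERF_TYPE_HARDWARE };

	attr.disabled = 1;		/* do not count during perf's setup */
	attr.enable_on_exec = 1;	/* kernel enables the event at exec() */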
fprintf(output, "\'system wide");
else if (target.cpu_list)
fprintf(output, "\'CPU(s) %s", target.cpu_list);
- else if (!perf_target__has_task(&target)) {
+ else if (!target__has_task(&target)) {
fprintf(output, "\'%s", argv[0]);
for (i = 1; i < argc; i++)
fprintf(output, " %s", argv[i]);
} else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false;
- if (!argc && perf_target__none(&target))
+ if (!argc && target__none(&target))
usage_with_options(stat_usage, options);
if (run_count < 0) {
}
/* no_aggr, cgroup are for system-wide only */
- if ((aggr_mode != AGGR_GLOBAL || nr_cgroups)
- && !perf_target__has_cpu(&target)) {
+ if ((aggr_mode != AGGR_GLOBAL || nr_cgroups) &&
+ !target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
if (add_default_attributes())
goto out;
- perf_target__validate(&target);
+ target__validate(&target);
if (perf_evlist__create_maps(evsel_list, &target) < 0) {
- if (perf_target__has_task(&target)) {
+ if (target__has_task(&target)) {
pr_err("Problems finding threads of monitor\n");
parse_options_usage(stat_usage, options, "p", 1);
parse_options_usage(NULL, options, "t", 1);
- } else if (perf_target__has_cpu(&target)) {
+ } else if (target__has_cpu(&target)) {
perror("failed to parse CPUs map");
parse_options_usage(stat_usage, options, "C", 1);
parse_options_usage(NULL, options, "a", 1);
* XXX 'top' still doesn't start workloads like record, trace, but should,
* so leave the check here.
*/
- if (!perf_target__none(&opts->target))
+ if (!target__none(&opts->target))
perf_evlist__enable(top->evlist);
/* Wait for a minimal set of events before starting the snapshot */
.sym_pcnt_filter = 5,
};
struct perf_record_opts *opts = &top.record_opts;
- struct perf_target *target = &opts->target;
+ struct target *target = &opts->target;
const struct option options[] = {
OPT_CALLBACK('e', "event", &top.evlist, "event",
"event selector. use 'perf list' to list available events",
setup_browser(false);
- status = perf_target__validate(target);
+ status = target__validate(target);
if (status) {
- perf_target__strerror(target, status, errbuf, BUFSIZ);
+ target__strerror(target, status, errbuf, BUFSIZ);
ui__warning("%s", errbuf);
}
- status = perf_target__parse_uid(target);
+ status = target__parse_uid(target);
if (status) {
int saved_errno = errno;
- perf_target__strerror(target, status, errbuf, BUFSIZ);
+ target__strerror(target, status, errbuf, BUFSIZ);
ui__error("%s", errbuf);
status = -saved_errno;
goto out_delete_evlist;
}
- if (perf_target__none(target))
+ if (target__none(target))
target->system_wide = true;
if (perf_evlist__create_maps(top.evlist, target) < 0)
}
}
- err = perf_target__validate(&trace.opts.target);
+ err = target__validate(&trace.opts.target);
if (err) {
- perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+ target__strerror(&trace.opts.target, err, bf, sizeof(bf));
fprintf(trace.output, "%s", bf);
goto out_close;
}
- err = perf_target__parse_uid(&trace.opts.target);
+ err = target__parse_uid(&trace.opts.target);
if (err) {
- perf_target__strerror(&trace.opts.target, err, bf, sizeof(bf));
+ target__strerror(&trace.opts.target, err, bf, sizeof(bf));
fprintf(trace.output, "%s", bf);
goto out_close;
}
- if (!argc && perf_target__none(&trace.opts.target))
+ if (!argc && target__none(&trace.opts.target))
trace.opts.target.system_wide = true;
if (input_name)
};
struct perf_record_opts {
- struct perf_target target;
+ struct target target;
int call_graph;
bool group;
bool inherit_stat;
union perf_event *event;
struct perf_evsel *evsel;
struct perf_evlist *evlist;
- struct perf_target target = {
+ struct target target = {
.uid = UINT_MAX,
.uses_mmap = true,
};
return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
-int perf_evlist__create_maps(struct perf_evlist *evlist,
- struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
evlist->threads = thread_map__new_str(target->pid, target->tid,
target->uid);
if (evlist->threads == NULL)
return -1;
- if (perf_target__has_task(target))
+ if (target__has_task(target))
evlist->cpus = cpu_map__dummy_new();
- else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+ else if (!target__has_cpu(target) && !target->uses_mmap)
evlist->cpus = cpu_map__dummy_new();
else
evlist->cpus = cpu_map__new(target->cpu_list);
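The map selection above collapses to three cases; a summary, since the interaction of the two predicates with uses_mmap is easy to misread (cpu_map__new(NULL) expands to all online CPUs):

	/*
	 * target__has_task()         -> thread map from pid/tid/uid,
	 *                               dummy CPU map (follow the threads)
	 * target__has_cpu(), or a
	 * none target with uses_mmap -> real CPU map from cpu_list
	 * none target, no mmap       -> dummy CPU map
	 */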
return err;
}
-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
- struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
const char *argv[], bool pipe_output,
bool want_signal)
{
exit(-1);
}
- if (perf_target__none(target))
+ if (target__none(target))
evlist->threads->map[0] = evlist->workload.pid;
close(child_ready_pipe[1]);
int perf_record_opts__config(struct perf_record_opts *opts);
int perf_evlist__prepare_workload(struct perf_evlist *evlist,
- struct perf_target *target,
+ struct target *target,
const char *argv[], bool pipe_output,
bool want_signal);
int perf_evlist__start_workload(struct perf_evlist *evlist);
evlist->threads = threads;
}
-int perf_evlist__create_maps(struct perf_evlist *evlist,
- struct perf_target *target);
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
void perf_evlist__delete_maps(struct perf_evlist *evlist);
int perf_evlist__apply_filters(struct perf_evlist *evlist);
}
}
- if (perf_target__has_cpu(&opts->target))
+ if (target__has_cpu(&opts->target))
perf_evsel__set_sample_bit(evsel, CPU);
if (opts->period)
if (!perf_missing_features.sample_id_all &&
(opts->sample_time || !opts->no_inherit ||
- perf_target__has_cpu(&opts->target)))
+ target__has_cpu(&opts->target)))
perf_evsel__set_sample_bit(evsel, TIME);
if (opts->raw_samples) {
* Setting enable_on_exec for independent events and
* group leaders for workloads executed by perf.
*/
- if (perf_target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
+ if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel))
attr->enable_on_exec = 1;
}
return false;
}
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
- struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
switch (err) {
bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
char *msg, size_t msgsize);
-int perf_evsel__open_strerror(struct perf_evsel *evsel,
- struct perf_target *target,
+int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
int err, char *msg, size_t size);
static inline int perf_evsel__group_idx(struct perf_evsel *evsel)
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct perf_target *target, struct thread_map *threads,
+ struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap)
{
- if (perf_target__has_task(target))
+ if (target__has_task(target))
return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
- else if (perf_target__has_cpu(target))
+ else if (target__has_cpu(target))
return perf_event__synthesize_threads(tool, process, machine, data_mmap);
/* command specified */
return 0;
void *priv);
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
- struct perf_target *target, struct thread_map *threads,
+ struct target *target, struct thread_map *threads,
perf_event__handler_t process, bool data_mmap);
static inline
-int machine__synthesize_threads(struct machine *machine, struct perf_target *target,
+int machine__synthesize_threads(struct machine *machine, struct target *target,
struct thread_map *threads, bool data_mmap)
{
return __machine__synthesize_threads(machine, NULL, target, threads,
#include <string.h>
-enum perf_target_errno perf_target__validate(struct perf_target *target)
+enum target_errno target__validate(struct target *target)
{
- enum perf_target_errno ret = PERF_ERRNO_TARGET__SUCCESS;
+ enum target_errno ret = TARGET_ERRNO__SUCCESS;
if (target->pid)
target->tid = target->pid;
/* CPU and PID are mutually exclusive */
if (target->tid && target->cpu_list) {
target->cpu_list = NULL;
- if (ret == PERF_ERRNO_TARGET__SUCCESS)
- ret = PERF_ERRNO_TARGET__PID_OVERRIDE_CPU;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_CPU;
}
/* UID and PID are mutually exclusive */
if (target->tid && target->uid_str) {
target->uid_str = NULL;
- if (ret == PERF_ERRNO_TARGET__SUCCESS)
- ret = PERF_ERRNO_TARGET__PID_OVERRIDE_UID;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_UID;
}
/* UID and CPU are mutually exclusive */
if (target->uid_str && target->cpu_list) {
target->cpu_list = NULL;
- if (ret == PERF_ERRNO_TARGET__SUCCESS)
- ret = PERF_ERRNO_TARGET__UID_OVERRIDE_CPU;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__UID_OVERRIDE_CPU;
}
/* PID and SYSTEM are mutually exclusive */
if (target->tid && target->system_wide) {
target->system_wide = false;
- if (ret == PERF_ERRNO_TARGET__SUCCESS)
- ret = PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__PID_OVERRIDE_SYSTEM;
}
/* UID and SYSTEM are mutually exclusive */
if (target->uid_str && target->system_wide) {
target->system_wide = false;
- if (ret == PERF_ERRNO_TARGET__SUCCESS)
- ret = PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM;
+ if (ret == TARGET_ERRNO__SUCCESS)
+ ret = TARGET_ERRNO__UID_OVERRIDE_SYSTEM;
}
return ret;
}
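target__validate() resolves each conflict by clearing the weaker option, and the `ret == TARGET_ERRNO__SUCCESS` guard means only the first override is reported. A hypothetical conflicting invocation, sketched against the code above:

	/* e.g. `perf record -p 1234 -C 0-3` */
	struct target t = {
		.pid      = "1234",
		.cpu_list = "0-3",
	};

	/* pid is copied to tid, cpu_list is dropped, and the call
	 * returns TARGET_ERRNO__PID_OVERRIDE_CPU */
	enum target_errno err = target__validate(&t);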
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target)
+enum target_errno target__parse_uid(struct target *target)
{
struct passwd pwd, *result;
char buf[1024];
target->uid = UINT_MAX;
if (str == NULL)
- return PERF_ERRNO_TARGET__SUCCESS;
+ return TARGET_ERRNO__SUCCESS;
/* Try user name first */
getpwnam_r(str, &pwd, buf, sizeof(buf), &result);
int uid = strtol(str, &endptr, 10);
if (*endptr != '\0')
- return PERF_ERRNO_TARGET__INVALID_UID;
+ return TARGET_ERRNO__INVALID_UID;
getpwuid_r(uid, &pwd, buf, sizeof(buf), &result);
if (result == NULL)
- return PERF_ERRNO_TARGET__USER_NOT_FOUND;
+ return TARGET_ERRNO__USER_NOT_FOUND;
}
target->uid = result->pw_uid;
- return PERF_ERRNO_TARGET__SUCCESS;
+ return TARGET_ERRNO__SUCCESS;
}
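target__parse_uid() tries the string as a user name first and only then as a numeric UID, so the failure modes differ: a non-numeric unknown string yields TARGET_ERRNO__INVALID_UID, while a well-formed but unknown UID yields TARGET_ERRNO__USER_NOT_FOUND. Assuming a target populated from a -u option:

	struct target t = { .uid_str = "nobody" };

	if (target__parse_uid(&t) == TARGET_ERRNO__SUCCESS)
		printf("monitoring uid %u\n", t.uid);	/* pw_uid of "nobody" */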
/*
- * This must have a same ordering as the enum perf_target_errno.
+ * This must have the same ordering as the enum target_errno.
*/
-static const char *perf_target__error_str[] = {
+static const char *target__error_str[] = {
"PID/TID switch overriding CPU",
"PID/TID switch overriding UID",
"UID switch overriding CPU",
"Problems obtaining information for user %s",
};
-int perf_target__strerror(struct perf_target *target, int errnum,
+int target__strerror(struct target *target, int errnum,
char *buf, size_t buflen)
{
int idx;
return 0;
}
- if (errnum < __PERF_ERRNO_TARGET__START ||
- errnum >= __PERF_ERRNO_TARGET__END)
+ if (errnum < __TARGET_ERRNO__START || errnum >= __TARGET_ERRNO__END)
return -1;
- idx = errnum - __PERF_ERRNO_TARGET__START;
- msg = perf_target__error_str[idx];
+ idx = errnum - __TARGET_ERRNO__START;
+ msg = target__error_str[idx];
switch (errnum) {
- case PERF_ERRNO_TARGET__PID_OVERRIDE_CPU
- ... PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM:
+ case TARGET_ERRNO__PID_OVERRIDE_CPU ... TARGET_ERRNO__UID_OVERRIDE_SYSTEM:
snprintf(buf, buflen, "%s", msg);
break;
- case PERF_ERRNO_TARGET__INVALID_UID:
- case PERF_ERRNO_TARGET__USER_NOT_FOUND:
+ case TARGET_ERRNO__INVALID_UID:
+ case TARGET_ERRNO__USER_NOT_FOUND:
snprintf(buf, buflen, msg, target->uid_str);
break;
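The switch has two arms on purpose: the override messages are fixed strings, while the two UID errors treat their table entry as a printf format and splice in the offending uid_str. Fetching one of the latter, under the renamed API:

	struct target t = { .uid_str = "no-such-user" };
	char buf[BUFSIZ];

	/* expands the "... user %s" format with t.uid_str */
	target__strerror(&t, TARGET_ERRNO__USER_NOT_FOUND, buf, sizeof(buf));
	ui__error("%s", buf);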
#include <stdbool.h>
#include <sys/types.h>
-struct perf_target {
+struct target {
const char *pid;
const char *tid;
const char *cpu_list;
bool uses_mmap;
};
-enum perf_target_errno {
- PERF_ERRNO_TARGET__SUCCESS = 0,
+enum target_errno {
+ TARGET_ERRNO__SUCCESS = 0,
/*
* Choose an arbitrary negative big number not to clash with standard
* errno values, which are required to be distinct positive values:
*
* http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/errno.h.html
*/
- __PERF_ERRNO_TARGET__START = -10000,
+ __TARGET_ERRNO__START = -10000,
- /* for perf_target__validate() */
- PERF_ERRNO_TARGET__PID_OVERRIDE_CPU = __PERF_ERRNO_TARGET__START,
- PERF_ERRNO_TARGET__PID_OVERRIDE_UID,
- PERF_ERRNO_TARGET__UID_OVERRIDE_CPU,
- PERF_ERRNO_TARGET__PID_OVERRIDE_SYSTEM,
- PERF_ERRNO_TARGET__UID_OVERRIDE_SYSTEM,
+ /* for target__validate() */
+ TARGET_ERRNO__PID_OVERRIDE_CPU = __TARGET_ERRNO__START,
+ TARGET_ERRNO__PID_OVERRIDE_UID,
+ TARGET_ERRNO__UID_OVERRIDE_CPU,
+ TARGET_ERRNO__PID_OVERRIDE_SYSTEM,
+ TARGET_ERRNO__UID_OVERRIDE_SYSTEM,
- /* for perf_target__parse_uid() */
- PERF_ERRNO_TARGET__INVALID_UID,
- PERF_ERRNO_TARGET__USER_NOT_FOUND,
+ /* for target__parse_uid() */
+ TARGET_ERRNO__INVALID_UID,
+ TARGET_ERRNO__USER_NOT_FOUND,
-
- __PERF_ERRNO_TARGET__END,
+ __TARGET_ERRNO__END,
};
-enum perf_target_errno perf_target__validate(struct perf_target *target);
-enum perf_target_errno perf_target__parse_uid(struct perf_target *target);
+enum target_errno target__validate(struct target *target);
+enum target_errno target__parse_uid(struct target *target);
-int perf_target__strerror(struct perf_target *target, int errnum, char *buf,
- size_t buflen);
+int target__strerror(struct target *target, int errnum, char *buf, size_t buflen);
-static inline bool perf_target__has_task(struct perf_target *target)
+static inline bool target__has_task(struct target *target)
{
return target->tid || target->pid || target->uid_str;
}
-static inline bool perf_target__has_cpu(struct perf_target *target)
+static inline bool target__has_cpu(struct target *target)
{
return target->system_wide || target->cpu_list;
}
-static inline bool perf_target__none(struct perf_target *target)
+static inline bool target__none(struct target *target)
{
- return !perf_target__has_task(target) && !perf_target__has_cpu(target);
+ return !target__has_task(target) && !target__has_cpu(target);
}
#endif /* _PERF_TARGET_H */
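The three inline predicates partition the option space; a quick reference, derived from the definitions above:

	/*
	 * options given               has_task  has_cpu  none
	 * -p / -t / -u <task target>    yes       no      no
	 * -a or -C <cpu list>           no        yes     no
	 * bare workload (./cmd only)    no        no      yes
	 */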
float ksamples_per_sec;
float esamples_percent;
struct perf_record_opts *opts = &top->record_opts;
- struct perf_target *target = &opts->target;
+ struct target *target = &opts->target;
size_t ret = 0;
if (top->samples) {