No functional change; this only moves the definitions that were duplicated
across the builtin commands into the new shared perf.h header.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
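
For reference, a builtin that switches over to perf.h is expected to use
the shared helpers roughly as in the sketch below. This is illustrative
only and not part of the patch; the event selection fields of
struct perf_counter_hw_event are left zeroed here because filling them in
is up to the individual builtins:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    #include "../../include/linux/perf_counter.h"
    #include "perf.h"

    static void measure_workload(void)
    {
            struct perf_counter_hw_event hw_event;
            __u64 t0, t1, count = 0;
            int fd;

            memset(&hw_event, 0, sizeof(hw_event));
            /* select the event here, e.g. by storing an EID(type, id) value */

            /* pid 0: current task, cpu -1: any CPU, no group, no flags */
            fd = sys_perf_counter_open(&hw_event, 0, -1, -1, 0);
            if (fd < 0)
                    return;

            t0 = rdclock();
            /* ... run the code being measured ... */
            t1 = rdclock();

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("%llu events in %llu ns\n",
                           (unsigned long long)count,
                           (unsigned long long)(t1 - t0));
            close(fd);
    }

Depending on the libc, clock_gettime() may need -lrt at link time.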
LIB_FILE=libperf.a
LIB_H += ../../include/linux/perf_counter.h
+LIB_H += perf.h
LIB_H += util/levenshtein.h
LIB_H += util/parse-options.h
LIB_H += util/quote.h
-#define _GNU_SOURCE
+#include "util/util.h"
+
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/time.h>
#include "../../include/linux/perf_counter.h"
-
-/*
- * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
- * counters in the current task.
- */
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
-
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
-
-#define rdclock() \
-({ \
- struct timespec ts; \
- \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- ts.tv_sec * 1000000000ULL + ts.tv_nsec; \
-})
-
-/*
- * Pick up some kernel type conventions:
- */
-#define __user
-#define asmlinkage
-
-#ifdef __x86_64__
-#define __NR_perf_counter_open 298
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __i386__
-#define __NR_perf_counter_open 336
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __powerpc__
-#define __NR_perf_counter_open 319
-#define rmb() asm volatile ("sync" ::: "memory")
-#define cpu_relax() asm volatile ("" ::: "memory");
-#endif
-
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-extern asmlinkage int sys_perf_counter_open(
- struct perf_counter_hw_event *hw_event_uptr __user,
- pid_t pid,
- int cpu,
- int group_fd,
- unsigned long flags);
-
-#define MAX_COUNTERS 64
-#define MAX_NR_CPUS 256
-
-#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
+#include "perf.h"
static int nr_counters = 0;
static __u64 event_id[MAX_COUNTERS] = { };
#include "../../include/linux/perf_counter.h"
-
-/*
- * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
- * counters in the current task.
- */
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
-
-#define rdclock() \
-({ \
- struct timespec ts; \
- \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- ts.tv_sec * 1000000000ULL + ts.tv_nsec; \
-})
-
-/*
- * Pick up some kernel type conventions:
- */
-#define __user
-#define asmlinkage
-
-#ifdef __x86_64__
-#define __NR_perf_counter_open 298
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __i386__
-#define __NR_perf_counter_open 336
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __powerpc__
-#define __NR_perf_counter_open 319
-#define rmb() asm volatile ("sync" ::: "memory")
-#define cpu_relax() asm volatile ("" ::: "memory");
-#endif
-
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-extern asmlinkage int sys_perf_counter_open(
- struct perf_counter_hw_event *hw_event_uptr __user,
- pid_t pid,
- int cpu,
- int group_fd,
- unsigned long flags);
-
-#define MAX_COUNTERS 64
-#define MAX_NR_CPUS 256
-
-#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
+#include "perf.h"
static int system_wide = 0;
#include "../../include/linux/perf_counter.h"
-
-/*
- * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
- * counters in the current task.
- */
-#define PR_TASK_PERF_COUNTERS_DISABLE 31
-#define PR_TASK_PERF_COUNTERS_ENABLE 32
-
-#define rdclock() \
-({ \
- struct timespec ts; \
- \
- clock_gettime(CLOCK_MONOTONIC, &ts); \
- ts.tv_sec * 1000000000ULL + ts.tv_nsec; \
-})
-
-/*
- * Pick up some kernel type conventions:
- */
-#define __user
-#define asmlinkage
-
-#ifdef __x86_64__
-#define __NR_perf_counter_open 298
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __i386__
-#define __NR_perf_counter_open 336
-#define rmb() asm volatile("lfence" ::: "memory")
-#define cpu_relax() asm volatile("rep; nop" ::: "memory");
-#endif
-
-#ifdef __powerpc__
-#define __NR_perf_counter_open 319
-#define rmb() asm volatile ("sync" ::: "memory")
-#define cpu_relax() asm volatile ("" ::: "memory");
-#endif
-
-#define unlikely(x) __builtin_expect(!!(x), 0)
-#define min(x, y) ({ \
- typeof(x) _min1 = (x); \
- typeof(y) _min2 = (y); \
- (void) (&_min1 == &_min2); \
- _min1 < _min2 ? _min1 : _min2; })
-
-asmlinkage int sys_perf_counter_open(
- struct perf_counter_hw_event *hw_event_uptr __user,
- pid_t pid,
- int cpu,
- int group_fd,
- unsigned long flags)
-{
- return syscall(
- __NR_perf_counter_open, hw_event_uptr, pid, cpu, group_fd, flags);
-}
-
-#define MAX_COUNTERS 64
-#define MAX_NR_CPUS 256
-
-#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
+#include "perf.h"
static int system_wide = 0;
--- /dev/null
+#ifndef _PERF_PERF_H
+#define _PERF_PERF_H
+
+/*
+ * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all
+ * counters in the current task.
+ */
+#define PR_TASK_PERF_COUNTERS_DISABLE 31
+#define PR_TASK_PERF_COUNTERS_ENABLE 32
+
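+/* Return the current CLOCK_MONOTONIC time, in nanoseconds: */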
+#define rdclock() \
+({ \
+ struct timespec ts; \
+ \
+ clock_gettime(CLOCK_MONOTONIC, &ts); \
+ ts.tv_sec * 1000000000ULL + ts.tv_nsec; \
+})
+
+/*
+ * Pick up some kernel type conventions:
+ */
+#define __user
+#define asmlinkage
+
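+/* Per-architecture syscall number, read barrier and cpu_relax() primitives: */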
+#ifdef __x86_64__
+#define __NR_perf_counter_open 298
+#define rmb() asm volatile("lfence" ::: "memory")
+#define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#endif
+
+#ifdef __i386__
+#define __NR_perf_counter_open 336
+#define rmb() asm volatile("lfence" ::: "memory")
+#define cpu_relax() asm volatile("rep; nop" ::: "memory");
+#endif
+
+#ifdef __powerpc__
+#define __NR_perf_counter_open 319
+#define rmb() asm volatile ("sync" ::: "memory")
+#define cpu_relax() asm volatile ("" ::: "memory");
+#endif
+
+#define unlikely(x) __builtin_expect(!!(x), 0)
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
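+/* Thin wrapper around the perf_counter_open system call: */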
+static inline int
+sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr,
+ pid_t pid, int cpu, int group_fd,
+ unsigned long flags)
+{
+ return syscall(__NR_perf_counter_open, hw_event_uptr, pid, cpu,
+ group_fd, flags);
+}
+
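+/* Static upper limits on the number of counters and CPUs: */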
+#define MAX_COUNTERS 64
+#define MAX_NR_CPUS 256
+
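+/* Combine a counter type and an event id into a single event descriptor: */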
+#define EID(type, id) (((__u64)(type) << PERF_COUNTER_TYPE_SHIFT) | (id))
+
+#endif