/*
* Ioctls that can be done on a perf counter fd:
*/
-#define PERF_COUNTER_IOC_ENABLE _IOW('$', 0, u32)
-#define PERF_COUNTER_IOC_DISABLE _IOW('$', 1, u32)
-#define PERF_COUNTER_IOC_REFRESH _IOW('$', 2, u32)
-#define PERF_COUNTER_IOC_RESET _IOW('$', 3, u32)
+#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
+#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
+#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
+#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
+#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
enum perf_counter_ioc_flags {
PERF_IOC_FLAG_GROUP = 1U << 0,
mutex_unlock(&counter->child_mutex);
}
+static int perf_counter_period(struct perf_counter *counter, u64 __user *arg)
+{
+ struct perf_counter_context *ctx = counter->ctx;
+ int ret = 0;
+ u64 value;
+
+ if (!counter->hw_event.sample_period)
+ return -EINVAL;
+
+	/*
+	 * copy_from_user() returns the number of bytes left uncopied,
+	 * so any non-zero return means the read faulted.
+	 */
+	if (copy_from_user(&value, arg, sizeof(value)))
+		return -EFAULT;
+
+ if (!value)
+ return -EINVAL;
+
+ spin_lock_irq(&ctx->lock);
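+	/*
+	 * Frequency-based counters interpret the value as a sample
+	 * frequency (bounded by the sysctl limit); period-based
+	 * counters take it as a raw sample period.
+	 */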
+ if (counter->hw_event.freq) {
+ if (value > sysctl_perf_counter_limit) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ counter->hw_event.sample_freq = value;
+ } else {
+ counter->hw_event.sample_period = value;
+ counter->hw.sample_period = value;
+
+ perf_log_period(counter, value);
+ }
+unlock:
+ spin_unlock_irq(&ctx->lock);
+
+ return ret;
+}
+
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct perf_counter *counter = file->private_data;
case PERF_COUNTER_IOC_REFRESH:
return perf_counter_refresh(counter, arg);
+
+ case PERF_COUNTER_IOC_PERIOD:
+ return perf_counter_period(counter, (u64 __user *)arg);
+
default:
return -ENOTTY;
}
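
For illustration, a minimal userspace sketch of driving the new ioctl; this
helper is not part of the patch. It assumes fd is a perf counter fd opened
with a non-zero sample_period, and that the PERF_COUNTER_IOC_PERIOD
definition above is visible (e.g. via the tree's linux/perf_counter.h):

	#include <stdint.h>
	#include <sys/ioctl.h>

	/* Hypothetical helper: update the sample period (or, for
	 * freq-based counters, the sample frequency) of an open
	 * counter fd. Returns 0 on success, -1 with errno set
	 * (EINVAL for a zero/over-limit value or a non-sampling
	 * counter, EFAULT for a bad pointer). */
	static int set_sample_period(int fd, uint64_t period)
	{
		return ioctl(fd, PERF_COUNTER_IOC_PERIOD, &period);
	}

Since the command is encoded with _IOW('$', 4, u64), the value travels by
pointer and is validated in perf_counter_period() before being applied
under ctx->lock.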