extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
-extern int sysctl_perf_counter_priv;
+extern int sysctl_perf_counter_paranoid;
extern int sysctl_perf_counter_mlock;
extern int sysctl_perf_counter_limit;
static atomic_t nr_mmap_counters __read_mostly;
static atomic_t nr_comm_counters __read_mostly;
-int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+/*
+ * 0 - not paranoid
+ * 1 - disallow cpu counters to unpriv
+ * 2 - disallow kernel profiling to unpriv
+ */
+int sysctl_perf_counter_paranoid __read_mostly; /* paranoia level (0/1/2), see table above */
+
+/*
+ * Level >= 1: unprivileged (no CAP_SYS_ADMIN) users may not open
+ * per-CPU counters.
+ */
+static inline bool perf_paranoid_cpu(void)
+{
+ return sysctl_perf_counter_paranoid > 0;
+}
+
+/*
+ * Level >= 2: unprivileged (no CAP_SYS_ADMIN) users may not profile
+ * the kernel (i.e. must set attr.exclude_kernel).
+ */
+static inline bool perf_paranoid_kernel(void)
+{
+ return sysctl_perf_counter_paranoid > 1;
+}
+
int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
int sysctl_perf_counter_limit __read_mostly = 100000; /* max NMIs per second */
*/
if (cpu != -1) {
/* Must be root to operate on a CPU counter: */
- if (sysctl_perf_counter_priv && !capable(CAP_SYS_ADMIN))
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
return ERR_PTR(-EACCES);
if (cpu < 0 || cpu > num_possible_cpus())
if (copy_from_user(&attr, attr_uptr, sizeof(attr)) != 0)
return -EFAULT;
+ if (!attr.exclude_kernel) {
+ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+ }
+
/*
* Get the target context (task or percpu):
*/
#ifdef CONFIG_PERF_COUNTERS
{
.ctl_name = CTL_UNNUMBERED,
- .procname = "perf_counter_privileged",
- .data = &sysctl_perf_counter_priv,
- .maxlen = sizeof(sysctl_perf_counter_priv),
+ .procname = "perf_counter_paranoid",
+ .data = &sysctl_perf_counter_paranoid,
+ .maxlen = sizeof(sysctl_perf_counter_paranoid),
.mode = 0644,
.proc_handler = &proc_dointvec,
},