struct perf_mmap_data {
struct rcu_head rcu_head;
int nr_pages; /* nr of data pages */
+ int nr_locked; /* nr pages mlocked */
atomic_t poll; /* POLL_ for wakeups */
atomic_t head; /* write position */
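
For context, nr_pages counts only the data pages: the buffer user space maps is one control page followed by a power-of-two number of data pages, and the new nr_locked records how many pages were actually charged to mm->locked_vm for that mapping. A minimal userspace sketch of mapping such a buffer (perf_fd is a stand-in for a descriptor returned by the perf counter syscall, which is not shown here):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map the control page plus 'nr_data_pages' data pages of a counter fd.
 * perf_mmap() insists on a shared, non-writable mapping at offset 0. */
static void *map_counter_buffer(int perf_fd, unsigned long nr_data_pages)
{
	size_t len = ((size_t)nr_data_pages + 1) * sysconf(_SC_PAGESIZE);

	return mmap(NULL, len, PROT_READ, MAP_SHARED, perf_fd, 0);
}
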
extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
extern int sysctl_perf_counter_priv;
+extern int sysctl_perf_counter_mlock;
extern void perf_counter_init(void);
static atomic_t nr_comm_tracking __read_mostly;
int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
+int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
/*
* Lock for (sysadmin-configurable) counter reservations:
if (atomic_dec_and_mutex_lock(&counter->mmap_count,
&counter->mmap_mutex)) {
- vma->vm_mm->locked_vm -= counter->data->nr_pages + 1;
+ vma->vm_mm->locked_vm -= counter->data->nr_locked;
perf_mmap_data_free(counter);
mutex_unlock(&counter->mmap_mutex);
}
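
This close path is why nr_locked is stored at all: sysctl_perf_counter_mlock can change between mmap() and munmap(), so recomputing the charge at unmap time could subtract a different amount than was added and corrupt locked_vm. Remembering the charged value makes the accounting symmetric by construction. A toy userspace model of that pairing (names are illustrative, not kernel API):

/* Toy model of the charge/uncharge pairing: whatever is added to
 * locked_vm when the buffer is mapped is remembered in nr_locked,
 * and exactly that amount is removed when the last reference dies. */
struct buf_account {
	long nr_locked;			/* pages charged at mmap time */
};

static void charge(unsigned long *locked_vm, struct buf_account *b, long extra)
{
	*locked_vm += extra;
	b->nr_locked = extra;
}

static void uncharge(unsigned long *locked_vm, const struct buf_account *b)
{
	*locked_vm -= b->nr_locked;	/* independent of the current sysctl */
}
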
unsigned long nr_pages;
unsigned long locked, lock_limit;
int ret = 0;
+ long extra;
if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
return -EINVAL;
goto unlock;
}
- locked = vma->vm_mm->locked_vm;
- locked += nr_pages + 1;
+ extra = nr_pages; /* no '+ 1': only account the data pages */
+ extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+ if (extra < 0)
+ extra = 0;
+
+ locked = vma->vm_mm->locked_vm + extra;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
goto unlock;
atomic_set(&counter->mmap_count, 1);
- vma->vm_mm->locked_vm += nr_pages + 1;
+ vma->vm_mm->locked_vm += extra;
+ counter->data->nr_locked = extra;
unlock:
mutex_unlock(&counter->mmap_mutex);
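
The shift `sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10)` converts the sysctl value from kilobytes to pages. With the default of 128 and 4 KiB pages (PAGE_SHIFT == 12) that is 128 >> 2 = 32 'free' data pages per counter; only pages beyond that are charged against RLIMIT_MEMLOCK. A small standalone sketch of the same arithmetic, assuming 4 KiB pages:

#include <stdio.h>

int main(void)
{
	long page_shift = 12;	/* assumption: 4 KiB pages */
	long mlock_kb = 128;	/* sysctl_perf_counter_mlock default */
	long free_pages = mlock_kb >> (page_shift - 10);	/* KiB -> pages */

	for (long nr_pages = 16; nr_pages <= 128; nr_pages *= 2) {
		long extra = nr_pages - free_pages;

		if (extra < 0)
			extra = 0;
		printf("%3ld data pages -> %3ld pages charged to locked_vm\n",
		       nr_pages, extra);
	}
	return 0;
}
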
.mode = 0644,
.proc_handler = &proc_dointvec,
},
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "perf_counter_mlock_kb",
+ .data = &sysctl_perf_counter_mlock,
+ .maxlen = sizeof(sysctl_perf_counter_mlock),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
#endif
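
Since the entry sits in the kernel sysctl table, the threshold appears as /proc/sys/kernel/perf_counter_mlock_kb and can be raised at runtime by a privileged user; the value is in kilobytes per counter. A hedged example of bumping it to 512 KiB from user space:

#include <stdio.h>

int main(void)
{
	/* Needs privilege; the file is the proc view of the table entry above. */
	FILE *f = fopen("/proc/sys/kernel/perf_counter_mlock_kb", "w");

	if (!f) {
		perror("perf_counter_mlock_kb");
		return 1;
	}
	fprintf(f, "512\n");
	return fclose(f) ? 1 : 0;
}
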
/*
* NOTE: do not add new entries to this table unless you have read