perf_counter: per user mlock gift
author Peter Zijlstra <a.p.zijlstra@chello.nl>
Fri, 15 May 2009 13:19:27 +0000 (15:19 +0200)
committer Ingo Molnar <mingo@elte.hu>
Fri, 15 May 2009 13:26:56 +0000 (15:26 +0200)
Instead of a per-process mlock gift for perf-counters, use a
per-user gift so that there is less of a DoS potential.

[ Impact: allow less worst-case unprivileged memory consumption ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <20090515132018.496182835@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/sched.h
kernel/perf_counter.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d1857580a1328e202e092715492c0b2cc9de30c6..ff59d123151926b354114d94e883d28561a5c4a7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -674,6 +674,10 @@ struct user_struct {
        struct work_struct work;
 #endif
 #endif
+
+#ifdef CONFIG_PERF_COUNTERS
+       atomic_long_t locked_vm;
+#endif
 };
 
 extern int uids_sysfs_init(void);
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0173738dd548eaa07e729c0658759f00c15b8694..93f4a0e4b8739a0047bb3d0bee4d7804158cd8b5 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -45,7 +45,7 @@ static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
 
 int sysctl_perf_counter_priv __read_mostly; /* do we need to be privileged */
-int sysctl_perf_counter_mlock __read_mostly = 128; /* 'free' kb per counter */
+int sysctl_perf_counter_mlock __read_mostly = 512; /* 'free' kb per user */
 
 /*
  * Lock for (sysadmin-configurable) counter reservations:
@@ -1522,6 +1522,9 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 
        if (atomic_dec_and_mutex_lock(&counter->mmap_count,
                                      &counter->mmap_mutex)) {
+               struct user_struct *user = current_user();
+
+               atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
                vma->vm_mm->locked_vm -= counter->data->nr_locked;
                perf_mmap_data_free(counter);
                mutex_unlock(&counter->mmap_mutex);
@@ -1537,11 +1540,13 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
        struct perf_counter *counter = file->private_data;
+       struct user_struct *user = current_user();
        unsigned long vma_size;
        unsigned long nr_pages;
+       unsigned long user_locked, user_lock_limit;
        unsigned long locked, lock_limit;
+       long user_extra, extra;
        int ret = 0;
-       long extra;
 
        if (!(vma->vm_flags & VM_SHARED) || (vma->vm_flags & VM_WRITE))
                return -EINVAL;
@@ -1569,15 +1574,17 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                goto unlock;
        }
 
-       extra = nr_pages /* + 1 only account the data pages */;
-       extra -= sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
-       if (extra < 0)
-               extra = 0;
+       user_extra = nr_pages + 1;
+       user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
+       user_locked = atomic_long_read(&user->locked_vm) + user_extra;
 
-       locked = vma->vm_mm->locked_vm + extra;
+       extra = 0;
+       if (user_locked > user_lock_limit)
+               extra = user_locked - user_lock_limit;
 
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;
+       locked = vma->vm_mm->locked_vm + extra;
 
        if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
                ret = -EPERM;
@@ -1590,6 +1597,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
                goto unlock;
 
        atomic_set(&counter->mmap_count, 1);
+       atomic_long_add(user_extra, &user->locked_vm);
        vma->vm_mm->locked_vm += extra;
        counter->data->nr_locked = extra;
 unlock:
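
The accounting above works as follows: each user gets sysctl_perf_counter_mlock KiB of "free" locked memory shared across all of their counters, and only pages beyond that gift are charged against the process's RLIMIT_MEMLOCK (unless the task has CAP_IPC_LOCK). Below is a minimal userspace sketch of that arithmetic, assuming 4 KiB pages and treating the kernel state (the user's locked_vm, the mm's locked_vm, the rlimit) as plain inputs. The function name and the standalone program are illustrative only, not kernel code; the CAP_IPC_LOCK bypass and all locking are omitted.

/*
 * Illustrative model of the per-user mlock accounting in perf_mmap().
 * Mirrors the kernel arithmetic but is not kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assume 4 KiB pages */

/* sysctl_perf_counter_mlock: 'free' KiB gifted to each user */
static long sysctl_perf_counter_mlock = 512;

/*
 * Returns 0 if mapping nr_pages data pages stays within limits,
 * -1 (the -EPERM case) otherwise. user_locked_vm is the user's
 * current perf-locked page count, mm_locked_vm the process's
 * locked_vm, memlock_limit_bytes the RLIMIT_MEMLOCK soft limit.
 */
static int perf_mmap_would_succeed(long nr_pages, long user_locked_vm,
				   long mm_locked_vm,
				   long memlock_limit_bytes)
{
	/* +1: the user-visible control page is charged too */
	long user_extra = nr_pages + 1;
	/* convert the sysctl from KiB to pages */
	long user_lock_limit = sysctl_perf_counter_mlock >> (PAGE_SHIFT - 10);
	long user_locked = user_locked_vm + user_extra;
	long extra = 0;

	/* only pages beyond the per-user gift count against the rlimit */
	if (user_locked > user_lock_limit)
		extra = user_locked - user_lock_limit;

	long lock_limit = memlock_limit_bytes >> PAGE_SHIFT;
	long locked = mm_locked_vm + extra;

	/* the kernel also lets CAP_IPC_LOCK holders pass this check */
	return (locked > lock_limit) ? -1 : 0;
}

int main(void)
{
	/* 512 KiB gift = 128 pages at 4 KiB; 16+1 pages fit for free */
	printf("%d\n", perf_mmap_would_succeed(16, 0, 0, 64 << 10));
	/* a user already at the gift limit with RLIMIT_MEMLOCK of 0 */
	printf("%d\n", perf_mmap_would_succeed(16, 128, 0, 0));
	return 0;
}

With the default gift of 512 KiB (128 pages on this configuration) the first call succeeds without touching the rlimit at all, which is the common unprivileged case. The second call models a user who has already consumed the whole gift: the 17 extra pages must fit under RLIMIT_MEMLOCK, and with a zero limit the check fails, just as perf_mmap() would return -EPERM. This is the point of the patch: the gift is now bounded per user rather than per counter, so an unprivileged user can no longer multiply it by opening many counters.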