kernel core: use helpers for rlimits
author Jiri Slaby <jslaby@suse.cz>
Fri, 5 Mar 2010 21:42:54 +0000 (13:42 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 6 Mar 2010 19:26:33 +0000 (11:26 -0800)
Make sure the compiler won't do weird things with the limits.  E.g. fetching
them twice may return two different values after writable limits are
implemented.

I.e. either use the rlimit helpers added in commit 3e10e716abf3 ("resource:
add helpers for fetching rlimits") or ACCESS_ONCE where the helpers are not
applicable.
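
For reference (a rough sketch; the exact definitions live in
include/linux/compiler.h and include/linux/sched.h and may differ between
kernel versions), ACCESS_ONCE forces the compiler to emit exactly one load:

    #define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

and the helpers from commit 3e10e716abf3 are, approximately, thin wrappers
around it:

    /* soft (rlim_cur) limit of an arbitrary task, read exactly once */
    static inline unsigned long task_rlimit(const struct task_struct *tsk,
                    unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
    }

    /* hard (rlim_max) limit of an arbitrary task */
    static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                    unsigned int limit)
    {
            return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
    }

    /* soft limit of the current task */
    static inline unsigned long rlimit(unsigned int limit)
    {
            return task_rlimit(current, limit);
    }

So, for example, rlimit(RLIMIT_MEMLOCK) below stands in for an open-coded
current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur dereference, with a single
read guaranteed.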

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: john stultz <johnstul@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
kernel/fork.c
kernel/perf_event.c
kernel/posix-cpu-timers.c
kernel/sched.c
kernel/sched_rt.c
kernel/signal.c
kernel/sys.c

diff --git a/kernel/fork.c b/kernel/fork.c
index bab7b254ad397933e1bb25f77246ff57e8473a07..b0ec34abc0bb8261c5c930f1dcf78ed7203051e4 100644
@@ -828,6 +828,8 @@ void __cleanup_sighand(struct sighand_struct *sighand)
  */
 static void posix_cpu_timers_init_group(struct signal_struct *sig)
 {
+       unsigned long cpu_limit;
+
        /* Thread group counters. */
        thread_group_cputime_init(sig);
 
@@ -842,9 +844,9 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
        sig->cputime_expires.virt_exp = cputime_zero;
        sig->cputime_expires.sched_exp = 0;
 
-       if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-               sig->cputime_expires.prof_exp =
-                       secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+       cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       if (cpu_limit != RLIM_INFINITY) {
+               sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
                sig->cputimer.running = 1;
        }
 
@@ -1037,7 +1039,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
        retval = -EAGAIN;
        if (atomic_read(&p->real_cred->user->processes) >=
-                       p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+                       task_rlimit(p, RLIMIT_NPROC)) {
                if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                    p->real_cred->user != INIT_USER)
                        goto bad_fork_free;
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a661e7991865bdeb22d6631b01a96d8a4b6c3047..8e352c756ba742cda4dfa22bdb9954ed675d305e 100644
@@ -2610,7 +2610,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        if (user_locked > user_lock_limit)
                extra = user_locked - user_lock_limit;
 
-       lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
+       lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
        locked = vma->vm_mm->locked_vm + extra;
 
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index dbb16bf15c45d67dbee44bb0a7709dfead614a77..1a22dfd42df9b4089e9630f2635a425a26d4959b 100644
@@ -1031,9 +1031,10 @@ static void check_thread_timers(struct task_struct *tsk,
        /*
         * Check for the special case thread timers.
         */
-       soft = sig->rlim[RLIMIT_RTTIME].rlim_cur;
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
-               unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1194,10 +1195,11 @@ static void check_process_timers(struct task_struct *tsk,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
-       soft = sig->rlim[RLIMIT_CPU].rlim_cur;
+       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
-               unsigned long hard = sig->rlim[RLIMIT_CPU].rlim_max;
+               unsigned long hard =
+                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
diff --git a/kernel/sched.c b/kernel/sched.c
index abb36b16b93b7ab3984d19980a3cbb494e823632..b47ceeec1a912054e63e947f8a334f0152cdcceb 100644
@@ -4353,7 +4353,7 @@ int can_nice(const struct task_struct *p, const int nice)
        /* convert nice value [19,-20] to rlimit style value [1,40] */
        int nice_rlim = 20 - nice;
 
-       return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
+       return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
                capable(CAP_SYS_NICE));
 }
 
@@ -4530,7 +4530,7 @@ recheck:
 
                        if (!lock_task_sighand(p, &flags))
                                return -ESRCH;
-                       rlim_rtprio = p->signal->rlim[RLIMIT_RTPRIO].rlim_cur;
+                       rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO);
                        unlock_task_sighand(p, &flags);
 
                        /* can't set/change the rt policy */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index bf3e38fdbe6dc6ca0160d7f408881525928da21a..5a6ed1f0990a5cd5f128d4f61cd8522a40ff3466 100644
@@ -1662,8 +1662,9 @@ static void watchdog(struct rq *rq, struct task_struct *p)
        if (!p->signal)
                return;
 
-       soft = p->signal->rlim[RLIMIT_RTTIME].rlim_cur;
-       hard = p->signal->rlim[RLIMIT_RTTIME].rlim_max;
+       /* max may change after cur was read, this will be fixed next tick */
+       soft = task_rlimit(p, RLIMIT_RTTIME);
+       hard = task_rlimit_max(p, RLIMIT_RTTIME);
 
        if (soft != RLIM_INFINITY) {
                unsigned long next;
diff --git a/kernel/signal.c b/kernel/signal.c
index 5bb9baffa4f121c732b05ab568e37484a02a537c..dbd7fe073c558dbc57080e7d0d9344b2cf47b2dd 100644
@@ -245,7 +245,7 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
 
        if (override_rlimit ||
            atomic_read(&user->sigpending) <=
-                       t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur) {
+                       task_rlimit(t, RLIMIT_SIGPENDING)) {
                q = kmem_cache_alloc(sigqueue_cachep, flags);
        } else {
                print_dropped_signal(sig);
diff --git a/kernel/sys.c b/kernel/sys.c
index 877fe4f8e05e0439e58edae684bf6a57c6e9e3a9..9814e43fb23b7539354ffc1cf685256ccdb4decf 100644
@@ -571,8 +571,7 @@ static int set_user(struct cred *new)
        if (!new_user)
                return -EAGAIN;
 
-       if (atomic_read(&new_user->processes) >=
-                               current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
+       if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
                        new_user != INIT_USER) {
                free_uid(new_user);
                return -EAGAIN;