// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/fs.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>
#include <linux/mempolicy.h>
#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/stat.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task.h>
#include <linux/sched/cputime.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is the same as p's uid or euid,
 * or if current has CAP_SYS_NICE in p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}

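/*
 * Illustrative userspace sketch (not part of this file): the raw
 * syscall's 40..1 encoding versus the usual -20..19 nice range.  The
 * glibc getpriority() wrapper undoes the offset, so only direct
 * syscall() users see it.
 *
 *	#include <sys/resource.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	long v = syscall(SYS_getpriority, PRIO_PROCESS, 0);
 *	int nice_value = 20 - (int)v;	// e.g. v == 20 means nice 0
 */
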
/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

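/*
 * Illustrative userspace sketch: permanently dropping a setgid
 * program's privileges as the comment above describes - setting the
 * real gid also replaces the saved gid, so the old egid cannot be
 * regained afterwards.
 *
 *	gid_t rgid = getgid();
 *	if (setregid(rgid, rgid) != 0)
 *		abort();	// always check: this can fail with EPERM
 */
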
/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

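/*
 * Illustrative userspace sketch of the BSD-style temporary drop
 * described above: a setuid-root program swaps its real and effective
 * uids and later swaps them back; plain setuid() from uid 0 would also
 * overwrite the saved uid and make the drop permanent.
 *
 *	uid_t ruid = getuid(), euid = geteuid();
 *	setreuid(euid, ruid);	// drop: run as the invoking user
 *	do_unprivileged_work();	// hypothetical helper
 *	setreuid(ruid, euid);	// regain the original effective uid
 */
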
/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1 && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1 && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

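/*
 * Illustrative userspace sketch: setresuid() makes the "drop all
 * three ids" intent explicit, which is why privilege-separation code
 * tends to prefer it over setuid()/setreuid() combinations.
 *
 *	uid_t u = getuid();
 *	if (setresuid(u, u, u) != 0)
 *		abort();	// real, effective and saved all dropped
 */
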
SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1 && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1 && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}

/*
 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
 * is used for "access()" and for the NFS daemon (letting nfsd stay at
 * whatever uid it wants to). It normally shadows "euid", except when
 * explicitly set by setfsuid() or for access..
 */
SYSCALL_DEFINE1(setfsuid, uid_t, uid)
{
	const struct cred *old;
	struct cred *new;
	uid_t old_fsuid;
	kuid_t kuid;

	old = current_cred();
	old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);

	kuid = make_kuid(old->user_ns, uid);
	if (!uid_valid(kuid))
		return old_fsuid;

	new = prepare_creds();
	if (!new)
		return old_fsuid;

	if (uid_eq(kuid, old->uid)  || uid_eq(kuid, old->euid) ||
	    uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
	    ns_capable(old->user_ns, CAP_SETUID)) {
		if (!uid_eq(kuid, old->fsuid)) {
			new->fsuid = kuid;
			if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
				goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsuid;

change_okay:
	commit_creds(new);
	return old_fsuid;
}

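/*
 * Illustrative userspace sketch: setfsuid() returns the *previous*
 * fsuid and has no error return, so the usual way to detect failure
 * is to call it a second time and compare.
 *
 *	setfsuid(uid);
 *	if ((uid_t)setfsuid(-1) != uid)
 *		handle_failure();	// hypothetical helper
 */
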
/*
 * Samma på svenska.. ("same thing, in Swedish") - the setfsgid()
 * counterpart of setfsuid() above.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid) ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
#endif /* CONFIG_MULTIUSER */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}

#ifdef CONFIG_COMPAT
static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
{
	return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
}

COMPAT_SYSCALL_DEFINE1(times, struct compat_tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tms;
		struct compat_tms tmp;

		do_sys_times(&tms);
		/* Convert our struct tms to the compat version. */
		tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
		tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
		tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
		tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
		if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return compat_jiffies_to_clock_t(jiffies);
}
#endif

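/*
 * Illustrative userspace sketch: times() reports clock ticks (the
 * return value counts ticks since an arbitrary point, here jiffies
 * since boot), so values must be scaled by sysconf(_SC_CLK_TCK).
 *
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	struct tms t;
 *	times(&t);
 *	double cpu_sec = (double)(t.tms_utime + t.tms_stime) /
 *			 sysconf(_SC_CLK_TCK);
 */
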
/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40,
 * and we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}

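/*
 * Illustrative: with the +60 mapping above, a task built against a
 * 4.14 kernel and running with UNAME26 sees "2.6.74" (any non-numeric
 * suffix such as "-rc1" is kept).  The personality can be requested
 * from userspace:
 *
 *	#include <sys/personality.h>
 *	personality(PER_LINUX | UNAME26);	// then call uname(2)
 */
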
SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef CONFIG_COMPAT

COMPAT_SYSCALL_DEFINE2(setrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	struct compat_rlimit r32;

	if (copy_from_user(&r32, rlim, sizeof(struct compat_rlimit)))
		return -EFAULT;

	if (r32.rlim_cur == COMPAT_RLIM_INFINITY)
		r.rlim_cur = RLIM_INFINITY;
	else
		r.rlim_cur = r32.rlim_cur;
	if (r32.rlim_max == COMPAT_RLIM_INFINITY)
		r.rlim_max = RLIM_INFINITY;
	else
		r.rlim_max = r32.rlim_max;
	return do_prlimit(current, resource, &r, NULL);
}

COMPAT_SYSCALL_DEFINE2(getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;
	int ret;

	ret = do_prlimit(current, resource, NULL, &r);
	if (!ret) {
		struct compat_rlimit r32;
		if (r.rlim_cur > COMPAT_RLIM_INFINITY)
			r32.rlim_cur = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_cur = r.rlim_cur;
		if (r.rlim_max > COMPAT_RLIM_INFINITY)
			r32.rlim_max = COMPAT_RLIM_INFINITY;
		else
			r32.rlim_max = r.rlim_max;

		if (copy_to_user(rlim, &r32, sizeof(struct compat_rlimit)))
			return -EFAULT;
	}
	return ret;
}

#endif

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		       struct compat_rlimit __user *, rlim)
{
	struct rlimit r;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	r = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (r.rlim_cur > 0x7FFFFFFF)
		r.rlim_cur = 0x7FFFFFFF;
	if (r.rlim_max > 0x7FFFFFFF)
		r.rlim_max = 0x7FFFFFFF;

	if (put_user(r.rlim_cur, &rlim->rlim_cur) ||
	    put_user(r.rlim_max, &rlim->rlim_max))
		return -EFAULT;
	return 0;
}
#endif

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk, resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task,
				    unsigned int flags)
{
	const struct cred *cred = current_cred(), *tcred;
	bool id_match;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	id_match = (uid_eq(cred->uid, tcred->euid) &&
		    uid_eq(cred->uid, tcred->suid) &&
		    uid_eq(cred->uid, tcred->uid)  &&
		    gid_eq(cred->gid, tcred->egid) &&
		    gid_eq(cred->gid, tcred->sgid) &&
		    gid_eq(cred->gid, tcred->gid));
	if (!id_match && !ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return -EPERM;

	return security_task_prlimit(cred, tcred, flags);
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	unsigned int checkflags = 0;
	int ret;

	if (old_rlim)
		checkflags |= LSM_PRLIMIT_READ;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
		checkflags |= LSM_PRLIMIT_WRITE;
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk, checkflags);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

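/*
 * Illustrative userspace sketch: prlimit(2), the glibc wrapper for
 * prlimit64, reads and writes another process's limit in one call,
 * which the older getrlimit/setrlimit pair cannot do.
 *
 *	struct rlimit new = { .rlim_cur = 4096, .rlim_max = 4096 }, old;
 *	if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0)
 *		perror("prlimit");  // needs matching ids or CAP_SYS_RESOURCE
 */
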
SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 */

/*
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded,
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

void getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof(*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_timeval(utime);
	r->ru_stime = ns_to_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif

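/*
 * Illustrative userspace sketch: ru_maxrss is filled in kilobytes
 * (see the PAGE_SIZE / 1024 conversion above).
 *
 *	struct rusage ru;
 *	getrusage(RUSAGE_SELF, &ru);
 *	printf("peak RSS: %ld KiB\n", ru.ru_maxrss);
 */
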
SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to an executable file,
	 * make sure that this one is executable as well, to avoid breaking
	 * the overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	err = 0;
	/* set the new file, lockless */
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}

/*
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	struct mm_struct *mm = current->mm;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow the limits to be overridden if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	/*
	 * Someone is trying to cheat the auxv vector.
	 */
	if (prctl_map->auxv_size) {
		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
			goto out;
	}

	/*
	 * Finally, make sure the caller has the rights to
	 * change /proc/pid/exe link: only local sys admin should
	 * be allowed to.
	 */
	if (prctl_map->exe_fd != (u32)-1) {
		if (!ns_capable(current_user_ns(), CAP_SYS_ADMIN))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	down_write(&mm->mmap_sem);

	/*
	 * We don't validate that these members point to real, present VMAs:
	 * the application may already have unmapped the corresponding VMAs,
	 * and the kernel uses these members mostly for statistics output in
	 * procfs, except:
	 *
	 *  - @start_brk/@brk, which are used in do_brk; the kernel looks up
	 *    VMAs when updating these members, so a bad value written here
	 *    makes the kernel complain about the userspace program but won't
	 *    cause any problem in the kernel itself
	 */
	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	/*
	 * Note this update of @saved_auxv is lockless, so if someone reads
	 * this member in procfs while we're updating, they may get partly
	 * updated results.  It's a known and acceptable trade off: we leave
	 * it as is to avoid introducing additional locks here.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_write(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE];

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	prctl_map.start_code	= mm->start_code;
	prctl_map.end_code	= mm->end_code;
	prctl_map.start_data	= mm->start_data;
	prctl_map.end_data	= mm->end_data;
	prctl_map.start_brk	= mm->start_brk;
	prctl_map.brk		= mm->brk;
	prctl_map.start_stack	= mm->start_stack;
	prctl_map.arg_start	= mm->arg_start;
	prctl_map.arg_end	= mm->arg_end;
	prctl_map.env_start	= mm->env_start;
	prctl_map.env_end	= mm->env_end;
	prctl_map.auxv		= NULL;
	prctl_map.auxv_size	= 0;
	prctl_map.exe_fd	= -1;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment variables.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task has has_child_subreaper - all its descendants
	 * already have this flag too and new descendants will
	 * inherit it on fork, so skip them.
	 *
	 * If we've found child_reaper - skip descendants in
	 * its subtree as they will never get out of that pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

#ifdef CONFIG_MMU
static int prctl_update_vma_anon_name(struct vm_area_struct *vma,
		struct vm_area_struct **prev,
		unsigned long start, unsigned long end,
		const char __user *name_addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;

	if (name_addr == vma_get_anon_name(vma)) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, vma->vm_flags, vma->anon_vma,
				vma->vm_file, pgoff, vma_policy(vma),
				vma->vm_userfaultfd_ctx, name_addr);
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	if (!vma->vm_file)
		vma->anon_name = name_addr;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}

static int prctl_set_vma_anon_name(unsigned long start, unsigned long end,
			unsigned long arg)
{
	unsigned long tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - this matches the handling in madvise.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			return error;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				return error;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = prctl_update_vma_anon_name(vma, &prev, start, tmp,
				(const char __user *)arg);
		if (error)
			return error;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			return error;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
}

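/*
 * Illustrative userspace sketch (this prctl is an Android extension):
 * naming an anonymous mapping so it shows up as "[anon:myheap]" in
 * /proc/self/maps.  The name pointer must remain valid for the
 * lifetime of the mapping, since only the pointer is stored.
 *
 *	static const char name[] = "myheap";
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *	      (unsigned long)p, len, (unsigned long)name);
 */
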
static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	struct mm_struct *mm = current->mm;
	int error;
	unsigned long len;
	unsigned long end;

	if (start & ~PAGE_MASK)
		return -EINVAL;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		return -EINVAL;

	end = start + len;
	if (end < start)
		return -EINVAL;

	if (end == start)
		return 0;

	down_write(&mm->mmap_sem);

	switch (opt) {
	case PR_SET_VMA_ANON_NAME:
		error = prctl_set_vma_anon_name(start, end, arg);
		break;
	default:
		error = -EINVAL;
	}

	up_write(&mm->mmap_sem);

	return error;
}
#else /* CONFIG_MMU */
static int prctl_set_vma(unsigned long opt, unsigned long start,
		unsigned long len_in, unsigned long arg)
{
	return 0;
}
#endif

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!test_bit(MMF_DISABLE_THP, &me->mm->flags);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			set_bit(MMF_DISABLE_THP, &me->mm->flags);
		else
			clear_bit(MMF_DISABLE_THP, &me->mm->flags);
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_ENABLE_MANAGEMENT();
		break;
	case PR_MPX_DISABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_DISABLE_MANAGEMENT();
		break;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	case PR_SET_VMA:
		error = prctl_set_vma(arg2, arg3, arg4, arg5);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}

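/*
 * Illustrative userspace sketch: PR_SET_NAME/PR_GET_NAME operate on
 * the 16-byte comm of the *calling thread* only, not the whole
 * process.
 *
 *	prctl(PR_SET_NAME, (unsigned long)"worker-1", 0, 0, 0);
 *	char buf[16];
 *	prctl(PR_GET_NAME, (unsigned long)buf, 0, 0, 0);
 */
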
SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

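/*
 * Illustrative userspace sketch: total RAM in bytes is totalram
 * scaled by mem_unit (1 on a native 64-bit build, see the compat
 * scaling path below).
 *
 *	#include <sys/sysinfo.h>
 *
 *	struct sysinfo si;
 *	sysinfo(&si);
 *	unsigned long long bytes =
 *		(unsigned long long)si.totalram * si.mem_unit;
 */
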
#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */