Modify alloc_uid to take a kuid and make the user hash table global.
Stop holding a reference to the user namespace in struct user_struct.
This simplifies the code and makes the per-user accounting independent
of which user namespace a uid happens to appear in.
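
For context, the calling convention this establishes looks roughly like the
sketch below. It is an illustration only, not a hunk from this patch:
example_charge_user() is a made-up caller and the error codes are
illustrative; make_kuid(), uid_valid(), find_user() and free_uid() are the
real interfaces being converted here.

        #include <linux/uidgid.h>
        #include <linux/cred.h>
        #include <linux/sched.h>

        /* Illustrative caller only (not part of this patch). */
        static int example_charge_user(uid_t who)
        {
                struct user_struct *user;
                kuid_t uid;

                /* Map the caller-visible uid_t into a kernel-internal kuid_t. */
                uid = make_kuid(current_user_ns(), who);
                if (!uid_valid(uid))
                        return -EINVAL;         /* uid has no mapping in this namespace */

                /*
                 * find_user()/alloc_uid() are now keyed on the kuid alone;
                 * the user namespace no longer selects a hash table.
                 */
                user = find_user(uid);
                if (!user)
                        return -ESRCH;

                /* ... per-user accounting against 'user' ... */

                free_uid(user);                 /* drop the reference find_user() took */
                return 0;
        }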
Acked-by: Serge Hallyn <serge.hallyn@canonical.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
struct task_struct *p, *g;
struct user_struct *user;
struct pid *pgrp;
+ kuid_t uid;
int ret;
switch (class) {
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
+ if (!uid_valid(uid))
+ break;
if (!who)
user = current_user();
else
- user = find_user(who);
+ user = find_user(uid);
if (!user)
break;
do_each_thread(g, p) {
- if (__task_cred(p)->uid != who)
+ const struct cred *tcred = __task_cred(p);
+ kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid);
+ if (!uid_eq(tcred_uid, uid))
continue;
ret = set_task_ioprio(p, ioprio);
if (ret)
struct task_struct *g, *p;
struct user_struct *user;
struct pid *pgrp;
+ kuid_t uid;
int ret = -ESRCH;
int tmpio;
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case IOPRIO_WHO_USER:
+ uid = make_kuid(current_user_ns(), who);
if (!who)
user = current_user();
else
- user = find_user(who);
+ user = find_user(uid);
if (!user)
break;
do_each_thread(g, p) {
- if (__task_cred(p)->uid != user->uid)
+ const struct cred *tcred = __task_cred(p);
+ kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid);
+ if (!uid_eq(tcred_uid, user->uid))
continue;
tmpio = get_task_ioprio(p);
if (tmpio < 0)
#include <linux/latencytop.h>
#include <linux/cred.h>
#include <linux/llist.h>
+#include <linux/uidgid.h>
#include <asm/processor.h>
/* Hash table maintenance information */
struct hlist_node uidhash_node;
- uid_t uid;
- struct user_namespace *_user_ns; /* Don't use will be removed soon */
+ kuid_t uid;
#ifdef CONFIG_PERF_EVENTS
atomic_long_t locked_vm;
extern int uids_sysfs_init(void);
-extern struct user_struct *find_user(uid_t);
+extern struct user_struct *find_user(kuid_t);
extern struct user_struct root_user;
#define INIT_USER (&root_user)
extern void __set_special_pids(struct pid *pid);
/* per-UID process charging. */
-extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
atomic_inc(&u->__count);
#include <linux/sched.h>
#include <linux/err.h>
-#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
-#define UIDHASH_SZ (1 << UIDHASH_BITS)
-
struct user_namespace {
struct kref kref;
- struct hlist_head uidhash_table[UIDHASH_SZ];
struct user_namespace *parent;
struct user_struct *creator;
struct work_struct destroyer;
const struct cred *cred = current_cred();
int error = -EINVAL;
struct pid *pgrp;
+ kuid_t cred_uid;
+ kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
goto out;
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
+ cred_uid = make_kuid(cred->user_ns, cred->uid);
+ uid = make_kuid(cred->user_ns, who);
user = cred->user;
if (!who)
- who = cred->uid;
- else if ((who != cred->uid) &&
- !(user = find_user(who)))
+ uid = cred_uid;
+ else if (!uid_eq(uid, cred_uid) &&
+ !(user = find_user(uid)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
- if (__task_cred(p)->uid == who)
+ const struct cred *tcred = __task_cred(p);
+ kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid);
+ if (uid_eq(tcred_uid, uid))
error = set_one_prio(p, niceval, error);
} while_each_thread(g, p);
- if (who != cred->uid)
+ if (!uid_eq(uid, cred_uid))
free_uid(user); /* For find_user() */
break;
}
const struct cred *cred = current_cred();
long niceval, retval = -ESRCH;
struct pid *pgrp;
+ kuid_t cred_uid;
+ kuid_t uid;
if (which > PRIO_USER || which < PRIO_PROCESS)
return -EINVAL;
} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
break;
case PRIO_USER:
+ cred_uid = make_kuid(cred->user_ns, cred->uid);
+ uid = make_kuid(cred->user_ns, who);
user = cred->user;
if (!who)
- who = cred->uid;
- else if ((who != cred->uid) &&
- !(user = find_user(who)))
+ uid = cred_uid;
+ else if (!uid_eq(uid, cred_uid) &&
+ !(user = find_user(uid)))
goto out_unlock; /* No processes for this user */
do_each_thread(g, p) {
- if (__task_cred(p)->uid == who) {
+ const struct cred *tcred = __task_cred(p);
+ kuid_t tcred_uid = make_kuid(tcred->user_ns, tcred->uid);
+ if (uid_eq(tcred_uid, uid)) {
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
}
} while_each_thread(g, p);
- if (who != cred->uid)
+ if (!uid_eq(uid, cred_uid))
free_uid(user); /* for find_user() */
break;
}
{
struct user_struct *new_user;
- new_user = alloc_uid(current_user_ns(), new->uid);
+ new_user = alloc_uid(make_kuid(new->user_ns, new->uid));
if (!new_user)
return -EAGAIN;
* when changing user ID's (ie setuid() and friends).
*/
+#define UIDHASH_BITS (CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ (1 << UIDHASH_BITS)
#define UIDHASH_MASK (UIDHASH_SZ - 1)
#define __uidhashfn(uid) (((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(ns, uid) ((ns)->uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid) (uidhash_table + __uidhashfn((__kuid_val(uid))))
static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
/*
* The uidhash_lock is mostly taken from process context, but it is
.files = ATOMIC_INIT(0),
.sigpending = ATOMIC_INIT(0),
.locked_shm = 0,
- ._user_ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
};
/*
static void uid_hash_remove(struct user_struct *up)
{
hlist_del_init(&up->uidhash_node);
- put_user_ns(up->_user_ns); /* It is safe to free the uid hash table now */
}
-static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
struct user_struct *user;
struct hlist_node *h;
hlist_for_each_entry(user, h, hashent, uidhash_node) {
- if (user->uid == uid) {
+ if (uid_eq(user->uid, uid)) {
atomic_inc(&user->__count);
return user;
}
*
* If the user_struct could not be found, return NULL.
*/
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
{
struct user_struct *ret;
unsigned long flags;
- struct user_namespace *ns = current_user_ns();
spin_lock_irqsave(&uidhash_lock, flags);
- ret = uid_hash_find(uid, uidhashentry(ns, uid));
+ ret = uid_hash_find(uid, uidhashentry(uid));
spin_unlock_irqrestore(&uidhash_lock, flags);
return ret;
}
local_irq_restore(flags);
}
-struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
{
- struct hlist_head *hashent = uidhashentry(ns, uid);
+ struct hlist_head *hashent = uidhashentry(uid);
struct user_struct *up, *new;
spin_lock_irq(&uidhash_lock);
new->uid = uid;
atomic_set(&new->__count, 1);
- new->_user_ns = get_user_ns(ns);
-
/*
* Before adding this, check whether we raced
* on adding the same user already..
spin_lock_irq(&uidhash_lock);
up = uid_hash_find(uid, hashent);
if (up) {
- put_user_ns(ns);
key_put(new->uid_keyring);
key_put(new->session_keyring);
kmem_cache_free(uid_cachep, new);
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
for(n = 0; n < UIDHASH_SZ; ++n)
- INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);
+ INIT_HLIST_HEAD(uidhash_table + n);
/* Insert the root user immediately (init already runs as root) */
spin_lock_irq(&uidhash_lock);
- uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
+ uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
spin_unlock_irq(&uidhash_lock);
return 0;
{
struct user_namespace *ns, *parent_ns = new->user_ns;
struct user_struct *root_user;
- int n;
ns = kmem_cache_alloc(user_ns_cachep, GFP_KERNEL);
if (!ns)
kref_init(&ns->kref);
- for (n = 0; n < UIDHASH_SZ; ++n)
- INIT_HLIST_HEAD(ns->uidhash_table + n);
-
/* Alloc new root user. */
- root_user = alloc_uid(ns, 0);
+ root_user = alloc_uid(make_kuid(ns, 0));
if (!root_user) {
kmem_cache_free(user_ns_cachep, ns);
return -ENOMEM;