- Replace key_user->user_ns equality checks with kuid_has_mapping() checks.
- Use from_kuid() to generate key descriptions.
- Use kuid_t and kgid_t and the associated helpers instead of uid_t and
  gid_t; a minimal sketch of the conversion pattern follows this list.
- Avoid potential problems with file descriptor passing by displaying
keys in the user namespace of the opener of key status proc files.
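
The conversion pattern above, condensed into a minimal sketch.  The two
example_* helpers below are hypothetical and not part of this patch; only
the <linux/uidgid.h> and <linux/seq_file.h> calls they use are the real API:

#include <linux/cred.h>
#include <linux/seq_file.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

/* Accept a uid_t from userspace and store it as a kuid_t.  Returns 1 if
 * the new owner is the global root user, 0 for any other mapped user, or
 * -EINVAL if the value has no mapping in the caller's namespace. */
static int example_set_owner(kuid_t *owner, uid_t user)
{
	/* interpret the raw value in the caller's user namespace */
	kuid_t uid = make_kuid(current_user_ns(), user);

	/* refuse uids that do not map into that namespace */
	if (!uid_valid(uid))
		return -EINVAL;

	*owner = uid;

	/* comparisons now go through the typed helpers */
	return uid_eq(uid, GLOBAL_ROOT_UID);
}

/* Display an owner to whoever opened a status proc file. */
static void example_show_owner(struct seq_file *m, kuid_t owner)
{
	/* skip objects whose owner is invisible to the opener... */
	if (!kuid_has_mapping(seq_user_ns(m), owner))
		return;

	/* ...and translate the kuid back into the opener's namespace */
	seq_printf(m, "uid=%u\n", from_kuid_munged(seq_user_ns(m), owner));
}

Compare with the keyctl_chown_key() and proc show hunks below, which follow
the same shape.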
Cc: linux-security-module@vger.kernel.org
Cc: keyrings@linux-nfs.org
Cc: David Howells <dhowells@redhat.com>
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
#include <linux/atomic.h>
#ifdef __KERNEL__
+#include <linux/uidgid.h>
/* key handle serial number */
typedef int32_t key_serial_t;
time_t revoked_at; /* time at which key was revoked */
};
time_t last_used_at; /* last time used for LRU keyring discard */
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
key_perm_t perm; /* access permissions */
unsigned short quotalen; /* length added to quota */
unsigned short datalen; /* payload data length
extern struct key *key_alloc(struct key_type *type,
const char *desc,
- uid_t uid, gid_t gid,
+ kuid_t uid, kgid_t gid,
const struct cred *cred,
key_perm_t perm,
unsigned long flags);
extern int key_unlink(struct key *keyring,
struct key *key);
-extern struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred,
unsigned long flags,
struct key *dest);
# Features
depends on IMA = n
depends on EVM = n
- depends on KEYS = n
depends on AUDIT = n
depends on AUDITSYSCALL = n
depends on TASKSTATS = n
atomic_t usage; /* for accessing qnkeys & qnbytes */
atomic_t nkeys; /* number of keys */
atomic_t nikeys; /* number of instantiated keys */
- uid_t uid;
- struct user_namespace *user_ns;
+ kuid_t uid;
int qnkeys; /* number of keys allocated to this user */
int qnbytes; /* number of bytes allocated to this user */
};
extern spinlock_t key_user_lock;
extern struct key_user root_key_user;
-extern struct key_user *key_user_lookup(uid_t uid,
- struct user_namespace *user_ns);
+extern struct key_user *key_user_lookup(kuid_t uid);
extern void key_user_put(struct key_user *user);
/*
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
-#include <linux/user_namespace.h>
#include "internal.h"
struct kmem_cache *key_jar;
* Get the key quota record for a user, allocating a new record if one doesn't
* already exist.
*/
-struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
+struct key_user *key_user_lookup(kuid_t uid)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent = NULL;
parent = *p;
user = rb_entry(parent, struct key_user, node);
- if (uid < user->uid)
+ if (uid_lt(uid, user->uid))
p = &(*p)->rb_left;
- else if (uid > user->uid)
- p = &(*p)->rb_right;
- else if (user_ns < user->user_ns)
- p = &(*p)->rb_left;
- else if (user_ns > user->user_ns)
+ else if (uid_gt(uid, user->uid))
p = &(*p)->rb_right;
else
goto found;
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
- candidate->user_ns = get_user_ns(user_ns);
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
- put_user_ns(user->user_ns);
kfree(user);
}
* key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
- uid_t uid, gid_t gid, const struct cred *cred,
+ kuid_t uid, kgid_t gid, const struct cred *cred,
key_perm_t perm, unsigned long flags)
{
struct key_user *user = NULL;
quotalen = desclen + type->def_datalen;
/* get hold of the key tracking for this user */
- user = key_user_lookup(uid, cred->user_ns);
+ user = key_user_lookup(uid);
if (!user)
goto no_memory_1;
/* check that the user's quota permits allocation of another key and
* its description */
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&user->lock);
/* contemplate the quota adjustment */
if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxbytes = (key->user->uid == 0) ?
+ unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&key->user->lock);
ret = snprintf(tmpbuf, PAGE_SIZE - 1,
"%s;%d;%d;%08x;%s",
key->type->name,
- key->uid,
- key->gid,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
key->perm,
key->description ?: "");
*
* If successful, 0 will be returned.
*/
-long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+ ret = -EINVAL;
+ if ((user != (uid_t) -1) && !uid_valid(uid))
+ goto error;
+ if ((group != (gid_t) -1) && !gid_valid(gid))
+ goto error;
ret = 0;
- if (uid == (uid_t) -1 && gid == (gid_t) -1)
+ if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
- if (uid != (uid_t) -1 && key->uid != uid)
+ if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
- if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
- if (uid != (uid_t) -1 && uid != key->uid) {
+ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
- newowner = key_user_lookup(uid, current_user_ns());
+ newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
}
/* change the GID */
- if (gid != (gid_t) -1)
+ if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
- if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) {
+ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
key->perm = perm;
ret = 0;
}
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
- if (pcred->uid != mycred->euid ||
- pcred->euid != mycred->euid ||
- pcred->suid != mycred->euid ||
- pcred->gid != mycred->egid ||
- pcred->egid != mycred->egid ||
- pcred->sgid != mycred->egid)
+ if (!uid_eq(pcred->uid, mycred->euid) ||
+ !uid_eq(pcred->euid, mycred->euid) ||
+ !uid_eq(pcred->suid, mycred->euid) ||
+ !gid_eq(pcred->gid, mycred->egid) ||
+ !gid_eq(pcred->egid, mycred->egid) ||
+ !gid_eq(pcred->sgid, mycred->egid))
goto unlock;
/* the keyrings must have the same UID */
if ((pcred->tgcred->session_keyring &&
- pcred->tgcred->session_keyring->uid != mycred->euid) ||
- mycred->tgcred->session_keyring->uid != mycred->euid)
+ !uid_eq(pcred->tgcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->tgcred->session_keyring->uid, mycred->euid))
goto unlock;
/* cancel an already pending keyring replacement */
/*
* Allocate a keyring and link into the destination keyring.
*/
-struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
const struct cred *cred, unsigned long flags,
struct key *dest)
{
&keyring_name_hash[bucket],
type_data.link
) {
- if (keyring->user->user_ns != current_user_ns())
+ if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
continue;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
key = key_ref_to_ptr(key_ref);
- if (key->user->user_ns != cred->user_ns)
- goto use_other_perms;
-
/* use the second 8-bits of permissions for keys the caller owns */
- if (key->uid == cred->fsuid) {
+ if (uid_eq(key->uid, cred->fsuid)) {
kperm = key->perm >> 16;
goto use_these_perms;
}
/* use the third 8-bits of permissions for keys the caller has a group
* membership in common with */
- if (key->gid != -1 && key->perm & KEY_GRP_ALL) {
- if (key->gid == cred->fsgid) {
+ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+ if (gid_eq(key->gid, cred->fsgid)) {
kperm = key->perm >> 8;
goto use_these_perms;
}
- ret = groups_search(cred->group_info,
- make_kgid(current_user_ns(), key->gid));
+ ret = groups_search(cred->group_info, key->gid);
if (ret) {
kperm = key->perm >> 8;
goto use_these_perms;
}
}
-use_other_perms:
-
/* otherwise use the least-significant 8-bits */
kperm = key->perm;
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-static struct rb_node *key_serial_next(struct rb_node *n)
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
n = rb_next(n);
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
- if (key->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, key->user->uid))
break;
n = rb_next(n);
}
return seq_open(file, &proc_keys_ops);
}
-static struct key *find_ge_key(key_serial_t id)
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
struct rb_node *n = key_serial_tree.rb_node;
struct key *minkey = NULL;
return NULL;
for (;;) {
- if (minkey->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, minkey->user->uid))
return minkey;
n = rb_next(&minkey->serial_node);
if (!n)
if (*_pos > INT_MAX)
return NULL;
- key = find_ge_key(pos);
+ key = find_ge_key(p, pos);
if (!key)
return NULL;
*_pos = key->serial;
{
struct rb_node *n;
- n = key_serial_next(v);
+ n = key_serial_next(p, v);
if (n)
*_pos = key_node_serial(n);
return n;
atomic_read(&key->usage),
xbuf,
key->perm,
- key->uid,
- key->gid,
+ from_kuid_munged(seq_user_ns(m), key->uid),
+ from_kgid_munged(seq_user_ns(m), key->gid),
key->type->name);
#undef showflag
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
-static struct rb_node *__key_user_next(struct rb_node *n)
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
while (n) {
struct key_user *user = rb_entry(n, struct key_user, node);
- if (user->user_ns == current_user_ns())
+ if (kuid_has_mapping(user_ns, user->uid))
break;
n = rb_next(n);
}
return n;
}
-static struct rb_node *key_user_next(struct rb_node *n)
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
- return __key_user_next(rb_next(n));
+ return __key_user_next(user_ns, rb_next(n));
}
-static struct rb_node *key_user_first(struct rb_root *r)
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
{
struct rb_node *n = rb_first(r);
- return __key_user_next(n);
+ return __key_user_next(user_ns, n);
}
/*
spin_lock(&key_user_lock);
- _p = key_user_first(&key_user_tree);
+ _p = key_user_first(seq_user_ns(p), &key_user_tree);
while (pos > 0 && _p) {
pos--;
- _p = key_user_next(_p);
+ _p = key_user_next(seq_user_ns(p), _p);
}
return _p;
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
- return key_user_next((struct rb_node *)v);
+ return key_user_next(seq_user_ns(p), (struct rb_node *)v);
}
static void proc_key_users_stop(struct seq_file *p, void *v)
{
struct rb_node *_p = v;
struct key_user *user = rb_entry(_p, struct key_user, node);
- unsigned maxkeys = (user->uid == 0) ?
+ unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (user->uid == 0) ?
+ unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
- user->uid,
+ from_kuid_munged(seq_user_ns(m), user->uid),
atomic_read(&user->usage),
atomic_read(&user->nkeys),
atomic_read(&user->nikeys),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
- .uid = 0,
- .user_ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
};
/*
struct key *uid_keyring, *session_keyring;
char buf[20];
int ret;
+ uid_t uid;
cred = current_cred();
user = cred->user;
+ uid = from_kuid(cred->user_ns, user->uid);
- kenter("%p{%u}", user, user->uid);
+ kenter("%p{%u}", user, uid);
if (user->uid_keyring) {
kleave(" = 0 [exist]");
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
- sprintf(buf, "_uid.%u", user->uid);
+ sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
- uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
+ uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
cred, KEY_ALLOC_IN_QUOTA,
NULL);
if (IS_ERR(uid_keyring)) {
/* get a default session keyring (which might also exist
* already) */
- sprintf(buf, "_uid_ses.%u", user->uid);
+ sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
- keyring_alloc(buf, user->uid, (gid_t) -1,
+ keyring_alloc(buf, user->uid, INVALID_GID,
cred, KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_link;
/* record the UID and GID */
- sprintf(uid_str, "%d", cred->fsuid);
- sprintf(gid_str, "%d", cred->fsgid);
+ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+ sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
/* we say which key is under construction */
sprintf(key_str, "%d", key->serial);
kenter("");
- user = key_user_lookup(current_fsuid(), current_user_ns());
+ user = key_user_lookup(current_fsuid());
if (!user)
return ERR_PTR(-ENOMEM);