/*
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */
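/*
 * For example, the per-user process count kept here is what fork()
 * compares against RLIMIT_NPROC.  A rough sketch of that check (it
 * lives in kernel/fork.c, not in this file, and is simplified here):
 *
 *	if (atomic_read(&p->real_cred->user->processes) >=
 *			p->signal->rlim[RLIMIT_NPROC].rlim_cur)
 *		fail with -EAGAIN unless the task is suitably privileged;
 */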
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"
struct user_namespace init_user_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
	.root_user	= &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);
/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user ID's (ie setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))
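/*
 * Worked example (assuming UIDHASH_BITS == 7, i.e. UIDHASH_SZ == 128):
 * uid 1000 hashes to ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111,
 * so uidhashentry(ns, 1000) points at &ns->uidhash_table[111].
 */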
static struct kmem_cache *uid_cachep;
/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that..
 */
static DEFINE_SPINLOCK(uidhash_lock);
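/*
 * Consequently the lock is always taken with interrupts disabled
 * (spin_lock_irq/spin_lock_irqsave), never with spin_lock_bh().  The
 * pattern used throughout this file looks roughly like:
 *
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&uidhash_lock, flags);
 *	... hash lookup / insert / remove ...
 *	spin_unlock_irqrestore(&uidhash_lock, flags);
 */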
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};
/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = PTR_ERR(up->tg);

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */
#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}
/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	sscanf(buf, "%lu", &shares);

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
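/*
 * Illustrative use from userspace (not shown in this file): with
 * CONFIG_USER_SCHED and CONFIG_FAIR_GROUP_SCHED enabled, the CPU share
 * of everything run by uid 1000 could be lowered relative to other
 * users with something like:
 *
 *	# echo 512 > /sys/kernel/uids/1000/cpu_share
 */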
#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_runtime;
	int rc;

	sscanf(buf, "%ld", &rt_runtime);

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);
static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	sscanf(buf, "%lu", &rt_period);

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif
/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};
/* the lifetime of user_struct is not managed by the core (now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= uids_attributes,
	.release	= uids_release,
};
/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}
/* create these entries in sysfs:
 * 	"/sys/kernel/uids" directory
 * 	"/sys/kernel/uids/0" directory (for root user)
 * 	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}
/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}
/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore back the count */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}
#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif	/* CONFIG_USER_SCHED && CONFIG_SYSFS */
/*
 * Locate the user_struct for the passed UID.  If found, take a ref on it.  The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}
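/*
 * Illustrative caller (a sketch, not code from this file): look up a
 * uid, read something off the structure, then drop the reference that
 * find_user() took.
 *
 *	struct user_struct *u = find_user(1000);
 *	if (u) {
 *		printk(KERN_DEBUG "uid 1000 has %d processes\n",
 *		       atomic_read(&u->processes));
 *		free_uid(u);
 *	}
 */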
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already..
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}
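/*
 * Typical use (a sketch; the real callers are the setuid() family and
 * credential setup): get or create the user_struct for a target uid,
 * bailing out if the allocation fails.
 *
 *	struct user_struct *new_user = alloc_uid(ns, new_uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	...
 *	free_uid(new_user);	// once the reference is no longer needed
 */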
#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * collapse the chains so that the user_struct-s will
	 * be still alive, but not in hashes. subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for(n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);