CRED: Inaugurate COW credentials
kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/user_namespace.h>
#include "cred-internals.h"

struct user_namespace init_user_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.root_user = &root_user,
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_MASK		(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(ns, uid)	((ns)->uidhash_table + __uidhashfn((uid)))

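/*
 * Illustrative example (values assume the common UIDHASH_BITS == 7,
 * i.e. UIDHASH_SZ == 128): uid 1000 hashes to
 * ((1000 >> 7) + 1000) & 127 == (7 + 1000) & 127 == 111,
 * so it lands in bucket 111 of uidhash_table.
 */
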
static struct kmem_cache *uid_cachep;

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
static DEFINE_SPINLOCK(uidhash_lock);
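/* See free_uid() below: for this reason it pairs local_irq_save() with
 * atomic_dec_and_lock() on uidhash_lock rather than using the _bh
 * locking variants.
 */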

struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.files		= ATOMIC_INIT(0),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
#ifdef CONFIG_USER_SCHED
	.tg		= &init_task_group,
#endif
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

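/*
 * Look up a user_struct by uid in the given hash chain. On success this
 * takes a reference on the user_struct; the caller must drop it again
 * with free_uid(). Must be called with uidhash_lock held (see above).
 */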
static struct user_struct *uid_hash_find(uid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;
	struct hlist_node *h;

	hlist_for_each_entry(user, h, hashent, uidhash_node) {
		if (user->uid == uid) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

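/*
 * With CONFIG_USER_SCHED, each user gets its own scheduler task group,
 * so CPU time can be divided between users rather than only between
 * individual tasks.
 */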
#ifdef CONFIG_USER_SCHED

static void sched_destroy_user(struct user_struct *up)
{
	sched_destroy_group(up->tg);
}

static int sched_create_user(struct user_struct *up)
{
	int rc = 0;

	up->tg = sched_create_group(&root_task_group);
	if (IS_ERR(up->tg))
		rc = -ENOMEM;

	return rc;
}

#else	/* CONFIG_USER_SCHED */

static void sched_destroy_user(struct user_struct *up) { }
static int sched_create_user(struct user_struct *up) { return 0; }

#endif	/* CONFIG_USER_SCHED */

#if defined(CONFIG_USER_SCHED) && defined(CONFIG_SYSFS)

static struct kset *uids_kset; /* represents the /sys/kernel/uids/ directory */
static DEFINE_MUTEX(uids_mutex);

static inline void uids_mutex_lock(void)
{
	mutex_lock(&uids_mutex);
}

static inline void uids_mutex_unlock(void)
{
	mutex_unlock(&uids_mutex);
}

/* uid directory attributes */
#ifdef CONFIG_FAIR_GROUP_SCHED
static ssize_t cpu_shares_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_shares(up->tg));
}

static ssize_t cpu_shares_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long shares;
	int rc;

	if (sscanf(buf, "%lu", &shares) != 1)
		return -EINVAL;

	rc = sched_group_set_shares(up->tg, shares);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_share_attr =
	__ATTR(cpu_share, 0644, cpu_shares_show, cpu_shares_store);
#endif
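
/*
 * Illustrative use from userspace (sketch; the path is the one created
 * by uids_user_create()/uids_sysfs_init() below):
 *
 *	# cat /sys/kernel/uids/0/cpu_share
 *	# echo 2048 > /sys/kernel/uids/0/cpu_share
 */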

#ifdef CONFIG_RT_GROUP_SCHED
static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
}

static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	long rt_runtime;
	int rc;

	if (sscanf(buf, "%ld", &rt_runtime) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_runtime(up->tg, rt_runtime);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_runtime_attr =
	__ATTR(cpu_rt_runtime, 0644, cpu_rt_runtime_show, cpu_rt_runtime_store);

static ssize_t cpu_rt_period_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);

	return sprintf(buf, "%lu\n", sched_group_rt_period(up->tg));
}

static ssize_t cpu_rt_period_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t size)
{
	struct user_struct *up = container_of(kobj, struct user_struct, kobj);
	unsigned long rt_period;
	int rc;

	if (sscanf(buf, "%lu", &rt_period) != 1)
		return -EINVAL;

	rc = sched_group_set_rt_period(up->tg, rt_period);

	return (rc ? rc : size);
}

static struct kobj_attribute cpu_rt_period_attr =
	__ATTR(cpu_rt_period, 0644, cpu_rt_period_show, cpu_rt_period_store);
#endif

/* default attributes per uid directory */
static struct attribute *uids_attributes[] = {
#ifdef CONFIG_FAIR_GROUP_SCHED
	&cpu_share_attr.attr,
#endif
#ifdef CONFIG_RT_GROUP_SCHED
	&cpu_rt_runtime_attr.attr,
	&cpu_rt_period_attr.attr,
#endif
	NULL
};

/* the lifetime of user_struct is not managed by the core (for now) */
static void uids_release(struct kobject *kobj)
{
}

static struct kobj_type uids_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_attrs = uids_attributes,
	.release = uids_release,
};

/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
static int uids_user_create(struct user_struct *up)
{
	struct kobject *kobj = &up->kobj;
	int error;

	memset(kobj, 0, sizeof(struct kobject));
	kobj->kset = uids_kset;
	error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
	if (error) {
		kobject_put(kobj);
		goto done;
	}

	kobject_uevent(kobj, KOBJ_ADD);
done:
	return error;
}

/* create these entries in sysfs:
 *	"/sys/kernel/uids" directory
 *	"/sys/kernel/uids/0" directory (for root user)
 *	"/sys/kernel/uids/0/cpu_share" file (for root user)
 */
int __init uids_sysfs_init(void)
{
	uids_kset = kset_create_and_add("uids", NULL, kernel_kobj);
	if (!uids_kset)
		return -ENOMEM;

	return uids_user_create(&root_user);
}

/* work function to remove sysfs directory for a user and free up
 * corresponding structures.
 */
static void remove_user_sysfs_dir(struct work_struct *w)
{
	struct user_struct *up = container_of(w, struct user_struct, work);
	unsigned long flags;
	int remove_user = 0;

	/* Make uid_hash_remove() + sysfs_remove_file() + kobject_del()
	 * atomic.
	 */
	uids_mutex_lock();

	local_irq_save(flags);

	if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
		uid_hash_remove(up);
		remove_user = 1;
		spin_unlock_irqrestore(&uidhash_lock, flags);
	} else {
		local_irq_restore(flags);
	}

	if (!remove_user)
		goto done;

	kobject_uevent(&up->kobj, KOBJ_REMOVE);
	kobject_del(&up->kobj);
	kobject_put(&up->kobj);

	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);

done:
	uids_mutex_unlock();
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
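/* The sysfs/kobject teardown done by remove_user_sysfs_dir() can sleep,
 * so it is deferred to a workqueue here rather than performed directly
 * with interrupts disabled.
 */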
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	/* restore the reference count; the work function drops it again */
	atomic_inc(&up->__count);
	spin_unlock_irqrestore(&uidhash_lock, flags);

	INIT_WORK(&up->work, remove_user_sysfs_dir);
	schedule_work(&up->work);
}

#else	/* CONFIG_USER_SCHED && CONFIG_SYSFS */

int uids_sysfs_init(void) { return 0; }
static inline int uids_user_create(struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static inline void free_user(struct user_struct *up, unsigned long flags)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	sched_destroy_user(up);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

#endif

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(uid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;
	struct user_namespace *ns = current->nsproxy->user_ns;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(ns, uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

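/*
 * Illustrative caller pattern for the pair above (sketch only):
 *
 *	struct user_struct *up = find_user(uid);
 *	if (up) {
 *		... inspect up->processes, up->sigpending, etc. ...
 *		free_uid(up);	(drops the ref taken by find_user())
 *	}
 */
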
struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
{
	struct hlist_head *hashent = uidhashentry(ns, uid);
	struct user_struct *up, *new;

	/* Make uid_hash_find() + uids_user_create() + uid_hash_insert()
	 * atomic.
	 */
	uids_mutex_lock();

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			goto out_unlock;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		if (sched_create_user(new) < 0)
			goto out_free_user;

		if (uids_user_create(new))
			goto out_destroy_sched;

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			/* This case is not possible when CONFIG_USER_SCHED
			 * is defined, since we serialize alloc_uid() using
			 * uids_mutex. Hence no need to call
			 * sched_destroy_user() or remove_user_sysfs_dir().
			 */
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}

	uids_mutex_unlock();

	return up;

out_destroy_sched:
	sched_destroy_user(new);
out_free_user:
	kmem_cache_free(uid_cachep, new);
out_unlock:
	uids_mutex_unlock();
	return NULL;
}

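/*
 * Illustrative caller pattern (sketch; real callers such as set_user()
 * in kernel/sys.c follow this shape):
 *
 *	struct user_struct *new_user = alloc_uid(ns, uid);
 *	if (!new_user)
 *		return -EAGAIN;
 *	... switch accounting to new_user, then free_uid() the old one ...
 */
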
#ifdef CONFIG_USER_NS
void release_uids(struct user_namespace *ns)
{
	int i;
	unsigned long flags;
	struct hlist_head *head;
	struct hlist_node *nd;

	spin_lock_irqsave(&uidhash_lock, flags);
	/*
	 * Collapse the chains so that the user_structs will still be
	 * alive, but no longer in the hashes. A subsequent free_uid()
	 * will free them.
	 */
	for (i = 0; i < UIDHASH_SZ; i++) {
		head = ns->uidhash_table + i;
		while (!hlist_empty(head)) {
			nd = head->first;
			hlist_del_init(nd);
		}
	}
	spin_unlock_irqrestore(&uidhash_lock, flags);

	free_uid(ns->root_user);
}
#endif

static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(init_user_ns.uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(&init_user_ns, 0));
	spin_unlock_irq(&uidhash_lock);

	return 0;
}

module_init(uid_cache_init);