kernel/user.c
/*
 * The "user cache".
 *
 * (C) Copyright 1991-2000 Linus Torvalds
 *
 * We have a per-user structure to keep track of how many
 * processes, files etc the user has claimed, in order to be
 * able to have per-user limits for system resources.
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/key.h>
#include <linux/sched/user.h>
#include <linux/interrupt.h>
#include <linux/export.h>
#include <linux/user_namespace.h>
#include <linux/proc_fs.h>
#include <linux/proc_ns.h>

/*
 * userns count is 1 for root user, 1 for init_uts_ns,
 * and 1 for... ?
 */
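/*
 * Each of the maps below is a single identity extent spanning the whole
 * 32-bit id space: id 0 maps to 0 and the extent is 4294967295 (UINT_MAX)
 * ids long, so every uid/gid/projid maps to itself in the initial
 * namespace.
 */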
struct user_namespace init_user_ns = {
	.uid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.gid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.projid_map = {
		.nr_extents = 1,
		.extent[0] = {
			.first = 0,
			.lower_first = 0,
			.count = 4294967295U,
		},
	},
	.count = ATOMIC_INIT(3),
	.owner = GLOBAL_ROOT_UID,
	.group = GLOBAL_ROOT_GID,
	.ns.inum = PROC_USER_INIT_INO,
#ifdef CONFIG_USER_NS
	.ns.ops = &userns_operations,
#endif
	.flags = USERNS_INIT_FLAGS,
#ifdef CONFIG_PERSISTENT_KEYRINGS
	.persistent_keyring_register_sem =
	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);

/*
 * UID task count cache, to get fast user lookup in "alloc_uid"
 * when changing user IDs (i.e. setuid() and friends).
 */

#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)
#define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
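/*
 * Worked example (illustrative): with CONFIG_BASE_SMALL unset,
 * UIDHASH_BITS is 7, so there are 128 buckets and UIDHASH_MASK is 127.
 * For uid 1000: (1000 >> 7) + 1000 = 7 + 1000 = 1007, and
 * 1007 & 127 = 111, so uid 1000 lands in bucket 111. Folding the high
 * bits back into the low bits spreads runs of consecutive UIDs across
 * buckets rather than clustering them.
 */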

static struct kmem_cache *uid_cachep;
struct hlist_head uidhash_table[UIDHASH_SZ];

/*
 * The uidhash_lock is mostly taken from process context, but it is
 * occasionally also taken from softirq/tasklet context, when
 * task-structs get RCU-freed. Hence all locking must be softirq-safe.
 * But free_uid() is also called with local interrupts disabled, and running
 * local_bh_enable() with local interrupts disabled is an error - we'll run
 * softirq callbacks, and they can unconditionally enable interrupts, and
 * the caller of free_uid() didn't expect that.
 */
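/*
 * Concretely (illustrative): spin_lock_bh()/spin_unlock_bh() would be the
 * obvious softirq-safe choice, but spin_unlock_bh() ends in
 * local_bh_enable(), which is exactly the call that must not be made with
 * hard interrupts off. Hence the irqsave/irqrestore variants used with
 * this lock throughout the file.
 */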
static DEFINE_SPINLOCK(uidhash_lock);

/* root_user.__count is 1, for init task cred */
struct user_struct root_user = {
	.__count	= ATOMIC_INIT(1),
	.processes	= ATOMIC_INIT(1),
	.sigpending	= ATOMIC_INIT(0),
	.locked_shm	= 0,
	.uid		= GLOBAL_ROOT_UID,
};

/*
 * These routines must be called with the uidhash spinlock held!
 */
static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
{
	hlist_add_head(&up->uidhash_node, hashent);
}

static void uid_hash_remove(struct user_struct *up)
{
	hlist_del_init(&up->uidhash_node);
}

static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
{
	struct user_struct *user;

	hlist_for_each_entry(user, hashent, uidhash_node) {
		if (uid_eq(user->uid, uid)) {
			atomic_inc(&user->__count);
			return user;
		}
	}

	return NULL;
}

/* IRQs are disabled and uidhash_lock is held upon function entry.
 * IRQ state (as stored in flags) is restored and uidhash_lock released
 * upon function exit.
 */
static void free_user(struct user_struct *up, unsigned long flags)
	__releases(&uidhash_lock)
{
	uid_hash_remove(up);
	spin_unlock_irqrestore(&uidhash_lock, flags);
	key_put(up->uid_keyring);
	key_put(up->session_keyring);
	kmem_cache_free(uid_cachep, up);
}

/*
 * Locate the user_struct for the passed UID. If found, take a ref on it. The
 * caller must undo that ref with free_uid().
 *
 * If the user_struct could not be found, return NULL.
 */
struct user_struct *find_user(kuid_t uid)
{
	struct user_struct *ret;
	unsigned long flags;

	spin_lock_irqsave(&uidhash_lock, flags);
	ret = uid_hash_find(uid, uidhashentry(uid));
	spin_unlock_irqrestore(&uidhash_lock, flags);
	return ret;
}

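/*
 * Drop a reference on @up. Interrupts are disabled before the count is
 * dropped (see the comment above uidhash_lock): atomic_dec_and_lock()
 * takes uidhash_lock only when the count reaches zero, in which case
 * free_user() unhashes the entry, releases the lock and frees it.
 */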
void free_uid(struct user_struct *up)
{
	unsigned long flags;

	if (!up)
		return;

	local_irq_save(flags);
	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
		free_user(up, flags);
	else
		local_irq_restore(flags);
}

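/*
 * Find the user_struct for @uid, creating and hashing it if it does not
 * exist yet. The first lookup is optimistic: the GFP_KERNEL allocation may
 * sleep, so it must happen outside the spinlock, and the second
 * uid_hash_find() under the lock catches a concurrent insertion of the
 * same uid, in which case the freshly allocated entry is discarded.
 */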
struct user_struct *alloc_uid(kuid_t uid)
{
	struct hlist_head *hashent = uidhashentry(uid);
	struct user_struct *up, *new;

	spin_lock_irq(&uidhash_lock);
	up = uid_hash_find(uid, hashent);
	spin_unlock_irq(&uidhash_lock);

	if (!up) {
		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
		if (!new)
			return NULL;

		new->uid = uid;
		atomic_set(&new->__count, 1);

		/*
		 * Before adding this, check whether we raced
		 * on adding the same user already.
		 */
		spin_lock_irq(&uidhash_lock);
		up = uid_hash_find(uid, hashent);
		if (up) {
			key_put(new->uid_keyring);
			key_put(new->session_keyring);
			kmem_cache_free(uid_cachep, new);
		} else {
			uid_hash_insert(new, hashent);
			up = new;
		}
		spin_unlock_irq(&uidhash_lock);
	}
	proc_register_uid(uid);

	return up;
}
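/*
 * Usage sketch (illustrative, not taken from this file): callers pair
 * alloc_uid() with a matching free_uid() once they are done:
 *
 *	struct user_struct *up = alloc_uid(uid);
 *
 *	if (!up)
 *		return -ENOMEM;
 *	... account resources against up, then drop the reference ...
 *	free_uid(up);
 */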
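
/*
 * Boot-time setup: create the slab cache for user_struct, initialise
 * every hash bucket, and hash the statically allocated root_user so
 * that early alloc_uid(GLOBAL_ROOT_UID) callers find it.
 */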
static int __init uid_cache_init(void)
{
	int n;

	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	for (n = 0; n < UIDHASH_SZ; ++n)
		INIT_HLIST_HEAD(uidhash_table + n);

	/* Insert the root user immediately (init already runs as root) */
	spin_lock_irq(&uidhash_lock);
	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
	spin_unlock_irq(&uidhash_lock);
	proc_register_uid(GLOBAL_ROOT_UID);

	return 0;
}
subsys_initcall(uid_cache_init);