/*
 * Generic pidhash and scalable, time-bounded PID allocator
 *
 * (C) 2002-2003 William Irwin, IBM
 * (C) 2004 William Irwin, Oracle
 * (C) 2002-2004 Ingo Molnar, Red Hat
 *
 * pid-structures are backing objects for tasks sharing a given ID to chain
 * against. There is very little to them aside from hashing them and
 * parking tasks using given ID's on a list.
 *
 * The hash is always changed with the tasklist_lock write-acquired,
 * and the hash is only accessed with the tasklist_lock at least
 * read-acquired, so there's no additional SMP locking needed here.
 *
 * We have a list of bitmap pages, which bitmaps represent the PID space.
 * Allocating and freeing PIDs is completely lockless. The worst-case
 * allocation scenario when all but one out of 1 million PIDs possible are
 * allocated already: the scanning of 32 list entries and at most PAGE_SIZE
 * bytes. The typical fastpath is a single successful setbit. Freeing is O(1).
 *
 * Pid namespaces:
 *    (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
 *    (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
 *    Many thanks to Oleg Nesterov for comments and help
 *
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rculist.h>
#include <linux/bootmem.h>
#include <linux/hash.h>
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>

#define pid_hashfn(nr, ns)	\
	hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
static struct hlist_head *pid_hash;
static int pidhash_shift;
struct pid init_struct_pid = INIT_STRUCT_PID;

int pid_max = PID_MAX_DEFAULT;

#define RESERVED_PIDS		300

int pid_max_min = RESERVED_PIDS + 1;
int pid_max_max = PID_MAX_LIMIT;

#define BITS_PER_PAGE		(PAGE_SIZE*8)
#define BITS_PER_PAGE_MASK	(BITS_PER_PAGE-1)

static inline int mk_pid(struct pid_namespace *pid_ns,
		struct pidmap *map, int off)
{
	return (map - pid_ns->pidmap)*BITS_PER_PAGE + off;
}

#define find_next_offset(map, off)					\
		find_next_zero_bit((map)->page, BITS_PER_PAGE, off)

/*
 * PID-map pages start out as NULL, they get allocated upon
 * first use and are never deallocated. This way a low pid_max
 * value does not cause lots of bitmaps to be allocated, but
 * the scheme scales to up to 4 million PIDs, runtime.
 */
struct pid_namespace init_pid_ns = {
	.kref = {
		.refcount = ATOMIC_INIT(2),
	},
	.pidmap = {
		[ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
	},
	.last_pid = 0,
	.level = 0,
	.child_reaper = &init_task,
};
EXPORT_SYMBOL_GPL(init_pid_ns);

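/*
 * is_container_init() - check whether @tsk is the init (pid 1) task of
 * its own pid namespace, i.e. a per-container init process.  The task's
 * struct pid is sampled under rcu_read_lock().
 */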
int is_container_init(struct task_struct *tsk)
{
	int ret = 0;
	struct pid *pid;

	rcu_read_lock();
	pid = task_pid(tsk);
	if (pid != NULL && pid->numbers[pid->level].nr == 1)
		ret = 1;
	rcu_read_unlock();

	return ret;
}
EXPORT_SYMBOL(is_container_init);

/*
 * Note: disable interrupts while the pidmap_lock is held as an
 * interrupt might come in and do read_lock(&tasklist_lock).
 *
 * If we don't disable interrupts there is a nasty deadlock between
 * detach_pid()->free_pid() and another cpu that does
 * spin_lock(&pidmap_lock) followed by an interrupt routine that does
 * read_lock(&tasklist_lock);
 *
 * After we clean up the tasklist_lock and know there are no
 * irq handlers that take it we can leave the interrupts enabled.
 * For now it is easier to be safe than to prove it can't happen.
 */

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);

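/*
 * free_pidmap() - return one pid number to its namespace's bitmap:
 * clear the bit for @upid->nr and bump the free count of that page.
 */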
static void free_pidmap(struct upid *upid)
{
	int nr = upid->nr;
	struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
	int offset = nr & BITS_PER_PAGE_MASK;

	clear_bit(offset, map->page);
	atomic_inc(&map->nr_free);
}

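/*
 * alloc_pidmap() - pick a free pid number in @pid_ns, scanning the
 * bitmap pages starting just after last_pid and wrapping around to
 * RESERVED_PIDS.  Bitmap pages are allocated lazily under pidmap_lock.
 * Returns the new number, or -1 if the namespace is out of pids.
 */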
static int alloc_pidmap(struct pid_namespace *pid_ns)
{
	int i, offset, max_scan, pid, last = pid_ns->last_pid;
	struct pidmap *map;

	pid = last + 1;
	if (pid >= pid_max)
		pid = RESERVED_PIDS;
	offset = pid & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
	max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
	for (i = 0; i <= max_scan; ++i) {
		if (unlikely(!map->page)) {
			void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
			/*
			 * Free the page if someone raced with us
			 * installing it:
			 */
			spin_lock_irq(&pidmap_lock);
			if (map->page)
				kfree(page);
			else
				map->page = page;
			spin_unlock_irq(&pidmap_lock);
			if (unlikely(!map->page))
				break;
		}
		if (likely(atomic_read(&map->nr_free))) {
			do {
				if (!test_and_set_bit(offset, map->page)) {
					atomic_dec(&map->nr_free);
					pid_ns->last_pid = pid;
					return pid;
				}
				offset = find_next_offset(map, offset);
				pid = mk_pid(pid_ns, map, offset);
			/*
			 * find_next_offset() found a bit, the pid from it
			 * is in-bounds, and if we fell back to the last
			 * bitmap block and the final block was the same
			 * as the starting point, pid is before last_pid.
			 */
			} while (offset < BITS_PER_PAGE && pid < pid_max &&
					(i != max_scan || pid < last ||
					    !((last+1) & BITS_PER_PAGE_MASK)));
		}
		if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
			++map;
			offset = 0;
		} else {
			map = &pid_ns->pidmap[0];
			offset = RESERVED_PIDS;
			if (unlikely(last == offset))
				break;
		}
		pid = mk_pid(pid_ns, map, offset);
	}
	return -1;
}

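/*
 * next_pidmap() - find the first allocated pid number in @pid_ns that is
 * greater than @last, or -1 if there is none.  Used to iterate over the
 * pid space (see find_ge_pid() below).
 */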
int next_pidmap(struct pid_namespace *pid_ns, int last)
{
	int offset;
	struct pidmap *map, *end;

	offset = (last + 1) & BITS_PER_PAGE_MASK;
	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
	for (; map < end; map++, offset = 0) {
		if (unlikely(!map->page))
			continue;
		offset = find_next_bit((map)->page, BITS_PER_PAGE, offset);
		if (offset < BITS_PER_PAGE)
			return mk_pid(pid_ns, map, offset);
	}
	return -1;
}

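/*
 * put_pid() - drop one reference to @pid; when the last reference goes
 * away the struct pid is returned to its namespace's slab cache and the
 * namespace reference taken in alloc_pid() is released.
 */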
void put_pid(struct pid *pid)
{
	struct pid_namespace *ns;

	if (!pid)
		return;

	ns = pid->numbers[pid->level].ns;
	if ((atomic_read(&pid->count) == 1) ||
	     atomic_dec_and_test(&pid->count)) {
		kmem_cache_free(ns->pid_cachep, pid);
		put_pid_ns(ns);
	}
}
EXPORT_SYMBOL_GPL(put_pid);

static void delayed_put_pid(struct rcu_head *rhp)
{
	struct pid *pid = container_of(rhp, struct pid, rcu);
	put_pid(pid);
}

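/*
 * free_pid() - unhash @pid at every namespace level, give its numbers
 * back to their bitmaps and defer the final put_pid() to an RCU grace
 * period, so lockless hash lookups never see a freed entry.
 */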
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}

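/*
 * alloc_pid() - allocate a struct pid visible in @ns and in all of its
 * ancestor namespaces: one pid number per level, hashed under
 * pidmap_lock.  Returns NULL if memory or pid numbers run out.
 */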
struct pid *alloc_pid(struct pid_namespace *ns)
{
	struct pid *pid;
	enum pid_type type;
	int i, nr;
	struct pid_namespace *tmp;
	struct upid *upid;

	pid = kmem_cache_alloc(ns->pid_cachep, GFP_KERNEL);
	if (!pid)
		goto out;

	tmp = ns;
	for (i = ns->level; i >= 0; i--) {
		nr = alloc_pidmap(tmp);
		if (nr < 0)
			goto out_free;

		pid->numbers[i].nr = nr;
		pid->numbers[i].ns = tmp;
		tmp = tmp->parent;
	}

	get_pid_ns(ns);
	pid->level = ns->level;
	atomic_set(&pid->count, 1);
	for (type = 0; type < PIDTYPE_MAX; ++type)
		INIT_HLIST_HEAD(&pid->tasks[type]);

	spin_lock_irq(&pidmap_lock);
	for (i = ns->level; i >= 0; i--) {
		upid = &pid->numbers[i];
		hlist_add_head_rcu(&upid->pid_chain,
				&pid_hash[pid_hashfn(upid->nr, upid->ns)]);
	}
	spin_unlock_irq(&pidmap_lock);

out:
	return pid;

out_free:
	while (++i <= ns->level)
		free_pidmap(pid->numbers + i);

	kmem_cache_free(ns->pid_cachep, pid);
	pid = NULL;
	goto out;
}

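/*
 * find_pid_ns() - look up the struct pid whose number is @nr in
 * namespace @ns.  The hash chain is walked with
 * hlist_for_each_entry_rcu(), so callers need RCU read-side protection.
 */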
struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
{
	struct hlist_node *elem;
	struct upid *pnr;

	hlist_for_each_entry_rcu(pnr, elem,
			&pid_hash[pid_hashfn(nr, ns)], pid_chain)
		if (pnr->nr == nr && pnr->ns == ns)
			return container_of(pnr, struct pid,
					numbers[ns->level]);

	return NULL;
}
EXPORT_SYMBOL_GPL(find_pid_ns);

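/*
 * find_vpid() - same lookup as find_pid_ns(), but in the pid namespace
 * of the calling task.
 */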
struct pid *find_vpid(int nr)
{
	return find_pid_ns(nr, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);

/*
 * attach_pid() must be called with the tasklist_lock write-held.
 */
void attach_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	struct pid_link *link;

	link = &task->pids[type];
	link->pid = pid;
	hlist_add_head_rcu(&link->node, &pid->tasks[type]);
}

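/*
 * __change_pid() - unhook @task from the pid it currently uses for
 * @type and point the link at @new.  If no task uses the old pid for
 * any type anymore, the old pid is freed.
 */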
static void __change_pid(struct task_struct *task, enum pid_type type,
			struct pid *new)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = new;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}

void detach_pid(struct task_struct *task, enum pid_type type)
{
	__change_pid(task, type, NULL);
}

void change_pid(struct task_struct *task, enum pid_type type,
		struct pid *pid)
{
	__change_pid(task, type, pid);
	attach_pid(task, type, pid);
}

/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
void transfer_pid(struct task_struct *old, struct task_struct *new,
			enum pid_type type)
{
	new->pids[type].pid = old->pids[type].pid;
	hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
}

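/*
 * pid_task() - return the first task attached to @pid for the given
 * @type, or NULL.  The tasks list is read with rcu_dereference(), so
 * this is safe under rcu_read_lock() or with tasklist_lock read-held.
 */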
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result = NULL;
	if (pid) {
		struct hlist_node *first;
		first = rcu_dereference(pid->tasks[type].first);
		if (first)
			result = hlist_entry(first, struct task_struct, pids[(type)].node);
	}
	return result;
}
EXPORT_SYMBOL(pid_task);

/*
 * Must be called under rcu_read_lock() or with tasklist_lock read-held.
 */
struct task_struct *find_task_by_pid_type_ns(int type, int nr,
		struct pid_namespace *ns)
{
	return pid_task(find_pid_ns(nr, ns), type);
}

EXPORT_SYMBOL(find_task_by_pid_type_ns);

struct task_struct *find_task_by_vpid(pid_t vnr)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
			current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);

struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
	return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);

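/*
 * get_task_pid(), get_pid_task() and find_get_pid() below take a
 * reference on the object they return, so the caller must drop it with
 * put_pid() or put_task_struct() respectively.
 *
 * A minimal usage sketch (not from this file):
 *
 *	struct pid *pid = get_task_pid(task, PIDTYPE_PID);
 *	... use pid after dropping whatever pinned the task ...
 *	put_pid(pid);
 */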
struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	rcu_read_lock();
	pid = get_pid(task->pids[type].pid);
	rcu_read_unlock();
	return pid;
}

struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
{
	struct task_struct *result;
	rcu_read_lock();
	result = pid_task(pid, type);
	if (result)
		get_task_struct(result);
	rcu_read_unlock();
	return result;
}

struct pid *find_get_pid(pid_t nr)
{
	struct pid *pid;

	rcu_read_lock();
	pid = get_pid(find_vpid(nr));
	rcu_read_unlock();

	return pid;
}
EXPORT_SYMBOL_GPL(find_get_pid);

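/*
 * pid_nr_ns() - return the number @pid is known by in namespace @ns,
 * or 0 if @pid has no mapping in that namespace.
 */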
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
	struct upid *upid;
	pid_t nr = 0;

	if (pid && ns->level <= pid->level) {
		upid = &pid->numbers[ns->level];
		if (upid->ns == ns)
			nr = upid->nr;
	}
	return nr;
}

pid_t pid_vnr(struct pid *pid)
{
	return pid_nr_ns(pid, current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(pid_vnr);

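/*
 * The task_*_nr_ns() helpers below translate a task's pid, tgid,
 * process group and session into the numbers seen from @ns.
 */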
pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pid(tsk), ns);
}
EXPORT_SYMBOL(task_pid_nr_ns);

pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_tgid(tsk), ns);
}
EXPORT_SYMBOL(task_tgid_nr_ns);

pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_pgrp(tsk), ns);
}
EXPORT_SYMBOL(task_pgrp_nr_ns);

pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
	return pid_nr_ns(task_session(tsk), ns);
}
EXPORT_SYMBOL(task_session_nr_ns);

/*
 * Used by proc to find the first pid that is greater than or equal to nr.
 *
 * If there is a pid at nr this function is exactly the same as find_pid_ns.
 */
struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
{
	struct pid *pid;

	do {
		pid = find_pid_ns(nr, ns);
		if (pid)
			break;
		nr = next_pidmap(ns, nr);
	} while (nr > 0);

	return pid;
}

/*
 * The pid hash table is scaled according to the amount of memory in the
 * machine.  From a minimum of 16 slots up to 4096 slots at one gigabyte or
 * more.
 */
void __init pidhash_init(void)
{
	int i, pidhash_size;
	unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);

	pidhash_shift = max(4, fls(megabytes * 4));
	pidhash_shift = min(12, pidhash_shift);
	pidhash_size = 1 << pidhash_shift;

	printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
		pidhash_size, pidhash_shift,
		pidhash_size * sizeof(struct hlist_head));

	pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
	if (!pid_hash)
		panic("Could not alloc pidhash!\n");
	for (i = 0; i < pidhash_size; i++)
		INIT_HLIST_HEAD(&pid_hash[i]);
}

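/*
 * pidmap_init() - allocate the first bitmap page of the initial pid
 * namespace, reserve pid 0 in it, and create the slab cache used for
 * struct pid allocations in that namespace.
 */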
void __init pidmap_init(void)
{
	init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
	/* Reserve PID 0. We never call free_pidmap(0) */
	set_bit(0, init_pid_ns.pidmap[0].page);
	atomic_dec(&init_pid_ns.pidmap[0].nr_free);

	init_pid_ns.pid_cachep = KMEM_CACHE(pid,
			SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}