/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
	const char *path;
	const char *header;
	struct ipc_ids *ids;
	int (*show)(struct seq_file *, void *);
};

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various system5 IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI),
 * set up the sequence range to use, then allocate and initialise the
 * array itself.
 */

void __init ipc_init_ids(struct ipc_ids* ids, int size)
{
	int i;

	mutex_init(&ids->mutex);

	if(size > IPCMNI)
		size = IPCMNI;
	ids->in_use = 0;
	ids->max_id = -1;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if(seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
				     sizeof(struct ipc_id_ary));

	if(ids->entries == NULL) {
		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
		size = 0;
		ids->entries = &ids->nullentry;
	}
	ids->entries->size = size;
	for(i=0;i<size;i++)
		ids->entries->p[i] = NULL;
}
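
/*
 * Illustrative sketch (not part of this file): each IPC subsystem owns
 * its own struct ipc_ids and sizes it from its sysctl limit, roughly
 * as follows (names modelled on ipc/sem.c):
 *
 *	static struct ipc_ids sem_ids;
 *
 *	void __init sem_init(void)
 *	{
 *		ipc_init_ids(&sem_ids, sc_semmni);
 *		...
 *	}
 */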

#ifdef CONFIG_PROC_FS
static struct file_operations sysvipc_proc_fops;
/**
 * ipc_init_proc_interface - Create a proc interface for sysipc types
 *			     using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
				    struct ipc_ids *ids,
				    int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	pde = create_proc_entry(path,
				S_IRUGO,	/* world readable */
				NULL		/* parent dir */);
	if (pde) {
		pde->data = iface;
		pde->proc_fops = &sysvipc_proc_fops;
	} else {
		kfree(iface);
	}
}
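
/*
 * Illustrative sketch (not part of this file): a subsystem registers
 * its /proc view with a call like the following; the path, banner and
 * show routine are examples modelled on ipc/shm.c:
 *
 *	ipc_init_proc_interface("sysvipc/shm",
 *				"       key      shmid ...\n",
 *				&shm_ids,
 *				sysvipc_shm_proc_show);
 */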
#endif

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.mutex locked.
 * Returns the identifier if found or -1 if not.
 */

int ipc_findkey(struct ipc_ids* ids, key_t key)
{
	int id;
	struct kern_ipc_perm* p;
	int max_id = ids->max_id;

	/*
	 * rcu_dereference() is not needed here
	 * since ipc_ids.mutex is held
	 */
	for (id = 0; id <= max_id; id++) {
		p = ids->entries->p[id];
		if(p==NULL)
			continue;
		if (key == p->key)
			return id;
	}
	return -1;
}

/*
 * Requires ipc_ids.mutex locked
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
	struct ipc_id_ary* new;
	struct ipc_id_ary* old;
	int i;
	int size = ids->entries->size;

	if(newsize > IPCMNI)
		newsize = IPCMNI;
	if(newsize <= size)
		return newsize;

	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
			    sizeof(struct ipc_id_ary));
	if(new == NULL)
		return size;
	new->size = newsize;
	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
	for(i=size;i<newsize;i++) {
		new->p[i] = NULL;
	}
	old = ids->entries;

	/*
	 * Use rcu_assign_pointer() to make sure the memcpyed contents
	 * of the new array are visible before the new array becomes visible.
	 */
	rcu_assign_pointer(ids->entries, new);

	ipc_rcu_putref(old);
	return newsize;
}
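
/*
 * Sketch of the reader side that the rcu_assign_pointer() in grow_ary()
 * pairs with (ipc_lock() below is the real instance of this pattern):
 *
 *	rcu_read_lock();
 *	entries = rcu_dereference(ids->entries);
 *	... use entries->p[lid] ...
 *	rcu_read_unlock();
 */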

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised, the first free entry is set up, and the assigned id is
 * returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.mutex held.
 */

int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id;

	size = grow_ary(ids,size);

	/*
	 * rcu_dereference() is not needed here since
	 * ipc_ids.mutex is held
	 */
	for (id = 0; id < size; id++) {
		if(ids->entries->p[id] == NULL)
			goto found;
	}
	return -1;
found:
	ids->in_use++;
	if (id > ids->max_id)
		ids->max_id = id;

	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	new->seq = ids->seq++;
	if(ids->seq > ids->seq_max)
		ids->seq = 0;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);
	ids->entries->p[id] = new;
	return id;
}
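
/*
 * Illustrative caller pattern (not part of this file): ipc_addid()
 * returns with the new entry spinlocked and rcu_read_lock() held, so a
 * caller looks roughly like the following (the error value is only an
 * example):
 *
 *	mutex_lock(&ids->mutex);
 *	id = ipc_addid(ids, &new->perm, limit);
 *	if (id == -1) {
 *		mutex_unlock(&ids->mutex);
 *		return -ENOSPC;
 *	}
 *	... finish initialising *new ...
 *	ipc_unlock(&new->perm);
 *	mutex_unlock(&ids->mutex);
 */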

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.mutex and the spinlock for this ID are held before this
 * function is called, and remain locked on exit.
 */

struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* p;
	int lid = id % SEQ_MULTIPLIER;
	BUG_ON(lid >= ids->entries->size);

	/*
	 * do not need a rcu_dereference() here to force ordering
	 * on Alpha, since the ipc_ids.mutex is held.
	 */
	p = ids->entries->p[lid];
	ids->entries->p[lid] = NULL;
	BUG_ON(p==NULL);
	ids->in_use--;

	if (lid == ids->max_id) {
		do {
			lid--;
			if(lid == -1)
				break;
		} while (ids->entries->p[lid] == NULL);
		ids->max_id = lid;
	}
	p->deleted = 1;
	return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails
 */

void* ipc_alloc(int size)
{
	void* out;
	if(size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc. The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void* ptr, int size)
{
	if(size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}
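
/*
 * Illustrative pairing (not part of this file): the caller must pass
 * the same size to ipc_free() so the matching deallocator is chosen;
 * sem_array below is purely an example type:
 *
 *	struct sem_array *sma = ipc_alloc(size);
 *	if (sma == NULL)
 *		return -ENOMEM;
 *	...
 *	ipc_free(sma, size);
 */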

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC		(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
					sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC		(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
					sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}
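
/*
 * Worked example of the layout (illustrative): callers receive the
 * pointer base+HDRLEN, with the headers right-aligned just below it:
 *
 *	base                  base+HDRLEN
 *	|<-- header region -->|<-- object -->|
 *
 * so a header is always recovered by casting backwards from the data[]
 * flexible array:
 *
 *	struct ipc_rcu_hdr *h = container_of(ptr, struct ipc_rcu_hdr, data);
 */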

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void* ipc_rcu_alloc(int size)
{
	void* out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}

void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback function is called in bh context,
 * we need to defer the vfree to schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace =
		container_of(head, struct ipc_rcu_grace, rcu);
	struct ipc_rcu_sched *sched =
		container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

	INIT_WORK(&sched->work, vfree, sched);
	schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context
 */
static void ipc_immediate_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *free =
		container_of(head, struct ipc_rcu_grace, rcu);
	kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}
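
/*
 * Illustrative refcount lifecycle (not part of this file), modelled on
 * the pattern in ipc/sem.c; 'sma' is just an example object. A caller
 * that must drop the spinlock but keep the object alive does roughly:
 *
 *	ipc_rcu_getref(sma);
 *	ipc_unlock(&sma->sem_perm);
 *	... sleep or allocate ...
 *	ipc_lock_by_ptr(&sma->sem_perm);
 *	ipc_rcu_putref(sma);
 */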

/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group, other permissions for access
 * to ipc resources. return 0 if allowed
 */

int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}
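
/*
 * Worked example (illustrative): for flag = S_IRUGO | S_IWUGO (0666),
 * requested_mode has 06 in its low three bits. If the caller's euid
 * matches the owner, granted_mode = mode >> 6, so mode 0600 grants the
 * request while mode 0400 misses the write bit and fails with -1 unless
 * the task has CAP_IPC_OWNER. A typical call site:
 *
 *	if (ipcperms(&shp->shm_perm, S_IRUGO))
 *		return -EACCES;
 *
 * 'shp' is shown only as an example object.
 */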

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object 'in' into a set of permissions descriptions
 * for returning to userspace (out).
 */


void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key	= in->key;
	out->uid	= in->uid;
	out->gid	= in->gid;
	out->cuid	= in->cuid;
	out->cgid	= in->cgid;
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object 'in' into a compatibility
 * object and store it into the 'out' pointer.
 */

void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key	= in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
 * shm_ids.mutex down (for shared memory), there is no need to add read
 * barriers here to guarantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
 * However ipc_get() itself does not necessarily require ipc_ids.mutex down.
 * So if in the future ipc_get() is used by other places without ipc_ids.mutex
 * down, then ipc_get() needs read memory barriers as ipc_lock() does.
 */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	if(lid >= ids->entries->size)
		return NULL;
	out = ids->entries->p[lid];
	return out;
}

struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	struct ipc_id_ary* entries;

	rcu_read_lock();
	entries = rcu_dereference(ids->entries);
	if(lid >= entries->size) {
		rcu_read_unlock();
		return NULL;
	}
	out = entries->p[lid];
	if(out == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return NULL;
	}
	return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
	rcu_read_lock();
	spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm* perm)
{
	spin_unlock(&perm->lock);
	rcu_read_unlock();
}
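
/*
 * Illustrative lock/unlock pattern (not part of this file); msg_ids is
 * shown only as an example identifier set:
 *
 *	struct kern_ipc_perm *perm = ipc_lock(&msg_ids, id);
 *	if (perm == NULL)
 *		return -EINVAL;
 *	... perm is spinlocked and RCU-protected here ...
 *	ipc_unlock(perm);
 */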

int ipc_buildid(struct ipc_ids* ids, int id, int seq)
{
	return SEQ_MULTIPLIER*seq + id;
}

int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
	if(uid/SEQ_MULTIPLIER != ipcp->seq)
		return 1;
	return 0;
}
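
/*
 * Worked example (illustrative): with SEQ_MULTIPLIER == IPCMNI == 32768,
 * array slot 5 used with sequence number 3 yields the user-visible id
 *
 *	ipc_buildid(ids, 5, 3) == 32768*3 + 5 == 98309
 *
 * and ipc_checkid() recovers 98309/32768 == 3 to compare against
 * ipcp->seq, so a stale id naming a recycled slot is rejected.
 */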

#ifdef __ARCH_WANT_IPC_PARSE_VERSION


/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The cmd value is turned from an encoded command and version into
 * just the command code.
 */

int ipc_parse_version (int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}
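
/*
 * Illustrative use (not part of this file): callers strip the version
 * from the command before dispatching; after the call, cmd has IPC_64
 * cleared:
 *
 *	version = ipc_parse_version(&cmd);
 *	switch (cmd) {
 *	case IPC_STAT:
 *		...
 *	}
 */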

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */

#ifdef CONFIG_PROC_FS
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc = it;
	loff_t p;

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/*
	 * p = *pos - 1 (because id 0 starts at position 1)
	 *          + 1 (because we increment the position by one)
	 */
	for (p = *pos; p <= iface->ids->max_id; p++) {
		if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: iterator value is a locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc;
	loff_t p;

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	mutex_lock(&iface->ids->mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	for (p = *pos - 1; p <= iface->ids->max_id; p++) {
		if ((ipc = ipc_lock(iface->ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}
	return NULL;
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iface *iface = s->private;

	/* If we had a locked segment, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/* Release the lock we took in start() */
	mutex_unlock(&iface->ids->mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iface *iface = s->private;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (!ret) {
		seq = file->private_data;
		seq->private = PDE(inode)->data;
	}
	return ret;
}

static struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */