[PATCH] IPC namespace - utils
ipc/util.c
/*
 * linux/ipc/util.c
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Sep 1997 - Call suser() last after "normal" permission checks so we
 *            get BSD style process accounting right.
 *            Occurs in several places in the IPC code.
 *            Chris Evans, <chris@ferret.lmh.ox.ac.uk>
 * Nov 1999 - ipc helper functions, unified SMP locking
 *            Manfred Spraul <manfred@colorfullife.com>
 * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary().
 *            Mingming Cao <cmm@us.ibm.com>
 * Mar 2006 - support for audit of ipc object properties
 *            Dustin Kirkland <dustin.kirkland@us.ibm.com>
 * Jun 2006 - namespaces support
 *            OpenVZ, SWsoft Inc.
 *            Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/mm.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/msg.h>
#include <linux/smp_lock.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/capability.h>
#include <linux/highuid.h>
#include <linux/security.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/audit.h>
#include <linux/nsproxy.h>

#include <asm/unistd.h>

#include "util.h"

struct ipc_proc_iface {
	const char *path;
	const char *header;
	int ids;
	int (*show)(struct seq_file *, void *);
};

struct ipc_namespace init_ipc_ns = {
	.kref = {
		.refcount	= ATOMIC_INIT(2),
	},
};

#ifdef CONFIG_IPC_NS
static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns)
{
	int err;
	struct ipc_namespace *ns;

	err = -ENOMEM;
	ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL);
	if (ns == NULL)
		goto err_mem;

	err = sem_init_ns(ns);
	if (err)
		goto err_sem;
	err = msg_init_ns(ns);
	if (err)
		goto err_msg;
	err = shm_init_ns(ns);
	if (err)
		goto err_shm;

	kref_init(&ns->kref);
	return ns;

err_shm:
	msg_exit_ns(ns);
err_msg:
	sem_exit_ns(ns);
err_sem:
	kfree(ns);
err_mem:
	return ERR_PTR(err);
}

int unshare_ipcs(unsigned long unshare_flags, struct ipc_namespace **new_ipc)
{
	struct ipc_namespace *new;

	if (unshare_flags & CLONE_NEWIPC) {
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;

		new = clone_ipc_ns(current->nsproxy->ipc_ns);
		if (IS_ERR(new))
			return PTR_ERR(new);

		*new_ipc = new;
	}

	return 0;
}

int copy_ipcs(unsigned long flags, struct task_struct *tsk)
{
	struct ipc_namespace *old_ns = tsk->nsproxy->ipc_ns;
	struct ipc_namespace *new_ns;
	int err = 0;

	if (!old_ns)
		return 0;

	get_ipc_ns(old_ns);

	if (!(flags & CLONE_NEWIPC))
		return 0;

	if (!capable(CAP_SYS_ADMIN)) {
		err = -EPERM;
		goto out;
	}

	/* clone_ipc_ns() returns ERR_PTR() on failure, never NULL */
	new_ns = clone_ipc_ns(old_ns);
	if (IS_ERR(new_ns)) {
		err = PTR_ERR(new_ns);
		goto out;
	}

	tsk->nsproxy->ipc_ns = new_ns;
out:
	put_ipc_ns(old_ns);
	return err;
}

void free_ipc_ns(struct kref *kref)
{
	struct ipc_namespace *ns;

	ns = container_of(kref, struct ipc_namespace, kref);
	sem_exit_ns(ns);
	msg_exit_ns(ns);
	shm_exit_ns(ns);
	kfree(ns);
}
#endif
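
/*
 * Usage sketch (illustrative, not part of this file): the functions
 * above are reached from the clone()/unshare() paths.  A process with
 * CAP_SYS_ADMIN can request a private IPC namespace from userspace:
 *
 *	if (unshare(CLONE_NEWIPC) == -1)
 *		perror("unshare");
 *
 * which lands in unshare_ipcs() -> clone_ipc_ns().  The old namespace
 * is dropped via put_ipc_ns(), which calls free_ipc_ns() once the
 * kref count reaches zero.
 */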

/**
 * ipc_init - initialise IPC subsystem
 *
 * The various System V IPC resources (semaphores, messages and shared
 * memory) are initialised.
 */

static int __init ipc_init(void)
{
	sem_init();
	msg_init();
	shm_init();
	return 0;
}
__initcall(ipc_init);

/**
 * ipc_init_ids - initialise IPC identifiers
 * @ids: Identifier set
 * @size: Number of identifiers
 *
 * Given a size for the ipc identifier range (limited below IPCMNI)
 * set up the sequence range to use then allocate and initialise the
 * array itself.
 */

void __ipc_init ipc_init_ids(struct ipc_ids* ids, int size)
{
	int i;

	mutex_init(&ids->mutex);

	if (size > IPCMNI)
		size = IPCMNI;
	ids->in_use = 0;
	ids->max_id = -1;
	ids->seq = 0;
	{
		int seq_limit = INT_MAX/SEQ_MULTIPLIER;
		if (seq_limit > USHRT_MAX)
			ids->seq_max = USHRT_MAX;
		else
			ids->seq_max = seq_limit;
	}

	ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size +
				     sizeof(struct ipc_id_ary));

	if (ids->entries == NULL) {
		printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n");
		size = 0;
		ids->entries = &ids->nullentry;
	}
	ids->entries->size = size;
	for (i = 0; i < size; i++)
		ids->entries->p[i] = NULL;
}
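
/*
 * Worked example (illustrative, assuming the usual SEQ_MULTIPLIER ==
 * IPCMNI == 32768 and a 32-bit int): INT_MAX/SEQ_MULTIPLIER == 65535 ==
 * USHRT_MAX, so seq_max becomes 65535.  Oversized requests are clamped:
 * ipc_init_ids(ids, 1000000) allocates only IPCMNI slots.
 */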

#ifdef CONFIG_PROC_FS
static struct file_operations sysvipc_proc_fops;

/**
 * ipc_init_proc_interface - Create a proc interface for sysvipc types
 * using a seq_file interface.
 * @path: Path in procfs
 * @header: Banner to be printed at the beginning of the file.
 * @ids: ipc id table to iterate.
 * @show: show routine.
 */
void __init ipc_init_proc_interface(const char *path, const char *header,
		int ids, int (*show)(struct seq_file *, void *))
{
	struct proc_dir_entry *pde;
	struct ipc_proc_iface *iface;

	iface = kmalloc(sizeof(*iface), GFP_KERNEL);
	if (!iface)
		return;
	iface->path	= path;
	iface->header	= header;
	iface->ids	= ids;
	iface->show	= show;

	pde = create_proc_entry(path,
				S_IRUGO,	/* world readable */
				NULL		/* parent dir */);
	if (pde) {
		pde->data = iface;
		pde->proc_fops = &sysvipc_proc_fops;
	} else {
		kfree(iface);
	}
}
#endif
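
/*
 * Registration sketch (illustrative; mirrors how the sysvipc callers
 * such as shm_init() use this helper):
 *
 *	ipc_init_proc_interface("sysvipc/shm",
 *				"       key      shmid perms ...\n",
 *				IPC_SHM_IDS, sysvipc_shm_proc_show);
 *
 * The header banner is emitted once at the top of /proc/sysvipc/shm
 * and show() is invoked for every live id.
 */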

/**
 * ipc_findkey - find a key in an ipc identifier set
 * @ids: Identifier set
 * @key: The key to find
 *
 * Requires ipc_ids.mutex locked.
 * Returns the identifier if found or -1 if not.
 */

int ipc_findkey(struct ipc_ids* ids, key_t key)
{
	int id;
	struct kern_ipc_perm* p;
	int max_id = ids->max_id;

	/*
	 * rcu_dereference() is not needed here
	 * since ipc_ids.mutex is held
	 */
	for (id = 0; id <= max_id; id++) {
		p = ids->entries->p[id];
		if (p == NULL)
			continue;
		if (key == p->key)
			return id;
	}
	return -1;
}

/*
 * Requires ipc_ids.mutex locked
 */
static int grow_ary(struct ipc_ids* ids, int newsize)
{
	struct ipc_id_ary* new;
	struct ipc_id_ary* old;
	int i;
	int size = ids->entries->size;

	if (newsize > IPCMNI)
		newsize = IPCMNI;
	if (newsize <= size)
		return newsize;

	new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize +
			    sizeof(struct ipc_id_ary));
	if (new == NULL)
		return size;
	new->size = newsize;
	memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size);
	for (i = size; i < newsize; i++) {
		new->p[i] = NULL;
	}
	old = ids->entries;

	/*
	 * Use rcu_assign_pointer() to make sure the memcpyed contents
	 * of the new array are visible before the new array becomes visible.
	 */
	rcu_assign_pointer(ids->entries, new);

	ipc_rcu_putref(old);
	return newsize;
}
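
/*
 * The publish side above pairs with the lock-free read side in
 * ipc_lock() further down:
 *
 *	rcu_read_lock();
 *	entries = rcu_dereference(ids->entries);
 *	...
 *
 * A concurrent reader therefore sees either the old array or the fully
 * initialised new one, and the old array is only freed after a grace
 * period via ipc_rcu_putref().
 */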

/**
 * ipc_addid - add an IPC identifier
 * @ids: IPC identifier set
 * @new: new IPC permission set
 * @size: new size limit for the id array
 *
 * Add an entry 'new' to the IPC arrays. The permissions object is
 * initialised, the first free entry is set up, and the assigned id is
 * returned. The list is returned in a locked state on success.
 * On failure the list is not locked and -1 is returned.
 *
 * Called with ipc_ids.mutex held.
 */

int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
	int id;

	size = grow_ary(ids, size);

	/*
	 * rcu_dereference() is not needed here since
	 * ipc_ids.mutex is held
	 */
	for (id = 0; id < size; id++) {
		if (ids->entries->p[id] == NULL)
			goto found;
	}
	return -1;
found:
	ids->in_use++;
	if (id > ids->max_id)
		ids->max_id = id;

	new->cuid = new->uid = current->euid;
	new->gid = new->cgid = current->egid;

	new->seq = ids->seq++;
	if (ids->seq > ids->seq_max)
		ids->seq = 0;

	spin_lock_init(&new->lock);
	new->deleted = 0;
	rcu_read_lock();
	spin_lock(&new->lock);
	ids->entries->p[id] = new;
	return id;
}
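
/*
 * Caller sketch (illustrative; "p" is a hypothetical object embedding a
 * kern_ipc_perm): on success ipc_addid() returns with rcu_read_lock()
 * and new->lock held, so a typical caller does
 *
 *	mutex_lock(&ids->mutex);
 *	id = ipc_addid(ids, &p->perm, size);
 *	if (id == -1)
 *		... nothing is locked on failure ...
 *	... finish initialising the object ...
 *	ipc_unlock(&p->perm);
 *	mutex_unlock(&ids->mutex);
 */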

/**
 * ipc_rmid - remove an IPC identifier
 * @ids: identifier set
 * @id: Identifier to remove
 *
 * The identifier must be valid, and in use. The kernel will panic if
 * fed an invalid identifier. The entry is removed and internal
 * variables recomputed. The object associated with the identifier
 * is returned.
 * ipc_ids.mutex and the spinlock for this ID are held before this
 * function is called, and remain locked on exit.
 */

struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* p;
	int lid = id % SEQ_MULTIPLIER;
	BUG_ON(lid >= ids->entries->size);

	/*
	 * do not need a rcu_dereference() here to force ordering
	 * on Alpha, since the ipc_ids.mutex is held.
	 */
	p = ids->entries->p[lid];
	ids->entries->p[lid] = NULL;
	BUG_ON(p == NULL);
	ids->in_use--;

	if (lid == ids->max_id) {
		do {
			lid--;
			if (lid == -1)
				break;
		} while (ids->entries->p[lid] == NULL);
		ids->max_id = lid;
	}
	p->deleted = 1;
	return p;
}

/**
 * ipc_alloc - allocate ipc space
 * @size: size desired
 *
 * Allocate memory from the appropriate pools and return a pointer to it.
 * NULL is returned if the allocation fails.
 */

void* ipc_alloc(int size)
{
	void* out;
	if (size > PAGE_SIZE)
		out = vmalloc(size);
	else
		out = kmalloc(size, GFP_KERNEL);
	return out;
}

/**
 * ipc_free - free ipc space
 * @ptr: pointer returned by ipc_alloc
 * @size: size of block
 *
 * Free a block created with ipc_alloc. The caller must know the size
 * used in the allocation call.
 */

void ipc_free(void* ptr, int size)
{
	if (size > PAGE_SIZE)
		vfree(ptr);
	else
		kfree(ptr);
}
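
/*
 * Pairing sketch (illustrative): the allocator is chosen by size, so
 * ipc_free() must be given the same size that was passed to
 * ipc_alloc(), or the wrong underlying free routine would be used:
 *
 *	void *buf = ipc_alloc(len);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	ipc_free(buf, len);	(same len as the allocation)
 */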

/*
 * rcu allocations:
 * There are three headers that are prepended to the actual allocation:
 * - during use: ipc_rcu_hdr.
 * - during the rcu grace period: ipc_rcu_grace.
 * - [only if vmalloc]: ipc_rcu_sched.
 * Their lifetime doesn't overlap, thus the headers share the same memory.
 * Unlike a normal union, they are right-aligned, thus some container_of
 * forward/backward casting is necessary:
 */
struct ipc_rcu_hdr
{
	int refcount;
	int is_vmalloc;
	void *data[0];
};


struct ipc_rcu_grace
{
	struct rcu_head rcu;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

struct ipc_rcu_sched
{
	struct work_struct work;
	/* "void *" makes sure alignment of following data is sane. */
	void *data[0];
};

#define HDRLEN_KMALLOC	(sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \
				sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr))
#define HDRLEN_VMALLOC	(sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \
				sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC)

static inline int rcu_use_vmalloc(int size)
{
	/* Too big for a single page? */
	if (HDRLEN_KMALLOC + size > PAGE_SIZE)
		return 1;
	return 0;
}
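
/*
 * Resulting layout (illustrative), vmalloc case:
 *
 *	base                          base + HDRLEN_VMALLOC
 *	|<-- headers (right-aligned) -->|<-- object ... -->|
 *	     ipc_rcu_sched /                ^
 *	     ipc_rcu_grace /                pointer returned
 *	     ipc_rcu_hdr                    to the caller
 *
 * All three headers end at the same address, so container_of() on the
 * returned pointer recovers whichever header is live at the time.
 */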

/**
 * ipc_rcu_alloc - allocate ipc and rcu space
 * @size: size desired
 *
 * Allocate memory for the rcu header structure + the object.
 * Returns the pointer to the object.
 * NULL is returned if the allocation fails.
 */

void* ipc_rcu_alloc(int size)
{
	void* out;
	/*
	 * We prepend the allocation with the rcu struct, and
	 * workqueue if necessary (for vmalloc).
	 */
	if (rcu_use_vmalloc(size)) {
		out = vmalloc(HDRLEN_VMALLOC + size);
		if (out) {
			out += HDRLEN_VMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	} else {
		out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
		if (out) {
			out += HDRLEN_KMALLOC;
			container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
			container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
		}
	}

	return out;
}

void ipc_rcu_getref(void *ptr)
{
	container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
}

/**
 * ipc_schedule_free - free ipc + rcu space
 * @head: RCU callback structure for queued work
 *
 * Since the RCU callback is run from bottom-half (softirq) context,
 * the vfree() must be deferred to process context via schedule_work().
 */
static void ipc_schedule_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *grace =
		container_of(head, struct ipc_rcu_grace, rcu);
	struct ipc_rcu_sched *sched =
		container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]);

	INIT_WORK(&sched->work, vfree, sched);
	schedule_work(&sched->work);
}

/**
 * ipc_immediate_free - free ipc + rcu space
 * @head: RCU callback structure that contains pointer to be freed
 *
 * Free from the RCU callback context
 */
static void ipc_immediate_free(struct rcu_head *head)
{
	struct ipc_rcu_grace *free =
		container_of(head, struct ipc_rcu_grace, rcu);
	kfree(free);
}

void ipc_rcu_putref(void *ptr)
{
	if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
		return;

	if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_schedule_free);
	} else {
		call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu,
				ipc_immediate_free);
	}
}
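
/*
 * Refcount lifecycle sketch (illustrative; "sma" stands for any object
 * obtained from ipc_rcu_alloc(), which starts with refcount == 1):
 *
 *	ipc_rcu_getref(sma);			(refcount 1 -> 2)
 *	ipc_unlock(&sma->sem_perm);
 *	... sleeping operation ...
 *	ipc_lock_by_ptr(&sma->sem_perm);
 *	ipc_rcu_putref(sma);			(refcount 2 -> 1)
 *
 * When the count reaches zero, the free is deferred through call_rcu();
 * vmalloc'ed objects take the extra schedule_work() hop because vfree()
 * may not run in softirq context.
 */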

/**
 * ipcperms - check IPC permissions
 * @ipcp: IPC permission set
 * @flag: desired permission set.
 *
 * Check user, group and other permissions for access
 * to ipc resources. Returns 0 if allowed.
 */

int ipcperms (struct kern_ipc_perm *ipcp, short flag)
{	/* flag will most probably be 0 or S_...UGO from <linux/stat.h> */
	int requested_mode, granted_mode, err;

	if (unlikely((err = audit_ipc_obj(ipcp))))
		return err;
	requested_mode = (flag >> 6) | (flag >> 3) | flag;
	granted_mode = ipcp->mode;
	if (current->euid == ipcp->cuid || current->euid == ipcp->uid)
		granted_mode >>= 6;
	else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
		granted_mode >>= 3;
	/* is there some bit set in requested_mode but not in granted_mode? */
	if ((requested_mode & ~granted_mode & 0007) &&
	    !capable(CAP_IPC_OWNER))
		return -1;

	return security_ipc_permission(ipcp, flag);
}
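
/*
 * Worked example (illustrative): a caller requesting read access passes
 * flag = S_IRUGO (0444).  Then
 *
 *	requested_mode = (0444 >> 6) | (0444 >> 3) | 0444
 *
 * smears the read bit into the low three bits.  For an owner match on
 * mode 0600, granted_mode >>= 6 leaves 06, and 04 & ~06 & 0007 == 0,
 * so access is granted; for "other" on the same mode the check yields
 * 04 and the request is refused unless the task has CAP_IPC_OWNER.
 */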

/*
 * Functions to convert between the kern_ipc_perm structure and the
 * old/new ipc_perm structures
 */

/**
 * kernel_to_ipc64_perm - convert kernel ipc permissions to user
 * @in: kernel permissions
 * @out: new style IPC permissions
 *
 * Turn the kernel object @in into a set of permissions descriptions
 * for returning to userspace (@out).
 */


void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
	out->key	= in->key;
	out->uid	= in->uid;
	out->gid	= in->gid;
	out->cuid	= in->cuid;
	out->cgid	= in->cgid;
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/**
 * ipc64_perm_to_ipc_perm - convert new ipc permissions to old
 * @in: new style IPC permissions
 * @out: old style IPC permissions
 *
 * Turn the new style permissions object @in into a compatibility
 * object and store it into the @out pointer.
 */

void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
	out->key	= in->key;
	SET_UID(out->uid, in->uid);
	SET_GID(out->gid, in->gid);
	SET_UID(out->cuid, in->cuid);
	SET_GID(out->cgid, in->cgid);
	out->mode	= in->mode;
	out->seq	= in->seq;
}

/*
 * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get()
 * is called with shm_ids.mutex locked. Since grow_ary() is also called with
 * shm_ids.mutex down (for shared memory), there is no need to add read
 * barriers here to guarantee the writes in grow_ary() are seen in order
 * here (for Alpha).
 *
 * However ipc_get() itself does not necessarily require ipc_ids.mutex down.
 * So if in the future ipc_get() is used by other places without
 * ipc_ids.mutex down, then ipc_get() needs read memory barriers as
 * ipc_lock() does.
 */
struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	if (lid >= ids->entries->size)
		return NULL;
	out = ids->entries->p[lid];
	return out;
}

struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id)
{
	struct kern_ipc_perm* out;
	int lid = id % SEQ_MULTIPLIER;
	struct ipc_id_ary* entries;

	rcu_read_lock();
	entries = rcu_dereference(ids->entries);
	if (lid >= entries->size) {
		rcu_read_unlock();
		return NULL;
	}
	out = entries->p[lid];
	if (out == NULL) {
		rcu_read_unlock();
		return NULL;
	}
	spin_lock(&out->lock);

	/* ipc_rmid() may have already freed the ID while ipc_lock
	 * was spinning: here verify that the structure is still valid
	 */
	if (out->deleted) {
		spin_unlock(&out->lock);
		rcu_read_unlock();
		return NULL;
	}
	return out;
}

void ipc_lock_by_ptr(struct kern_ipc_perm *perm)
{
	rcu_read_lock();
	spin_lock(&perm->lock);
}

void ipc_unlock(struct kern_ipc_perm* perm)
{
	spin_unlock(&perm->lock);
	rcu_read_unlock();
}

int ipc_buildid(struct ipc_ids* ids, int id, int seq)
{
	return SEQ_MULTIPLIER*seq + id;
}

int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid)
{
	if (uid/SEQ_MULTIPLIER != ipcp->seq)
		return 1;
	return 0;
}
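
/*
 * Encoding example (illustrative, assuming SEQ_MULTIPLIER == IPCMNI ==
 * 32768): slot 5 reused for the 4th time (seq == 3) yields
 *
 *	ipc_buildid(ids, 5, 3) == 3*32768 + 5 == 98309
 *
 * and ipc_checkid() recovers the sequence as 98309/32768 == 3, so a
 * stale id built with an older seq for the same slot is rejected.
 */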

#ifdef __ARCH_WANT_IPC_PARSE_VERSION


/**
 * ipc_parse_version - IPC call version
 * @cmd: pointer to command
 *
 * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
 * The @cmd value is turned from an encoded command and version into
 * just the command code.
 */

int ipc_parse_version (int *cmd)
{
	if (*cmd & IPC_64) {
		*cmd ^= IPC_64;
		return IPC_64;
	} else {
		return IPC_OLD;
	}
}

#endif /* __ARCH_WANT_IPC_PARSE_VERSION */
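
/*
 * Example (illustrative): a new-style semctl(IPC_STAT) arrives with the
 * IPC_64 flag set, which ipc_parse_version() strips in place:
 *
 *	int cmd = IPC_STAT | IPC_64;
 *	int version = ipc_parse_version(&cmd);
 *	 (now version == IPC_64 and cmd == IPC_STAT)
 */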

#ifdef CONFIG_PROC_FS
static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc = it;
	loff_t p;
	struct ipc_ids *ids;

	ids = current->nsproxy->ipc_ns->ids[iface->ids];

	/* If we had an ipc id locked before, unlock it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	/*
	 * p = *pos - 1 (because id 0 starts at position 1)
	 *          + 1 (because we increment the position by one)
	 */
	for (p = *pos; p <= ids->max_id; p++) {
		if ((ipc = ipc_lock(ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}

	/* Out of range - return NULL to terminate iteration */
	return NULL;
}

/*
 * File positions: pos 0 -> header, pos n -> ipc id + 1.
 * SeqFile iterator: iterator value locked ipc pointer or SEQ_START_TOKEN.
 */
static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
	struct ipc_proc_iface *iface = s->private;
	struct kern_ipc_perm *ipc;
	loff_t p;
	struct ipc_ids *ids;

	ids = current->nsproxy->ipc_ns->ids[iface->ids];

	/*
	 * Take the lock - this will be released by the corresponding
	 * call to stop().
	 */
	mutex_lock(&ids->mutex);

	/* pos < 0 is invalid */
	if (*pos < 0)
		return NULL;

	/* pos == 0 means header */
	if (*pos == 0)
		return SEQ_START_TOKEN;

	/* Find the (pos-1)th ipc */
	for (p = *pos - 1; p <= ids->max_id; p++) {
		if ((ipc = ipc_lock(ids, p)) != NULL) {
			*pos = p + 1;
			return ipc;
		}
	}
	return NULL;
}

static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
	struct kern_ipc_perm *ipc = it;
	struct ipc_proc_iface *iface = s->private;
	struct ipc_ids *ids;

	/* If we had a locked segment, release it */
	if (ipc && ipc != SEQ_START_TOKEN)
		ipc_unlock(ipc);

	ids = current->nsproxy->ipc_ns->ids[iface->ids];
	/* Release the lock we took in start() */
	mutex_unlock(&ids->mutex);
}

static int sysvipc_proc_show(struct seq_file *s, void *it)
{
	struct ipc_proc_iface *iface = s->private;

	if (it == SEQ_START_TOKEN)
		return seq_puts(s, iface->header);

	return iface->show(s, it);
}

static struct seq_operations sysvipc_proc_seqops = {
	.start = sysvipc_proc_start,
	.stop  = sysvipc_proc_stop,
	.next  = sysvipc_proc_next,
	.show  = sysvipc_proc_show,
};

static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
	int ret;
	struct seq_file *seq;

	ret = seq_open(file, &sysvipc_proc_seqops);
	if (!ret) {
		seq = file->private_data;
		seq->private = PDE(inode)->data;
	}
	return ret;
}

static struct file_operations sysvipc_proc_fops = {
	.open    = sysvipc_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif /* CONFIG_PROC_FS */