1 /*
2 * linux/ipc/sem.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 * Copyright (C) 1995 Eric Schenk, Bruno Haible
5 *
6 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
7 *
8 * SMP-threaded, sysctl's added
9 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
10 * Enforced range limit on SEM_UNDO
11 * (c) 2001 Red Hat Inc
12 * Lockless wakeup
13 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
14 * Further wakeup optimizations, documentation
15 * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 *
24 * Implementation notes: (May 2010)
25 * This file implements System V semaphores.
26 *
27 * User space visible behavior:
28 * - FIFO ordering for semop() operations (just FIFO, not starvation
29 * protection)
30 * - multiple semaphore operations that alter the same semaphore in
31 * one semop() are handled.
32 * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
33 * SETALL calls.
34 * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
35 * - undo adjustments at process exit are limited to 0..SEMVMX.
36 *   - namespaces are supported.
37 *   - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
38 * to /proc/sys/kernel/sem.
39 * - statistics about the usage are reported in /proc/sysvipc/sem.
40 *
41 * Internals:
42 * - scalability:
43 * - all global variables are read-mostly.
44 * - semop() calls and semctl(RMID) are synchronized by RCU.
45 * - most operations do write operations (actually: spin_lock calls) to
46 * the per-semaphore array structure.
47 * Thus: Perfect SMP scaling between independent semaphore arrays.
48 * If multiple semaphores in one array are used, then cache line
49 * trashing on the semaphore array spinlock will limit the scaling.
50 * - semncnt and semzcnt are calculated on demand in count_semncnt() and
51 * count_semzcnt()
52 * - the task that performs a successful semop() scans the list of all
53 * sleeping tasks and completes any pending operations that can be fulfilled.
54 * Semaphores are actively given to waiting tasks (necessary for FIFO).
55 * (see update_queue())
56 * - To improve the scalability, the actual wake-up calls are performed after
57 * dropping all locks. (see wake_up_sem_queue_prepare(),
58 * wake_up_sem_queue_do())
59 * - All work is done by the waker, the woken up task does not have to do
60 * anything - not even acquiring a lock or dropping a refcount.
61 * - A woken up task may not even touch the semaphore array anymore, it may
62 * have been destroyed already by a semctl(RMID).
63 * - The synchronization between wake-ups due to a timeout/signal and a
64 * wake-up due to a completed semaphore operation is achieved by using an
65 * intermediate state (IN_WAKEUP).
66 * - UNDO values are stored in an array (one per process and per
67 * semaphore array, lazily allocated). For backwards compatibility, multiple
68 * modes for the UNDO variables are supported (per process, per thread)
69 * (see copy_semundo, CLONE_SYSVSEM)
70 * - There are two lists of the pending operations: a per-array list
71 *   and a per-semaphore list (stored in the array). This makes it possible to achieve FIFO
72 * ordering without always scanning all pending operations.
73 * The worst-case behavior is nevertheless O(N^2) for N wakeups.
74 */
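/*
 * A rough user-space sketch (not part of this file, shown only to illustrate
 * the behaviour documented above): one semop() call that atomically
 * decrements two semaphores, with SEM_UNDO so that exit_sem() reverts the
 * adjustments if the process dies. Error handling is omitted for brevity.
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/sem.h>
 *
 *	union semun { int val; struct semid_ds *buf; unsigned short *array; };
 *
 *	int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);
 *	unsigned short init[2] = { 1, 1 };
 *	union semun arg = { .array = init };
 *	semctl(id, 0, SETALL, arg);
 *
 *	struct sembuf sops[2] = {
 *		{ .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO },
 *		{ .sem_num = 1, .sem_op = -1, .sem_flg = SEM_UNDO },
 *	};
 *	semop(id, sops, 2);	   both decrements succeed or neither does
 */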
75
76 #include <linux/slab.h>
77 #include <linux/spinlock.h>
78 #include <linux/init.h>
79 #include <linux/proc_fs.h>
80 #include <linux/time.h>
81 #include <linux/security.h>
82 #include <linux/syscalls.h>
83 #include <linux/audit.h>
84 #include <linux/capability.h>
85 #include <linux/seq_file.h>
86 #include <linux/rwsem.h>
87 #include <linux/nsproxy.h>
88 #include <linux/ipc_namespace.h>
89
90 #include <asm/uaccess.h>
91 #include "util.h"
92
93 /* One semaphore structure for each semaphore in the system. */
94 struct sem {
95 int semval; /* current value */
96 int sempid; /* pid of last operation */
97 spinlock_t lock; /* spinlock for fine-grained semtimedop */
98 struct list_head pending_alter; /* pending single-sop operations */
99 /* that alter the semaphore */
100 struct list_head pending_const; /* pending single-sop operations */
101 /* that do not alter the semaphore*/
102 time_t sem_otime; /* candidate for sem_otime */
103 } ____cacheline_aligned_in_smp;
104
105 /* One queue for each sleeping process in the system. */
106 struct sem_queue {
107 struct list_head list; /* queue of pending operations */
108 struct task_struct *sleeper; /* this process */
109 struct sem_undo *undo; /* undo structure */
110 int pid; /* process id of requesting process */
111 int status; /* completion status of operation */
112 struct sembuf *sops; /* array of pending operations */
113 int nsops; /* number of operations */
114 int alter; /* does *sops alter the array? */
115 };
116
117 /* Each task has a list of undo requests. They are executed automatically
118 * when the process exits.
119 */
120 struct sem_undo {
121 struct list_head list_proc; /* per-process list: *
122 * all undos from one process
123 * rcu protected */
124 struct rcu_head rcu; /* rcu struct for sem_undo */
125 struct sem_undo_list *ulp; /* back ptr to sem_undo_list */
126 struct list_head list_id; /* per semaphore array list:
127 * all undos for one array */
128 int semid; /* semaphore set identifier */
129 short *semadj; /* array of adjustments */
130 /* one per semaphore */
131 };
132
133 /* sem_undo_list controls shared access to the list of sem_undo structures
134  * that may be shared among all tasks in a CLONE_SYSVSEM task group.
135 */
136 struct sem_undo_list {
137 atomic_t refcnt;
138 spinlock_t lock;
139 struct list_head list_proc;
140 };
141
142
143 #define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
144
145 #define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
146
147 static int newary(struct ipc_namespace *, struct ipc_params *);
148 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
149 #ifdef CONFIG_PROC_FS
150 static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
151 #endif
152
153 #define SEMMSL_FAST 256 /* 512 bytes on stack */
154 #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */
155
156 /*
157 * Locking:
158 * sem_undo.id_next,
159 * sem_array.complex_count,
160  *	sem_array.pending{_alter,_const},
161 * sem_array.sem_undo: global sem_lock() for read/write
162 * sem_undo.proc_next: only "current" is allowed to read/write that field.
163 *
164 * sem_array.sem_base[i].pending_{const,alter}:
165 * global or semaphore sem_lock() for read/write
166 */
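/*
 * A simplified sketch of the resulting lock usage (illustrative only; see
 * sem_lock() and sem_wait_array() below for the real rules):
 *
 *	single-sop semop() on semaphore s, no complex ops pending:
 *		spin_lock(&sma->sem_base[s].lock);	   fine-grained fast path
 *
 *	multi-sop semop(), semctl(), IPC_RMID:
 *		ipc_lock_object(&sma->sem_perm);	   global per-array lock
 *		sem_wait_array(sma);			   wait for running simple ops
 */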
167
168 #define sc_semmsl sem_ctls[0]
169 #define sc_semmns sem_ctls[1]
170 #define sc_semopm sem_ctls[2]
171 #define sc_semmni sem_ctls[3]
172
173 void sem_init_ns(struct ipc_namespace *ns)
174 {
175 ns->sc_semmsl = SEMMSL;
176 ns->sc_semmns = SEMMNS;
177 ns->sc_semopm = SEMOPM;
178 ns->sc_semmni = SEMMNI;
179 ns->used_sems = 0;
180 ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
181 }
182
183 #ifdef CONFIG_IPC_NS
184 void sem_exit_ns(struct ipc_namespace *ns)
185 {
186 free_ipcs(ns, &sem_ids(ns), freeary);
187 idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
188 }
189 #endif
190
191 void __init sem_init (void)
192 {
193 sem_init_ns(&init_ipc_ns);
194 ipc_init_proc_interface("sysvipc/sem",
195 " key semid perms nsems uid gid cuid cgid otime ctime\n",
196 IPC_SEM_IDS, sysvipc_sem_proc_show);
197 }
198
199 /**
200 * unmerge_queues - unmerge queues, if possible.
201 * @sma: semaphore array
202 *
203 * The function unmerges the wait queues if complex_count is 0.
204 * It must be called prior to dropping the global semaphore array lock.
205 */
206 static void unmerge_queues(struct sem_array *sma)
207 {
208 struct sem_queue *q, *tq;
209
210 /* complex operations still around? */
211 if (sma->complex_count)
212 return;
213 /*
214 * We will switch back to simple mode.
215  * Move all pending operations back into the per-semaphore
216 * queues.
217 */
218 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
219 struct sem *curr;
220 curr = &sma->sem_base[q->sops[0].sem_num];
221
222 list_add_tail(&q->list, &curr->pending_alter);
223 }
224 INIT_LIST_HEAD(&sma->pending_alter);
225 }
226
227 /**
228 * merge_queues - Merge single semop queues into global queue
229 * @sma: semaphore array
230 *
231 * This function merges all per-semaphore queues into the global queue.
232 * It is necessary to achieve FIFO ordering for the pending single-sop
233 * operations when a multi-semop operation must sleep.
234 * Only the alter operations must be moved, the const operations can stay.
235 */
236 static void merge_queues(struct sem_array *sma)
237 {
238 int i;
239 for (i = 0; i < sma->sem_nsems; i++) {
240 struct sem *sem = sma->sem_base + i;
241
242 list_splice_init(&sem->pending_alter, &sma->pending_alter);
243 }
244 }
245
246 static void sem_rcu_free(struct rcu_head *head)
247 {
248 struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
249 struct sem_array *sma = ipc_rcu_to_struct(p);
250
251 security_sem_free(sma);
252 ipc_rcu_free(head);
253 }
254
255 /*
256 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
257 * are only control barriers.
258 * The code must pair with spin_unlock(&sem->lock) or
259 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
260 *
261 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
262 */
263 #define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
264
265 /*
266 * Wait until all currently ongoing simple ops have completed.
267 * Caller must own sem_perm.lock.
268 * New simple ops cannot start, because simple ops first check
269 * that sem_perm.lock is free.
270 */
271 static void sem_wait_array(struct sem_array *sma)
272 {
273 int i;
274 struct sem *sem;
275
276 for (i = 0; i < sma->sem_nsems; i++) {
277 sem = sma->sem_base + i;
278 spin_unlock_wait(&sem->lock);
279 }
280 ipc_smp_acquire__after_spin_is_unlocked();
281 }
282
283 /*
284 * If the request contains only one semaphore operation, and there are
285 * no complex transactions pending, lock only the semaphore involved.
286 * Otherwise, lock the entire semaphore array, since we either have
287 * multiple semaphores in our own semops, or we need to look at
288 * semaphores from other pending complex operations.
289 */
290 static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
291 int nsops)
292 {
293 struct sem *sem;
294
295 if (nsops != 1) {
296 /* Complex operation - acquire a full lock */
297 ipc_lock_object(&sma->sem_perm);
298
299 /* And wait until all simple ops that are processed
300 * right now have dropped their locks.
301 */
302 sem_wait_array(sma);
303 return -1;
304 }
305
306 /*
307 * Only one semaphore affected - try to optimize locking.
308 * The rules are:
309 * - optimized locking is possible if no complex operation
310 * is either enqueued or processed right now.
311 * - The test for enqueued complex ops is simple:
312 * sma->complex_count != 0
313 * - Testing for complex ops that are processed right now is
314 * a bit more difficult. Complex ops acquire the full lock
315  *   and first wait until the running simple ops have completed.
316 * (see above)
317 * Thus: If we own a simple lock and the global lock is free
318 * and complex_count is now 0, then it will stay 0 and
319 * thus just locking sem->lock is sufficient.
320 */
321 sem = sma->sem_base + sops->sem_num;
322
323 if (sma->complex_count == 0) {
324 /*
325 * It appears that no complex operation is around.
326 * Acquire the per-semaphore lock.
327 */
328 spin_lock(&sem->lock);
329
330 /* Then check that the global lock is free */
331 if (!spin_is_locked(&sma->sem_perm.lock)) {
332 /*
333 * We need a memory barrier with acquire semantics,
334 * otherwise we can race with another thread that does:
335 * complex_count++;
336 * spin_unlock(sem_perm.lock);
337 */
338 ipc_smp_acquire__after_spin_is_unlocked();
339
340 /* Now repeat the test of complex_count:
341 * It can't change anymore until we drop sem->lock.
342  * Thus: if it is now 0, then it will stay 0.
343 */
344 if (sma->complex_count == 0) {
345 /* fast path successful! */
346 return sops->sem_num;
347 }
348 }
349 spin_unlock(&sem->lock);
350 }
351
352 /* slow path: acquire the full lock */
353 ipc_lock_object(&sma->sem_perm);
354
355 if (sma->complex_count == 0) {
356 /* False alarm:
357 * There is no complex operation, thus we can switch
358 * back to the fast path.
359 */
360 spin_lock(&sem->lock);
361 ipc_unlock_object(&sma->sem_perm);
362 return sops->sem_num;
363 } else {
364 /* Not a false alarm, thus complete the sequence for a
365 * full lock.
366 */
367 sem_wait_array(sma);
368 return -1;
369 }
370 }
371
372 static inline void sem_unlock(struct sem_array *sma, int locknum)
373 {
374 if (locknum == -1) {
375 unmerge_queues(sma);
376 ipc_unlock_object(&sma->sem_perm);
377 } else {
378 struct sem *sem = sma->sem_base + locknum;
379 spin_unlock(&sem->lock);
380 }
381 }
382
383 /*
384 * sem_lock_(check_) routines are called in the paths where the rwsem
385 * is not held.
386 *
387 * The caller holds the RCU read lock.
388 */
389 static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
390 int id, struct sembuf *sops, int nsops, int *locknum)
391 {
392 struct kern_ipc_perm *ipcp;
393 struct sem_array *sma;
394
395 ipcp = ipc_obtain_object(&sem_ids(ns), id);
396 if (IS_ERR(ipcp))
397 return ERR_CAST(ipcp);
398
399 sma = container_of(ipcp, struct sem_array, sem_perm);
400 *locknum = sem_lock(sma, sops, nsops);
401
402 /* ipc_rmid() may have already freed the ID while sem_lock
403 * was spinning: verify that the structure is still valid
404 */
405 if (!ipcp->deleted)
406 return container_of(ipcp, struct sem_array, sem_perm);
407
408 sem_unlock(sma, *locknum);
409 return ERR_PTR(-EINVAL);
410 }
411
412 static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
413 {
414 struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
415
416 if (IS_ERR(ipcp))
417 return ERR_CAST(ipcp);
418
419 return container_of(ipcp, struct sem_array, sem_perm);
420 }
421
422 static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
423 int id)
424 {
425 struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
426
427 if (IS_ERR(ipcp))
428 return ERR_CAST(ipcp);
429
430 return container_of(ipcp, struct sem_array, sem_perm);
431 }
432
433 static inline void sem_lock_and_putref(struct sem_array *sma)
434 {
435 sem_lock(sma, NULL, -1);
436 ipc_rcu_putref(sma, ipc_rcu_free);
437 }
438
439 static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
440 {
441 ipc_rmid(&sem_ids(ns), &s->sem_perm);
442 }
443
444 /*
445 * Lockless wakeup algorithm:
446 * Without the check/retry algorithm a lockless wakeup is possible:
447 * - queue.status is initialized to -EINTR before blocking.
448 * - wakeup is performed by
449 * * unlinking the queue entry from the pending list
450 * * setting queue.status to IN_WAKEUP
451 * This is the notification for the blocked thread that a
452 * result value is imminent.
453 * * call wake_up_process
454 * * set queue.status to the final value.
455 * - the previously blocked thread checks queue.status:
456 * * if it's IN_WAKEUP, then it must wait until the value changes
457 * * if it's not -EINTR, then the operation was completed by
458 * update_queue. semtimedop can return queue.status without
459 * performing any operation on the sem array.
460 * * otherwise it must acquire the spinlock and check what's up.
461 *
462 * The two-stage algorithm is necessary to protect against the following
463 * races:
464 * - if queue.status is set after wake_up_process, then the woken up idle
465 * thread could race forward and try (and fail) to acquire sma->lock
466 * before update_queue had a chance to set queue.status
467 * - if queue.status is written before wake_up_process and if the
468 * blocked process is woken up by a signal between writing
469 * queue.status and the wake_up_process, then the woken up
470 * process could return from semtimedop and die by calling
471 * sys_exit before wake_up_process is called. Then wake_up_process
472 * will oops, because the task structure is already invalid.
473 * (yes, this happened on s390 with sysv msg).
474 *
475 */
476 #define IN_WAKEUP 1
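/*
 * A condensed, illustrative view of the two stages (the real code lives in
 * wake_up_sem_queue_prepare()/wake_up_sem_queue_do() and get_queue_result()
 * below; the error value is parked in q->pid between the two stages):
 *
 *	waker					sleeper (semtimedop)
 *	-----					--------------------
 *	q->status = IN_WAKEUP;			error = q->status;
 *	wake_up_process(q->sleeper);		while (error == IN_WAKEUP) {
 *	smp_wmb();					cpu_relax();
 *	q->status = final error code;			error = q->status;
 *						}
 */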
477
478 /**
479 * newary - Create a new semaphore set
480 * @ns: namespace
481 * @params: ptr to the structure that contains key, semflg and nsems
482 *
483 * Called with sem_ids.rwsem held (as a writer)
484 */
485
486 static int newary(struct ipc_namespace *ns, struct ipc_params *params)
487 {
488 int id;
489 int retval;
490 struct sem_array *sma;
491 int size;
492 key_t key = params->key;
493 int nsems = params->u.nsems;
494 int semflg = params->flg;
495 int i;
496
497 if (!nsems)
498 return -EINVAL;
499 if (ns->used_sems + nsems > ns->sc_semmns)
500 return -ENOSPC;
501
502 size = sizeof (*sma) + nsems * sizeof (struct sem);
503 sma = ipc_rcu_alloc(size);
504 if (!sma) {
505 return -ENOMEM;
506 }
507 memset (sma, 0, size);
508
509 sma->sem_perm.mode = (semflg & S_IRWXUGO);
510 sma->sem_perm.key = key;
511
512 sma->sem_perm.security = NULL;
513 retval = security_sem_alloc(sma);
514 if (retval) {
515 ipc_rcu_putref(sma, ipc_rcu_free);
516 return retval;
517 }
518
519 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
520 if (id < 0) {
521 ipc_rcu_putref(sma, sem_rcu_free);
522 return id;
523 }
524 ns->used_sems += nsems;
525
526 sma->sem_base = (struct sem *) &sma[1];
527
528 for (i = 0; i < nsems; i++) {
529 INIT_LIST_HEAD(&sma->sem_base[i].pending_alter);
530 INIT_LIST_HEAD(&sma->sem_base[i].pending_const);
531 spin_lock_init(&sma->sem_base[i].lock);
532 }
533
534 sma->complex_count = 0;
535 INIT_LIST_HEAD(&sma->pending_alter);
536 INIT_LIST_HEAD(&sma->pending_const);
537 INIT_LIST_HEAD(&sma->list_id);
538 sma->sem_nsems = nsems;
539 sma->sem_ctime = get_seconds();
540 sem_unlock(sma, -1);
541 rcu_read_unlock();
542
543 return sma->sem_perm.id;
544 }
545
546
547 /*
548 * Called with sem_ids.rwsem and ipcp locked.
549 */
550 static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
551 {
552 struct sem_array *sma;
553
554 sma = container_of(ipcp, struct sem_array, sem_perm);
555 return security_sem_associate(sma, semflg);
556 }
557
558 /*
559 * Called with sem_ids.rwsem and ipcp locked.
560 */
561 static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
562 struct ipc_params *params)
563 {
564 struct sem_array *sma;
565
566 sma = container_of(ipcp, struct sem_array, sem_perm);
567 if (params->u.nsems > sma->sem_nsems)
568 return -EINVAL;
569
570 return 0;
571 }
572
573 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
574 {
575 struct ipc_namespace *ns;
576 struct ipc_ops sem_ops;
577 struct ipc_params sem_params;
578
579 ns = current->nsproxy->ipc_ns;
580
581 if (nsems < 0 || nsems > ns->sc_semmsl)
582 return -EINVAL;
583
584 sem_ops.getnew = newary;
585 sem_ops.associate = sem_security;
586 sem_ops.more_checks = sem_more_checks;
587
588 sem_params.key = key;
589 sem_params.flg = semflg;
590 sem_params.u.nsems = nsems;
591
592 return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
593 }
594
595 /** perform_atomic_semop - Perform (if possible) a semaphore operation
596 * @sma: semaphore array
597 * @sops: array with operations that should be checked
598  * @nsops: number of sops
599 * @un: undo array
600 * @pid: pid that did the change
601 *
602 * Returns 0 if the operation was possible.
603 * Returns 1 if the operation is impossible, the caller must sleep.
604 * Negative values are error codes.
605 */
606
607 static int perform_atomic_semop(struct sem_array *sma, struct sembuf *sops,
608 int nsops, struct sem_undo *un, int pid)
609 {
610 int result, sem_op;
611 struct sembuf *sop;
612 struct sem * curr;
613
614 for (sop = sops; sop < sops + nsops; sop++) {
615 curr = sma->sem_base + sop->sem_num;
616 sem_op = sop->sem_op;
617 result = curr->semval;
618
619 if (!sem_op && result)
620 goto would_block;
621
622 result += sem_op;
623 if (result < 0)
624 goto would_block;
625 if (result > SEMVMX)
626 goto out_of_range;
627 if (sop->sem_flg & SEM_UNDO) {
628 int undo = un->semadj[sop->sem_num] - sem_op;
629 /*
630 * Exceeding the undo range is an error.
631 */
632 if (undo < (-SEMAEM - 1) || undo > SEMAEM)
633 goto out_of_range;
634 }
635 curr->semval = result;
636 }
637
638 sop--;
639 while (sop >= sops) {
640 sma->sem_base[sop->sem_num].sempid = pid;
641 if (sop->sem_flg & SEM_UNDO)
642 un->semadj[sop->sem_num] -= sop->sem_op;
643 sop--;
644 }
645
646 return 0;
647
648 out_of_range:
649 result = -ERANGE;
650 goto undo;
651
652 would_block:
653 if (sop->sem_flg & IPC_NOWAIT)
654 result = -EAGAIN;
655 else
656 result = 1;
657
658 undo:
659 sop--;
660 while (sop >= sops) {
661 sma->sem_base[sop->sem_num].semval -= sop->sem_op;
662 sop--;
663 }
664
665 return result;
666 }
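/*
 * Illustrative example of the all-or-nothing behaviour above (hypothetical
 * values, not taken from the code): with sops[] = { {0, -1, 0}, {1, -1, 0} },
 * semval[0] == 1 and semval[1] == 0, the first decrement is applied, the
 * second would make semval[1] negative, so the undo loop rolls the first
 * decrement back and the function returns 1 (or -EAGAIN if IPC_NOWAIT was
 * set on the blocking sop). The caller never sees a partially applied
 * operation.
 */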
667
668 /** wake_up_sem_queue_prepare(pt, q, error): Prepare wake-up
669 * @q: queue entry that must be signaled
670 * @error: Error value for the signal
671 *
672 * Prepare the wake-up of the queue entry q.
673 */
674 static void wake_up_sem_queue_prepare(struct list_head *pt,
675 struct sem_queue *q, int error)
676 {
677 if (list_empty(pt)) {
678 /*
679 * Hold preempt off so that we don't get preempted and have the
680 * wakee busy-wait until we're scheduled back on.
681 */
682 preempt_disable();
683 }
684 q->status = IN_WAKEUP;
685 q->pid = error;
686
687 list_add_tail(&q->list, pt);
688 }
689
690 /**
691 * wake_up_sem_queue_do(pt) - do the actual wake-up
692 * @pt: list of tasks to be woken up
693 *
694 * Do the actual wake-up.
695 * The function is called without any locks held, thus the semaphore array
696 * could be destroyed already and the tasks can disappear as soon as the
697 * status is set to the actual return code.
698 */
699 static void wake_up_sem_queue_do(struct list_head *pt)
700 {
701 struct sem_queue *q, *t;
702 int did_something;
703
704 did_something = !list_empty(pt);
705 list_for_each_entry_safe(q, t, pt, list) {
706 wake_up_process(q->sleeper);
707 /* q can disappear immediately after writing q->status. */
708 smp_wmb();
709 q->status = q->pid;
710 }
711 if (did_something)
712 preempt_enable();
713 }
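/*
 * Typical caller pattern (a simplified sketch; semtimedop(), freeary() and
 * the semctl() helpers below all follow it): wake-ups are collected while
 * holding the lock and performed only after every lock has been dropped.
 *
 *	struct list_head tasks;
 *
 *	INIT_LIST_HEAD(&tasks);
 *	locknum = sem_lock(sma, sops, nsops);
 *	do_smart_update(sma, sops, nsops, 0, &tasks);	   queues wake-ups on &tasks
 *	sem_unlock(sma, locknum);
 *	rcu_read_unlock();
 *	wake_up_sem_queue_do(&tasks);			   no locks held here
 */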
714
715 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
716 {
717 list_del(&q->list);
718 if (q->nsops > 1)
719 sma->complex_count--;
720 }
721
722 /** check_restart(sma, q)
723 * @sma: semaphore array
724 * @q: the operation that just completed
725 *
726 * update_queue is O(N^2) when it restarts scanning the whole queue of
727 * waiting operations. Therefore this function checks if the restart is
728 * really necessary. It is called after a previously waiting operation
729 * modified the array.
730 * Note that wait-for-zero operations are handled without restart.
731 */
732 static int check_restart(struct sem_array *sma, struct sem_queue *q)
733 {
734 /* pending complex alter operations are too difficult to analyse */
735 if (!list_empty(&sma->pending_alter))
736 return 1;
737
738 /* we were a sleeping complex operation. Too difficult */
739 if (q->nsops > 1)
740 return 1;
741
742 /* It is impossible that someone waits for the new value:
743 * - complex operations always restart.
744  * - wait-for-zero operations are handled separately.
745 * - q is a previously sleeping simple operation that
746 * altered the array. It must be a decrement, because
747 * simple increments never sleep.
748 * - If there are older (higher priority) decrements
749 * in the queue, then they have observed the original
750 * semval value and couldn't proceed. The operation
751  *   decremented the value - thus they won't proceed either.
752 */
753 return 0;
754 }
755
756 /**
757 * wake_const_ops(sma, semnum, pt) - Wake up non-alter tasks
758 * @sma: semaphore array.
759 * @semnum: semaphore that was modified.
760 * @pt: list head for the tasks that must be woken up.
761 *
762 * wake_const_ops must be called after a semaphore in a semaphore array
763 * was set to 0. If complex const operations are pending, wake_const_ops must
764 * be called with semnum = -1, as well as with the number of each modified
765 * semaphore.
766 * The tasks that must be woken up are added to @pt. The return code
767 * is stored in q->pid.
768 * The function returns 1 if at least one operation was completed successfully.
769 */
770 static int wake_const_ops(struct sem_array *sma, int semnum,
771 struct list_head *pt)
772 {
773 struct sem_queue *q;
774 struct list_head *walk;
775 struct list_head *pending_list;
776 int semop_completed = 0;
777
778 if (semnum == -1)
779 pending_list = &sma->pending_const;
780 else
781 pending_list = &sma->sem_base[semnum].pending_const;
782
783 walk = pending_list->next;
784 while (walk != pending_list) {
785 int error;
786
787 q = container_of(walk, struct sem_queue, list);
788 walk = walk->next;
789
790 error = perform_atomic_semop(sma, q->sops, q->nsops,
791 q->undo, q->pid);
792
793 if (error <= 0) {
794 /* operation completed, remove from queue & wakeup */
795
796 unlink_queue(sma, q);
797
798 wake_up_sem_queue_prepare(pt, q, error);
799 if (error == 0)
800 semop_completed = 1;
801 }
802 }
803 return semop_completed;
804 }
805
806 /**
807 * do_smart_wakeup_zero(sma, sops, nsops, pt) - wakeup all wait for zero tasks
808 * @sma: semaphore array
809 * @sops: operations that were performed
810 * @nsops: number of operations
811 * @pt: list head of the tasks that must be woken up.
812 *
813  * do_smart_wakeup_zero() checks all required queues for wait-for-zero
814 * operations, based on the actual changes that were performed on the
815 * semaphore array.
816 * The function returns 1 if at least one operation was completed successfully.
817 */
818 static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops,
819 int nsops, struct list_head *pt)
820 {
821 int i;
822 int semop_completed = 0;
823 int got_zero = 0;
824
825 /* first: the per-semaphore queues, if known */
826 if (sops) {
827 for (i = 0; i < nsops; i++) {
828 int num = sops[i].sem_num;
829
830 if (sma->sem_base[num].semval == 0) {
831 got_zero = 1;
832 semop_completed |= wake_const_ops(sma, num, pt);
833 }
834 }
835 } else {
836 /*
837 * No sops means modified semaphores not known.
838 * Assume all were changed.
839 */
840 for (i = 0; i < sma->sem_nsems; i++) {
841 if (sma->sem_base[i].semval == 0) {
842 got_zero = 1;
843 semop_completed |= wake_const_ops(sma, i, pt);
844 }
845 }
846 }
847 /*
848 * If one of the modified semaphores got 0,
849 * then check the global queue, too.
850 */
851 if (got_zero)
852 semop_completed |= wake_const_ops(sma, -1, pt);
853
854 return semop_completed;
855 }
856
857
858 /**
859 * update_queue(sma, semnum): Look for tasks that can be completed.
860 * @sma: semaphore array.
861 * @semnum: semaphore that was modified.
862 * @pt: list head for the tasks that must be woken up.
863 *
864 * update_queue must be called after a semaphore in a semaphore array
865 * was modified. If multiple semaphores were modified, update_queue must
866 * be called with semnum = -1, as well as with the number of each modified
867 * semaphore.
868 * The tasks that must be woken up are added to @pt. The return code
869 * is stored in q->pid.
870 * The function internally checks if const operations can now succeed.
871 *
872  * The function returns 1 if at least one semop was completed successfully.
873 */
874 static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
875 {
876 struct sem_queue *q;
877 struct list_head *walk;
878 struct list_head *pending_list;
879 int semop_completed = 0;
880
881 if (semnum == -1)
882 pending_list = &sma->pending_alter;
883 else
884 pending_list = &sma->sem_base[semnum].pending_alter;
885
886 again:
887 walk = pending_list->next;
888 while (walk != pending_list) {
889 int error, restart;
890
891 q = container_of(walk, struct sem_queue, list);
892 walk = walk->next;
893
894 /* If we are scanning the single sop, per-semaphore list of
895 * one semaphore and that semaphore is 0, then it is not
896 * necessary to scan further: simple increments
897 * that affect only one entry succeed immediately and cannot
898 * be in the per semaphore pending queue, and decrements
899 * cannot be successful if the value is already 0.
900 */
901 if (semnum != -1 && sma->sem_base[semnum].semval == 0)
902 break;
903
904 error = perform_atomic_semop(sma, q->sops, q->nsops,
905 q->undo, q->pid);
906
907 /* Does q->sleeper still need to sleep? */
908 if (error > 0)
909 continue;
910
911 unlink_queue(sma, q);
912
913 if (error) {
914 restart = 0;
915 } else {
916 semop_completed = 1;
917 do_smart_wakeup_zero(sma, q->sops, q->nsops, pt);
918 restart = check_restart(sma, q);
919 }
920
921 wake_up_sem_queue_prepare(pt, q, error);
922 if (restart)
923 goto again;
924 }
925 return semop_completed;
926 }
927
928 /**
929 * set_semotime(sma, sops) - set sem_otime
930 * @sma: semaphore array
931 * @sops: operations that modified the array, may be NULL
932 *
933  * sem_otime is replicated to avoid cache line thrashing.
934 * This function sets one instance to the current time.
935 */
936 static void set_semotime(struct sem_array *sma, struct sembuf *sops)
937 {
938 if (sops == NULL) {
939 sma->sem_base[0].sem_otime = get_seconds();
940 } else {
941 sma->sem_base[sops[0].sem_num].sem_otime =
942 get_seconds();
943 }
944 }
945
946 /**
947 * do_smart_update(sma, sops, nsops, otime, pt) - optimized update_queue
948 * @sma: semaphore array
949 * @sops: operations that were performed
950 * @nsops: number of operations
951 * @otime: force setting otime
952 * @pt: list head of the tasks that must be woken up.
953 *
954 * do_smart_update() does the required calls to update_queue and wakeup_zero,
955 * based on the actual changes that were performed on the semaphore array.
956 * Note that the function does not do the actual wake-up: the caller is
957 * responsible for calling wake_up_sem_queue_do(@pt).
958 * It is safe to perform this call after dropping all locks.
959 */
960 static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops,
961 int otime, struct list_head *pt)
962 {
963 int i;
964
965 otime |= do_smart_wakeup_zero(sma, sops, nsops, pt);
966
967 if (!list_empty(&sma->pending_alter)) {
968 /* semaphore array uses the global queue - just process it. */
969 otime |= update_queue(sma, -1, pt);
970 } else {
971 if (!sops) {
972 /*
973 * No sops, thus the modified semaphores are not
974 * known. Check all.
975 */
976 for (i = 0; i < sma->sem_nsems; i++)
977 otime |= update_queue(sma, i, pt);
978 } else {
979 /*
980 * Check the semaphores that were increased:
981 * - No complex ops, thus all sleeping ops are
982  *   decrements.
983 * - if we decreased the value, then any sleeping
984  *   semaphore ops won't be able to run: If the
985 * previous value was too small, then the new
986 * value will be too small, too.
987 */
988 for (i = 0; i < nsops; i++) {
989 if (sops[i].sem_op > 0) {
990 otime |= update_queue(sma,
991 sops[i].sem_num, pt);
992 }
993 }
994 }
995 }
996 if (otime)
997 set_semotime(sma, sops);
998 }
999
1000 /* The following counts are associated to each semaphore:
1001 * semncnt number of tasks waiting on semval being nonzero
1002 * semzcnt number of tasks waiting on semval being zero
1003 * This model assumes that a task waits on exactly one semaphore.
1004 * Since semaphore operations are to be performed atomically, tasks actually
1005 * wait on a whole sequence of semaphores simultaneously.
1006 * The counts we return here are a rough approximation, but still
1007 * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
1008 */
1009 static int count_semncnt (struct sem_array * sma, ushort semnum)
1010 {
1011 int semncnt;
1012 struct sem_queue * q;
1013
1014 semncnt = 0;
1015 list_for_each_entry(q, &sma->sem_base[semnum].pending_alter, list) {
1016 struct sembuf * sops = q->sops;
1017 BUG_ON(sops->sem_num != semnum);
1018 if ((sops->sem_op < 0) && !(sops->sem_flg & IPC_NOWAIT))
1019 semncnt++;
1020 }
1021
1022 list_for_each_entry(q, &sma->pending_alter, list) {
1023 struct sembuf * sops = q->sops;
1024 int nsops = q->nsops;
1025 int i;
1026 for (i = 0; i < nsops; i++)
1027 if (sops[i].sem_num == semnum
1028 && (sops[i].sem_op < 0)
1029 && !(sops[i].sem_flg & IPC_NOWAIT))
1030 semncnt++;
1031 }
1032 return semncnt;
1033 }
1034
1035 static int count_semzcnt (struct sem_array * sma, ushort semnum)
1036 {
1037 int semzcnt;
1038 struct sem_queue * q;
1039
1040 semzcnt = 0;
1041 list_for_each_entry(q, &sma->sem_base[semnum].pending_const, list) {
1042 struct sembuf * sops = q->sops;
1043 BUG_ON(sops->sem_num != semnum);
1044 if ((sops->sem_op == 0) && !(sops->sem_flg & IPC_NOWAIT))
1045 semzcnt++;
1046 }
1047
1048 list_for_each_entry(q, &sma->pending_const, list) {
1049 struct sembuf * sops = q->sops;
1050 int nsops = q->nsops;
1051 int i;
1052 for (i = 0; i < nsops; i++)
1053 if (sops[i].sem_num == semnum
1054 && (sops[i].sem_op == 0)
1055 && !(sops[i].sem_flg & IPC_NOWAIT))
1056 semzcnt++;
1057 }
1058 return semzcnt;
1059 }
1060
1061 /* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
1062  * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
1063 * remains locked on exit.
1064 */
1065 static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
1066 {
1067 struct sem_undo *un, *tu;
1068 struct sem_queue *q, *tq;
1069 struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
1070 struct list_head tasks;
1071 int i;
1072
1073 /* Free the existing undo structures for this semaphore set. */
1074 ipc_assert_locked_object(&sma->sem_perm);
1075 list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
1076 list_del(&un->list_id);
1077 spin_lock(&un->ulp->lock);
1078 un->semid = -1;
1079 list_del_rcu(&un->list_proc);
1080 spin_unlock(&un->ulp->lock);
1081 kfree_rcu(un, rcu);
1082 }
1083
1084 /* Wake up all pending processes and let them fail with EIDRM. */
1085 INIT_LIST_HEAD(&tasks);
1086 list_for_each_entry_safe(q, tq, &sma->pending_const, list) {
1087 unlink_queue(sma, q);
1088 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1089 }
1090
1091 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
1092 unlink_queue(sma, q);
1093 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1094 }
1095 for (i = 0; i < sma->sem_nsems; i++) {
1096 struct sem *sem = sma->sem_base + i;
1097 list_for_each_entry_safe(q, tq, &sem->pending_const, list) {
1098 unlink_queue(sma, q);
1099 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1100 }
1101 list_for_each_entry_safe(q, tq, &sem->pending_alter, list) {
1102 unlink_queue(sma, q);
1103 wake_up_sem_queue_prepare(&tasks, q, -EIDRM);
1104 }
1105 }
1106
1107 /* Remove the semaphore set from the IDR */
1108 sem_rmid(ns, sma);
1109 sem_unlock(sma, -1);
1110 rcu_read_unlock();
1111
1112 wake_up_sem_queue_do(&tasks);
1113 ns->used_sems -= sma->sem_nsems;
1114 ipc_rcu_putref(sma, sem_rcu_free);
1115 }
1116
1117 static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
1118 {
1119 switch(version) {
1120 case IPC_64:
1121 return copy_to_user(buf, in, sizeof(*in));
1122 case IPC_OLD:
1123 {
1124 struct semid_ds out;
1125
1126 memset(&out, 0, sizeof(out));
1127
1128 ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
1129
1130 out.sem_otime = in->sem_otime;
1131 out.sem_ctime = in->sem_ctime;
1132 out.sem_nsems = in->sem_nsems;
1133
1134 return copy_to_user(buf, &out, sizeof(out));
1135 }
1136 default:
1137 return -EINVAL;
1138 }
1139 }
1140
1141 static time_t get_semotime(struct sem_array *sma)
1142 {
1143 int i;
1144 time_t res;
1145
1146 res = sma->sem_base[0].sem_otime;
1147 for (i = 1; i < sma->sem_nsems; i++) {
1148 time_t to = sma->sem_base[i].sem_otime;
1149
1150 if (to > res)
1151 res = to;
1152 }
1153 return res;
1154 }
1155
1156 static int semctl_nolock(struct ipc_namespace *ns, int semid,
1157 int cmd, int version, void __user *p)
1158 {
1159 int err;
1160 struct sem_array *sma;
1161
1162 switch(cmd) {
1163 case IPC_INFO:
1164 case SEM_INFO:
1165 {
1166 struct seminfo seminfo;
1167 int max_id;
1168
1169 err = security_sem_semctl(NULL, cmd);
1170 if (err)
1171 return err;
1172
1173 memset(&seminfo,0,sizeof(seminfo));
1174 seminfo.semmni = ns->sc_semmni;
1175 seminfo.semmns = ns->sc_semmns;
1176 seminfo.semmsl = ns->sc_semmsl;
1177 seminfo.semopm = ns->sc_semopm;
1178 seminfo.semvmx = SEMVMX;
1179 seminfo.semmnu = SEMMNU;
1180 seminfo.semmap = SEMMAP;
1181 seminfo.semume = SEMUME;
1182 down_read(&sem_ids(ns).rwsem);
1183 if (cmd == SEM_INFO) {
1184 seminfo.semusz = sem_ids(ns).in_use;
1185 seminfo.semaem = ns->used_sems;
1186 } else {
1187 seminfo.semusz = SEMUSZ;
1188 seminfo.semaem = SEMAEM;
1189 }
1190 max_id = ipc_get_maxid(&sem_ids(ns));
1191 up_read(&sem_ids(ns).rwsem);
1192 if (copy_to_user(p, &seminfo, sizeof(struct seminfo)))
1193 return -EFAULT;
1194 return (max_id < 0) ? 0: max_id;
1195 }
1196 case IPC_STAT:
1197 case SEM_STAT:
1198 {
1199 struct semid64_ds tbuf;
1200 int id = 0;
1201
1202 memset(&tbuf, 0, sizeof(tbuf));
1203
1204 rcu_read_lock();
1205 if (cmd == SEM_STAT) {
1206 sma = sem_obtain_object(ns, semid);
1207 if (IS_ERR(sma)) {
1208 err = PTR_ERR(sma);
1209 goto out_unlock;
1210 }
1211 id = sma->sem_perm.id;
1212 } else {
1213 sma = sem_obtain_object_check(ns, semid);
1214 if (IS_ERR(sma)) {
1215 err = PTR_ERR(sma);
1216 goto out_unlock;
1217 }
1218 }
1219
1220 err = -EACCES;
1221 if (ipcperms(ns, &sma->sem_perm, S_IRUGO))
1222 goto out_unlock;
1223
1224 err = security_sem_semctl(sma, cmd);
1225 if (err)
1226 goto out_unlock;
1227
1228 kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm);
1229 tbuf.sem_otime = get_semotime(sma);
1230 tbuf.sem_ctime = sma->sem_ctime;
1231 tbuf.sem_nsems = sma->sem_nsems;
1232 rcu_read_unlock();
1233 if (copy_semid_to_user(p, &tbuf, version))
1234 return -EFAULT;
1235 return id;
1236 }
1237 default:
1238 return -EINVAL;
1239 }
1240 out_unlock:
1241 rcu_read_unlock();
1242 return err;
1243 }
1244
1245 static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
1246 unsigned long arg)
1247 {
1248 struct sem_undo *un;
1249 struct sem_array *sma;
1250 struct sem* curr;
1251 int err;
1252 struct list_head tasks;
1253 int val;
1254 #if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
1255 /* big-endian 64bit */
1256 val = arg >> 32;
1257 #else
1258 /* 32bit or little-endian 64bit */
1259 val = arg;
1260 #endif
1261
1262 if (val > SEMVMX || val < 0)
1263 return -ERANGE;
1264
1265 INIT_LIST_HEAD(&tasks);
1266
1267 rcu_read_lock();
1268 sma = sem_obtain_object_check(ns, semid);
1269 if (IS_ERR(sma)) {
1270 rcu_read_unlock();
1271 return PTR_ERR(sma);
1272 }
1273
1274 if (semnum < 0 || semnum >= sma->sem_nsems) {
1275 rcu_read_unlock();
1276 return -EINVAL;
1277 }
1278
1279
1280 if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
1281 rcu_read_unlock();
1282 return -EACCES;
1283 }
1284
1285 err = security_sem_semctl(sma, SETVAL);
1286 if (err) {
1287 rcu_read_unlock();
1288 return -EACCES;
1289 }
1290
1291 sem_lock(sma, NULL, -1);
1292
1293 if (sma->sem_perm.deleted) {
1294 sem_unlock(sma, -1);
1295 rcu_read_unlock();
1296 return -EIDRM;
1297 }
1298
1299 curr = &sma->sem_base[semnum];
1300
1301 ipc_assert_locked_object(&sma->sem_perm);
1302 list_for_each_entry(un, &sma->list_id, list_id)
1303 un->semadj[semnum] = 0;
1304
1305 curr->semval = val;
1306 curr->sempid = task_tgid_vnr(current);
1307 sma->sem_ctime = get_seconds();
1308 /* maybe some queued-up processes were waiting for this */
1309 do_smart_update(sma, NULL, 0, 0, &tasks);
1310 sem_unlock(sma, -1);
1311 rcu_read_unlock();
1312 wake_up_sem_queue_do(&tasks);
1313 return 0;
1314 }
1315
1316 static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
1317 int cmd, void __user *p)
1318 {
1319 struct sem_array *sma;
1320 struct sem* curr;
1321 int err, nsems;
1322 ushort fast_sem_io[SEMMSL_FAST];
1323 ushort* sem_io = fast_sem_io;
1324 struct list_head tasks;
1325
1326 INIT_LIST_HEAD(&tasks);
1327
1328 rcu_read_lock();
1329 sma = sem_obtain_object_check(ns, semid);
1330 if (IS_ERR(sma)) {
1331 rcu_read_unlock();
1332 return PTR_ERR(sma);
1333 }
1334
1335 nsems = sma->sem_nsems;
1336
1337 err = -EACCES;
1338 if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO))
1339 goto out_rcu_wakeup;
1340
1341 err = security_sem_semctl(sma, cmd);
1342 if (err)
1343 goto out_rcu_wakeup;
1344
1345 err = -EACCES;
1346 switch (cmd) {
1347 case GETALL:
1348 {
1349 ushort __user *array = p;
1350 int i;
1351
1352 sem_lock(sma, NULL, -1);
1353 if (sma->sem_perm.deleted) {
1354 err = -EIDRM;
1355 goto out_unlock;
1356 }
1357 if(nsems > SEMMSL_FAST) {
1358 if (!ipc_rcu_getref(sma)) {
1359 err = -EIDRM;
1360 goto out_unlock;
1361 }
1362 sem_unlock(sma, -1);
1363 rcu_read_unlock();
1364 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1365 if(sem_io == NULL) {
1366 ipc_rcu_putref(sma, ipc_rcu_free);
1367 return -ENOMEM;
1368 }
1369
1370 rcu_read_lock();
1371 sem_lock_and_putref(sma);
1372 if (sma->sem_perm.deleted) {
1373 err = -EIDRM;
1374 goto out_unlock;
1375 }
1376 }
1377 for (i = 0; i < sma->sem_nsems; i++)
1378 sem_io[i] = sma->sem_base[i].semval;
1379 sem_unlock(sma, -1);
1380 rcu_read_unlock();
1381 err = 0;
1382 if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
1383 err = -EFAULT;
1384 goto out_free;
1385 }
1386 case SETALL:
1387 {
1388 int i;
1389 struct sem_undo *un;
1390
1391 if (!ipc_rcu_getref(sma)) {
1392 err = -EIDRM;
1393 goto out_rcu_wakeup;
1394 }
1395 rcu_read_unlock();
1396
1397 if(nsems > SEMMSL_FAST) {
1398 sem_io = ipc_alloc(sizeof(ushort)*nsems);
1399 if(sem_io == NULL) {
1400 ipc_rcu_putref(sma, ipc_rcu_free);
1401 return -ENOMEM;
1402 }
1403 }
1404
1405 if (copy_from_user (sem_io, p, nsems*sizeof(ushort))) {
1406 ipc_rcu_putref(sma, ipc_rcu_free);
1407 err = -EFAULT;
1408 goto out_free;
1409 }
1410
1411 for (i = 0; i < nsems; i++) {
1412 if (sem_io[i] > SEMVMX) {
1413 ipc_rcu_putref(sma, ipc_rcu_free);
1414 err = -ERANGE;
1415 goto out_free;
1416 }
1417 }
1418 rcu_read_lock();
1419 sem_lock_and_putref(sma);
1420 if (sma->sem_perm.deleted) {
1421 err = -EIDRM;
1422 goto out_unlock;
1423 }
1424
1425 for (i = 0; i < nsems; i++)
1426 sma->sem_base[i].semval = sem_io[i];
1427
1428 ipc_assert_locked_object(&sma->sem_perm);
1429 list_for_each_entry(un, &sma->list_id, list_id) {
1430 for (i = 0; i < nsems; i++)
1431 un->semadj[i] = 0;
1432 }
1433 sma->sem_ctime = get_seconds();
1434 /* maybe some queued-up processes were waiting for this */
1435 do_smart_update(sma, NULL, 0, 0, &tasks);
1436 err = 0;
1437 goto out_unlock;
1438 }
1439 	/* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */
1440 }
1441 err = -EINVAL;
1442 if (semnum < 0 || semnum >= nsems)
1443 goto out_rcu_wakeup;
1444
1445 sem_lock(sma, NULL, -1);
1446 if (sma->sem_perm.deleted) {
1447 err = -EIDRM;
1448 goto out_unlock;
1449 }
1450 curr = &sma->sem_base[semnum];
1451
1452 switch (cmd) {
1453 case GETVAL:
1454 err = curr->semval;
1455 goto out_unlock;
1456 case GETPID:
1457 err = curr->sempid;
1458 goto out_unlock;
1459 case GETNCNT:
1460 err = count_semncnt(sma,semnum);
1461 goto out_unlock;
1462 case GETZCNT:
1463 err = count_semzcnt(sma,semnum);
1464 goto out_unlock;
1465 }
1466
1467 out_unlock:
1468 sem_unlock(sma, -1);
1469 out_rcu_wakeup:
1470 rcu_read_unlock();
1471 wake_up_sem_queue_do(&tasks);
1472 out_free:
1473 if(sem_io != fast_sem_io)
1474 ipc_free(sem_io, sizeof(ushort)*nsems);
1475 return err;
1476 }
1477
1478 static inline unsigned long
1479 copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version)
1480 {
1481 switch(version) {
1482 case IPC_64:
1483 if (copy_from_user(out, buf, sizeof(*out)))
1484 return -EFAULT;
1485 return 0;
1486 case IPC_OLD:
1487 {
1488 struct semid_ds tbuf_old;
1489
1490 if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
1491 return -EFAULT;
1492
1493 out->sem_perm.uid = tbuf_old.sem_perm.uid;
1494 out->sem_perm.gid = tbuf_old.sem_perm.gid;
1495 out->sem_perm.mode = tbuf_old.sem_perm.mode;
1496
1497 return 0;
1498 }
1499 default:
1500 return -EINVAL;
1501 }
1502 }
1503
1504 /*
1505 * This function handles some semctl commands which require the rwsem
1506 * to be held in write mode.
1507 * NOTE: no locks must be held, the rwsem is taken inside this function.
1508 */
1509 static int semctl_down(struct ipc_namespace *ns, int semid,
1510 int cmd, int version, void __user *p)
1511 {
1512 struct sem_array *sma;
1513 int err;
1514 struct semid64_ds semid64;
1515 struct kern_ipc_perm *ipcp;
1516
1517 if(cmd == IPC_SET) {
1518 if (copy_semid_from_user(&semid64, p, version))
1519 return -EFAULT;
1520 }
1521
1522 down_write(&sem_ids(ns).rwsem);
1523 rcu_read_lock();
1524
1525 ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd,
1526 &semid64.sem_perm, 0);
1527 if (IS_ERR(ipcp)) {
1528 err = PTR_ERR(ipcp);
1529 goto out_unlock1;
1530 }
1531
1532 sma = container_of(ipcp, struct sem_array, sem_perm);
1533
1534 err = security_sem_semctl(sma, cmd);
1535 if (err)
1536 goto out_unlock1;
1537
1538 switch (cmd) {
1539 case IPC_RMID:
1540 sem_lock(sma, NULL, -1);
1541 /* freeary unlocks the ipc object and rcu */
1542 freeary(ns, ipcp);
1543 goto out_up;
1544 case IPC_SET:
1545 sem_lock(sma, NULL, -1);
1546 err = ipc_update_perm(&semid64.sem_perm, ipcp);
1547 if (err)
1548 goto out_unlock0;
1549 sma->sem_ctime = get_seconds();
1550 break;
1551 default:
1552 err = -EINVAL;
1553 goto out_unlock1;
1554 }
1555
1556 out_unlock0:
1557 sem_unlock(sma, -1);
1558 out_unlock1:
1559 rcu_read_unlock();
1560 out_up:
1561 up_write(&sem_ids(ns).rwsem);
1562 return err;
1563 }
1564
1565 SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
1566 {
1567 int version;
1568 struct ipc_namespace *ns;
1569 void __user *p = (void __user *)arg;
1570
1571 if (semid < 0)
1572 return -EINVAL;
1573
1574 version = ipc_parse_version(&cmd);
1575 ns = current->nsproxy->ipc_ns;
1576
1577 switch(cmd) {
1578 case IPC_INFO:
1579 case SEM_INFO:
1580 case IPC_STAT:
1581 case SEM_STAT:
1582 return semctl_nolock(ns, semid, cmd, version, p);
1583 case GETALL:
1584 case GETVAL:
1585 case GETPID:
1586 case GETNCNT:
1587 case GETZCNT:
1588 case SETALL:
1589 return semctl_main(ns, semid, semnum, cmd, p);
1590 case SETVAL:
1591 return semctl_setval(ns, semid, semnum, arg);
1592 case IPC_RMID:
1593 case IPC_SET:
1594 return semctl_down(ns, semid, cmd, version, p);
1595 default:
1596 return -EINVAL;
1597 }
1598 }
1599
1600 /* If the task doesn't already have an undo_list, then allocate one
1601 * here. We guarantee there is only one thread using this undo list,
1602 * and current is THE ONE
1603 *
1604 * If this allocation and assignment succeeds, but later
1605 * portions of this code fail, there is no need to free the sem_undo_list.
1606 * Just let it stay associated with the task, and it'll be freed later
1607 * at exit time.
1608 *
1609 * This can block, so callers must hold no locks.
1610 */
1611 static inline int get_undo_list(struct sem_undo_list **undo_listp)
1612 {
1613 struct sem_undo_list *undo_list;
1614
1615 undo_list = current->sysvsem.undo_list;
1616 if (!undo_list) {
1617 undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
1618 if (undo_list == NULL)
1619 return -ENOMEM;
1620 spin_lock_init(&undo_list->lock);
1621 atomic_set(&undo_list->refcnt, 1);
1622 INIT_LIST_HEAD(&undo_list->list_proc);
1623
1624 current->sysvsem.undo_list = undo_list;
1625 }
1626 *undo_listp = undo_list;
1627 return 0;
1628 }
1629
1630 static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid)
1631 {
1632 struct sem_undo *un;
1633
1634 list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) {
1635 if (un->semid == semid)
1636 return un;
1637 }
1638 return NULL;
1639 }
1640
1641 static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
1642 {
1643 struct sem_undo *un;
1644
1645 assert_spin_locked(&ulp->lock);
1646
1647 un = __lookup_undo(ulp, semid);
1648 if (un) {
1649 list_del_rcu(&un->list_proc);
1650 list_add_rcu(&un->list_proc, &ulp->list_proc);
1651 }
1652 return un;
1653 }
1654
1655 /**
1656 * find_alloc_undo - Lookup (and if not present create) undo array
1657 * @ns: namespace
1658 * @semid: semaphore array id
1659 *
1660 * The function looks up (and if not present creates) the undo structure.
1661 * The size of the undo structure depends on the size of the semaphore
1662 * array, thus the alloc path is not that straightforward.
1663 * Lifetime-rules: sem_undo is rcu-protected, on success, the function
1664 * performs a rcu_read_lock().
1665 */
1666 static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
1667 {
1668 struct sem_array *sma;
1669 struct sem_undo_list *ulp;
1670 struct sem_undo *un, *new;
1671 int nsems, error;
1672
1673 error = get_undo_list(&ulp);
1674 if (error)
1675 return ERR_PTR(error);
1676
1677 rcu_read_lock();
1678 spin_lock(&ulp->lock);
1679 un = lookup_undo(ulp, semid);
1680 spin_unlock(&ulp->lock);
1681 if (likely(un!=NULL))
1682 goto out;
1683
1684 /* no undo structure around - allocate one. */
1685 /* step 1: figure out the size of the semaphore array */
1686 sma = sem_obtain_object_check(ns, semid);
1687 if (IS_ERR(sma)) {
1688 rcu_read_unlock();
1689 return ERR_CAST(sma);
1690 }
1691
1692 nsems = sma->sem_nsems;
1693 if (!ipc_rcu_getref(sma)) {
1694 rcu_read_unlock();
1695 un = ERR_PTR(-EIDRM);
1696 goto out;
1697 }
1698 rcu_read_unlock();
1699
1700 /* step 2: allocate new undo structure */
1701 new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL);
1702 if (!new) {
1703 ipc_rcu_putref(sma, ipc_rcu_free);
1704 return ERR_PTR(-ENOMEM);
1705 }
1706
1707 /* step 3: Acquire the lock on semaphore array */
1708 rcu_read_lock();
1709 sem_lock_and_putref(sma);
1710 if (sma->sem_perm.deleted) {
1711 sem_unlock(sma, -1);
1712 rcu_read_unlock();
1713 kfree(new);
1714 un = ERR_PTR(-EIDRM);
1715 goto out;
1716 }
1717 spin_lock(&ulp->lock);
1718
1719 /*
1720 * step 4: check for races: did someone else allocate the undo struct?
1721 */
1722 un = lookup_undo(ulp, semid);
1723 if (un) {
1724 kfree(new);
1725 goto success;
1726 }
1727 /* step 5: initialize & link new undo structure */
1728 new->semadj = (short *) &new[1];
1729 new->ulp = ulp;
1730 new->semid = semid;
1731 assert_spin_locked(&ulp->lock);
1732 list_add_rcu(&new->list_proc, &ulp->list_proc);
1733 ipc_assert_locked_object(&sma->sem_perm);
1734 list_add(&new->list_id, &sma->list_id);
1735 un = new;
1736
1737 success:
1738 spin_unlock(&ulp->lock);
1739 sem_unlock(sma, -1);
1740 out:
1741 return un;
1742 }
1743
1744
1745 /**
1746 * get_queue_result - Retrieve the result code from sem_queue
1747 * @q: Pointer to queue structure
1748 *
1749 * Retrieve the return code from the pending queue. If IN_WAKEUP is found in
1750 * q->status, then we must loop until the value is replaced with the final
1751 * value: This may happen if a task is woken up by an unrelated event (e.g.
1752 * signal) and in parallel the task is woken up by another task because it got
1753 * the requested semaphores.
1754 *
1755 * The function can be called with or without holding the semaphore spinlock.
1756 */
1757 static int get_queue_result(struct sem_queue *q)
1758 {
1759 int error;
1760
1761 error = q->status;
1762 while (unlikely(error == IN_WAKEUP)) {
1763 cpu_relax();
1764 error = q->status;
1765 }
1766
1767 return error;
1768 }
1769
1770 SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
1771 unsigned, nsops, const struct timespec __user *, timeout)
1772 {
1773 int error = -EINVAL;
1774 struct sem_array *sma;
1775 struct sembuf fast_sops[SEMOPM_FAST];
1776 struct sembuf* sops = fast_sops, *sop;
1777 struct sem_undo *un;
1778 int undos = 0, alter = 0, max, locknum;
1779 struct sem_queue queue;
1780 unsigned long jiffies_left = 0;
1781 struct ipc_namespace *ns;
1782 struct list_head tasks;
1783
1784 ns = current->nsproxy->ipc_ns;
1785
1786 if (nsops < 1 || semid < 0)
1787 return -EINVAL;
1788 if (nsops > ns->sc_semopm)
1789 return -E2BIG;
1790 if(nsops > SEMOPM_FAST) {
1791 sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL);
1792 if(sops==NULL)
1793 return -ENOMEM;
1794 }
1795 if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) {
1796 error=-EFAULT;
1797 goto out_free;
1798 }
1799 if (timeout) {
1800 struct timespec _timeout;
1801 if (copy_from_user(&_timeout, timeout, sizeof(*timeout))) {
1802 error = -EFAULT;
1803 goto out_free;
1804 }
1805 if (_timeout.tv_sec < 0 || _timeout.tv_nsec < 0 ||
1806 _timeout.tv_nsec >= 1000000000L) {
1807 error = -EINVAL;
1808 goto out_free;
1809 }
1810 jiffies_left = timespec_to_jiffies(&_timeout);
1811 }
1812 max = 0;
1813 for (sop = sops; sop < sops + nsops; sop++) {
1814 if (sop->sem_num >= max)
1815 max = sop->sem_num;
1816 if (sop->sem_flg & SEM_UNDO)
1817 undos = 1;
1818 if (sop->sem_op != 0)
1819 alter = 1;
1820 }
1821
1822 INIT_LIST_HEAD(&tasks);
1823
1824 if (undos) {
1825 /* On success, find_alloc_undo takes the rcu_read_lock */
1826 un = find_alloc_undo(ns, semid);
1827 if (IS_ERR(un)) {
1828 error = PTR_ERR(un);
1829 goto out_free;
1830 }
1831 } else {
1832 un = NULL;
1833 rcu_read_lock();
1834 }
1835
1836 sma = sem_obtain_object_check(ns, semid);
1837 if (IS_ERR(sma)) {
1838 rcu_read_unlock();
1839 error = PTR_ERR(sma);
1840 goto out_free;
1841 }
1842
1843 error = -EFBIG;
1844 if (max >= sma->sem_nsems)
1845 goto out_rcu_wakeup;
1846
1847 error = -EACCES;
1848 if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO))
1849 goto out_rcu_wakeup;
1850
1851 error = security_sem_semop(sma, sops, nsops, alter);
1852 if (error)
1853 goto out_rcu_wakeup;
1854
1855 error = -EIDRM;
1856 locknum = sem_lock(sma, sops, nsops);
1857 if (sma->sem_perm.deleted)
1858 goto out_unlock_free;
1859 /*
1860 * semid identifiers are not unique - find_alloc_undo may have
1861 * allocated an undo structure, it was invalidated by an RMID
1862 * and now a new array with received the same id. Check and fail.
1863 * This case can be detected checking un->semid. The existence of
1864 * "un" itself is guaranteed by rcu.
1865 */
1866 if (un && un->semid == -1)
1867 goto out_unlock_free;
1868
1869 error = perform_atomic_semop(sma, sops, nsops, un,
1870 task_tgid_vnr(current));
1871 if (error == 0) {
1872 /* If the operation was successful, then do
1873 * the required updates.
1874 */
1875 if (alter)
1876 do_smart_update(sma, sops, nsops, 1, &tasks);
1877 else
1878 set_semotime(sma, sops);
1879 }
1880 if (error <= 0)
1881 goto out_unlock_free;
1882
1883 /* We need to sleep on this operation, so we put the current
1884 * task into the pending queue and go to sleep.
1885 */
1886
1887 queue.sops = sops;
1888 queue.nsops = nsops;
1889 queue.undo = un;
1890 queue.pid = task_tgid_vnr(current);
1891 queue.alter = alter;
1892
1893 if (nsops == 1) {
1894 struct sem *curr;
1895 curr = &sma->sem_base[sops->sem_num];
1896
1897 if (alter) {
1898 if (sma->complex_count) {
1899 list_add_tail(&queue.list,
1900 &sma->pending_alter);
1901 } else {
1902
1903 list_add_tail(&queue.list,
1904 &curr->pending_alter);
1905 }
1906 } else {
1907 list_add_tail(&queue.list, &curr->pending_const);
1908 }
1909 } else {
1910 if (!sma->complex_count)
1911 merge_queues(sma);
1912
1913 if (alter)
1914 list_add_tail(&queue.list, &sma->pending_alter);
1915 else
1916 list_add_tail(&queue.list, &sma->pending_const);
1917
1918 sma->complex_count++;
1919 }
1920
1921 queue.status = -EINTR;
1922 queue.sleeper = current;
1923
1924 sleep_again:
1925 current->state = TASK_INTERRUPTIBLE;
1926 sem_unlock(sma, locknum);
1927 rcu_read_unlock();
1928
1929 if (timeout)
1930 jiffies_left = schedule_timeout(jiffies_left);
1931 else
1932 schedule();
1933
1934 error = get_queue_result(&queue);
1935
1936 if (error != -EINTR) {
1937 /* fast path: update_queue already obtained all requested
1938 * resources.
1939 * Perform a smp_mb(): User space could assume that semop()
1940 * is a memory barrier: Without the mb(), the cpu could
1941 * speculatively read in user space stale data that was
1942 * overwritten by the previous owner of the semaphore.
1943 */
1944 smp_mb();
1945
1946 goto out_free;
1947 }
1948
1949 rcu_read_lock();
1950 sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
1951
1952 /*
1953 	 * Wait until it's guaranteed that no wake_up_sem_queue_do() is ongoing.
1954 */
1955 error = get_queue_result(&queue);
1956
1957 /*
1958 * Array removed? If yes, leave without sem_unlock().
1959 */
1960 if (IS_ERR(sma)) {
1961 rcu_read_unlock();
1962 goto out_free;
1963 }
1964
1965
1966 /*
1967 	 * If queue.status != -EINTR, we were woken up by another process.
1968 * Leave without unlink_queue(), but with sem_unlock().
1969 */
1970
1971 if (error != -EINTR) {
1972 goto out_unlock_free;
1973 }
1974
1975 /*
1976 * If an interrupt occurred we have to clean up the queue
1977 */
1978 if (timeout && jiffies_left == 0)
1979 error = -EAGAIN;
1980
1981 /*
1982 * If the wakeup was spurious, just retry
1983 */
1984 if (error == -EINTR && !signal_pending(current))
1985 goto sleep_again;
1986
1987 unlink_queue(sma, &queue);
1988
1989 out_unlock_free:
1990 sem_unlock(sma, locknum);
1991 out_rcu_wakeup:
1992 rcu_read_unlock();
1993 wake_up_sem_queue_do(&tasks);
1994 out_free:
1995 if(sops != fast_sops)
1996 kfree(sops);
1997 return error;
1998 }
1999
2000 SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
2001 unsigned, nsops)
2002 {
2003 return sys_semtimedop(semid, tsops, nsops, NULL);
2004 }
2005
2006 /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between
2007 * parent and child tasks.
2008 */
2009
2010 int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
2011 {
2012 struct sem_undo_list *undo_list;
2013 int error;
2014
2015 if (clone_flags & CLONE_SYSVSEM) {
2016 error = get_undo_list(&undo_list);
2017 if (error)
2018 return error;
2019 atomic_inc(&undo_list->refcnt);
2020 tsk->sysvsem.undo_list = undo_list;
2021 } else
2022 tsk->sysvsem.undo_list = NULL;
2023
2024 return 0;
2025 }
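/*
 * User-space sketch of the sharing decision above (illustrative only;
 * child_fn and child_stack are placeholders):
 *
 *	fork() or clone() without CLONE_SYSVSEM: the child starts with
 *	tsk->sysvsem.undo_list == NULL and gets its own list on first use.
 *
 *	clone(child_fn, child_stack, CLONE_SYSVSEM | SIGCHLD, NULL);
 *		the child shares the parent's sem_undo_list, so pending
 *		SEM_UNDO adjustments are applied only once, when the last
 *		user of the list exits (see exit_sem() below).
 */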
2026
2027 /*
2028 * add semadj values to semaphores, free undo structures.
2029 * undo structures are not freed when semaphore arrays are destroyed
2030 * so some of them may be out of date.
2031 * IMPLEMENTATION NOTE: There is some confusion over whether the
2032 * set of adjustments that needs to be done should be done in an atomic
2033 * manner or not. That is, if we are attempting to decrement the semval
2034 * should we queue up and wait until we can do so legally?
2035 * The original implementation attempted to do this (queue and wait).
2036 * The current implementation does not do so. The POSIX standard
2037 * and SVID should be consulted to determine what behavior is mandated.
2038 */
2039 void exit_sem(struct task_struct *tsk)
2040 {
2041 struct sem_undo_list *ulp;
2042
2043 ulp = tsk->sysvsem.undo_list;
2044 if (!ulp)
2045 return;
2046 tsk->sysvsem.undo_list = NULL;
2047
2048 if (!atomic_dec_and_test(&ulp->refcnt))
2049 return;
2050
2051 for (;;) {
2052 struct sem_array *sma;
2053 struct sem_undo *un;
2054 struct list_head tasks;
2055 int semid, i;
2056
2057 rcu_read_lock();
2058 un = list_entry_rcu(ulp->list_proc.next,
2059 struct sem_undo, list_proc);
2060 if (&un->list_proc == &ulp->list_proc) {
2061 /*
2062 * We must wait for freeary() before freeing this ulp,
2063 * in case we raced with last sem_undo. There is a small
2064 * possibility where we exit while freeary() didn't
2065 * finish unlocking sem_undo_list.
2066 */
2067 spin_unlock_wait(&ulp->lock);
2068 rcu_read_unlock();
2069 break;
2070 }
2071 spin_lock(&ulp->lock);
2072 semid = un->semid;
2073 spin_unlock(&ulp->lock);
2074
2075 /* exit_sem raced with IPC_RMID, nothing to do */
2076 if (semid == -1) {
2077 rcu_read_unlock();
2078 continue;
2079 }
2080
2081 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2082 /* exit_sem raced with IPC_RMID, nothing to do */
2083 if (IS_ERR(sma)) {
2084 rcu_read_unlock();
2085 continue;
2086 }
2087
2088 sem_lock(sma, NULL, -1);
2089 /* exit_sem raced with IPC_RMID, nothing to do */
2090 if (sma->sem_perm.deleted) {
2091 sem_unlock(sma, -1);
2092 rcu_read_unlock();
2093 continue;
2094 }
2095 un = __lookup_undo(ulp, semid);
2096 if (un == NULL) {
2097 /* exit_sem raced with IPC_RMID+semget() that created
2098 * exactly the same semid. Nothing to do.
2099 */
2100 sem_unlock(sma, -1);
2101 rcu_read_unlock();
2102 continue;
2103 }
2104
2105 /* remove un from the linked lists */
2106 ipc_assert_locked_object(&sma->sem_perm);
2107 list_del(&un->list_id);
2108
2109 spin_lock(&ulp->lock);
2110 list_del_rcu(&un->list_proc);
2111 spin_unlock(&ulp->lock);
2112
2113 /* perform adjustments registered in un */
2114 for (i = 0; i < sma->sem_nsems; i++) {
2115 struct sem * semaphore = &sma->sem_base[i];
2116 if (un->semadj[i]) {
2117 semaphore->semval += un->semadj[i];
2118 /*
2119 * Range checks of the new semaphore value,
2120 				 * not defined by SUS:
2121 * - Some unices ignore the undo entirely
2122 * (e.g. HP UX 11i 11.22, Tru64 V5.1)
2123 * - some cap the value (e.g. FreeBSD caps
2124 * at 0, but doesn't enforce SEMVMX)
2125 *
2126 * Linux caps the semaphore value, both at 0
2127 * and at SEMVMX.
2128 *
2129 * Manfred <manfred@colorfullife.com>
2130 */
2131 if (semaphore->semval < 0)
2132 semaphore->semval = 0;
2133 if (semaphore->semval > SEMVMX)
2134 semaphore->semval = SEMVMX;
2135 semaphore->sempid = task_tgid_vnr(current);
2136 }
2137 }
2138 /* maybe some queued-up processes were waiting for this */
2139 INIT_LIST_HEAD(&tasks);
2140 do_smart_update(sma, NULL, 0, 1, &tasks);
2141 sem_unlock(sma, -1);
2142 rcu_read_unlock();
2143 wake_up_sem_queue_do(&tasks);
2144
2145 kfree_rcu(un, rcu);
2146 }
2147 kfree(ulp);
2148 }
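/*
 * Illustrative example of the bookkeeping above (hypothetical values): a
 * task performs semop() with sem_op = -2 and SEM_UNDO on semaphore i, so
 * perform_atomic_semop() records un->semadj[i] = +2. If the task exits
 * without reversing the operation itself, the loop in exit_sem() adds the
 * +2 back to semval, clamping the result to the range 0..SEMVMX.
 */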
2149
2150 #ifdef CONFIG_PROC_FS
2151 static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
2152 {
2153 struct user_namespace *user_ns = seq_user_ns(s);
2154 struct sem_array *sma = it;
2155 time_t sem_otime;
2156
2157 /*
2158 * The proc interface isn't aware of sem_lock(), it calls
2159 * ipc_lock_object() directly (in sysvipc_find_ipc).
2160 * In order to stay compatible with sem_lock(), we must wait until
2161 * all simple semop() calls have left their critical regions.
2162 */
2163 sem_wait_array(sma);
2164
2165 sem_otime = get_semotime(sma);
2166
2167 return seq_printf(s,
2168 "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
2169 sma->sem_perm.key,
2170 sma->sem_perm.id,
2171 sma->sem_perm.mode,
2172 sma->sem_nsems,
2173 from_kuid_munged(user_ns, sma->sem_perm.uid),
2174 from_kgid_munged(user_ns, sma->sem_perm.gid),
2175 from_kuid_munged(user_ns, sma->sem_perm.cuid),
2176 from_kgid_munged(user_ns, sma->sem_perm.cgid),
2177 sem_otime,
2178 sma->sem_ctime);
2179 }
2180 #endif