IPC/semaphores: move the rwmutex handling inside semctl_down
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 * Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

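/*
 * Per-attach state: one shm_file_data hangs off the private_data of
 * every file created by do_shmat(), linking the mapping back to its
 * segment id, its ipc namespace and the backing (shmem or hugetlb) file.
 */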
struct shm_file_data {
        int id;
        struct ipc_namespace *ns;
        struct file *file;
        const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)     ((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)                 \
        ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

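/*
 * Set up the per-namespace shm limits and the id allocator. Called for
 * init_ipc_ns at boot and again for each newly created ipc namespace.
 */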
void shm_init_ns(struct ipc_namespace *ns)
{
        ns->shm_ctlmax = SHMMAX;
        ns->shm_ctlall = SHMALL;
        ns->shm_ctlmni = SHMMNI;
        ns->shm_tot = 0;
        ipc_init_ids(&ns->ids[IPC_SHM_IDS]);
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
        struct shmid_kernel *shp;
        shp = container_of(ipcp, struct shmid_kernel, shm_perm);

        if (shp->shm_nattch) {
                shp->shm_perm.mode |= SHM_DEST;
                /* Do not find it any more */
                shp->shm_perm.key = IPC_PRIVATE;
                shm_unlock(shp);
        } else
                shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
        free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
}
#endif

void __init shm_init(void)
{
        shm_init_ns(&init_ipc_ns);
        ipc_init_proc_interface("sysvipc/shm",
                                "       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime\n",
                                IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_)down routines are called in the paths where the rw_mutex
 * is held to protect access to the idr tree.
 */
static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check_down(
                                                struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
                                                int id)
{
        struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

        if (IS_ERR(ipcp))
                return (struct shmid_kernel *)ipcp;

        return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
        ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;

        shp = shm_lock(sfd->ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_atim = get_seconds();
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_nattch++;
        shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
        ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
        shm_rmid(ns, shp);
        shm_unlock(shp);
        if (!is_file_hugepages(shp->shm_file))
                shmem_lock(shp->shm_file, 0, shp->mlock_user);
        else
                user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
                                shp->mlock_user);
        fput(shp->shm_file);
        security_shm_free(shp);
        ipc_rcu_putref(shp);
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct shmid_kernel *shp;
        struct ipc_namespace *ns = sfd->ns;

        down_write(&shm_ids(ns).rw_mutex);
        /* remove from the list of attaches of the shm segment */
        shp = shm_lock_down(ns, sfd->id);
        BUG_ON(IS_ERR(shp));
        shp->shm_lprid = task_tgid_vnr(current);
        shp->shm_dtim = get_seconds();
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);
}

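/*
 * Page faults on an attached segment are forwarded to the fault handler
 * of the underlying shmem (or hugetlb) file saved by shm_mmap().
 */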
static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);

        return sfd->vm_ops->fault(vma, vmf);
}

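/*
 * NUMA memory policy get/set are likewise delegated to the backing
 * file's vm_ops when it provides them.
 */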
#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        int err = 0;
        if (sfd->vm_ops->set_policy)
                err = sfd->vm_ops->set_policy(vma, new);
        return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
                                        unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct shm_file_data *sfd = shm_file_data(file);
        struct mempolicy *pol = NULL;

        if (sfd->vm_ops->get_policy)
                pol = sfd->vm_ops->get_policy(vma, addr);
        else if (vma->vm_policy)
                pol = vma->vm_policy;

        return pol;
}
#endif

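/*
 * mmap the backing file, then interpose shm_vm_ops on the vma so that
 * attaches and detaches are accounted via shm_open()/shm_close().
 */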
static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct shm_file_data *sfd = shm_file_data(file);
        int ret;

        ret = sfd->file->f_op->mmap(sfd->file, vma);
        if (ret != 0)
                return ret;
        sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
        BUG_ON(!sfd->vm_ops->fault);
#endif
        vma->vm_ops = &shm_vm_ops;
        shm_open(vma);

        return ret;
}

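/*
 * Drop the per-attach state once the last reference to the shm file
 * created by do_shmat() goes away.
 */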
static int shm_release(struct inode *ino, struct file *file)
{
        struct shm_file_data *sfd = shm_file_data(file);

        put_ipc_ns(sfd->ns);
        shm_file_data(file) = NULL;
        kfree(sfd);
        return 0;
}

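/* Pass fsync through to the backing file, if it supports it. */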
static int shm_fsync(struct file *file, struct dentry *dentry, int datasync)
{
        int (*fsync) (struct file *, struct dentry *, int datasync);
        struct shm_file_data *sfd = shm_file_data(file);
        int ret = -EINVAL;

        fsync = sfd->file->f_op->fsync;
        if (fsync)
                ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync);
        return ret;
}

static unsigned long shm_get_unmapped_area(struct file *file,
        unsigned long addr, unsigned long len, unsigned long pgoff,
        unsigned long flags)
{
        struct shm_file_data *sfd = shm_file_data(file);
        return get_unmapped_area(sfd->file, addr, len, pgoff, flags);
}

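/*
 * True if @file is a SysV shm attach whose segment is backed by
 * hugetlbfs rather than shmem.
 */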
int is_file_shm_hugepages(struct file *file)
{
        int ret = 0;

        if (file->f_op == &shm_file_operations) {
                struct shm_file_data *sfd;
                sfd = shm_file_data(file);
                ret = is_file_hugepages(sfd->file);
        }
        return ret;
}

static const struct file_operations shm_file_operations = {
        .mmap           = shm_mmap,
        .fsync          = shm_fsync,
        .release        = shm_release,
        .get_unmapped_area = shm_get_unmapped_area,
};

static struct vm_operations_struct shm_vm_ops = {
        .open   = shm_open,     /* callback for a new vm-area open */
        .close  = shm_close,    /* callback for when the vm-area is released */
        .fault  = shm_fault,
#if defined(CONFIG_NUMA)
        .set_policy = shm_set_policy,
        .get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
        key_t key = params->key;
        int shmflg = params->flg;
        size_t size = params->u.size;
        int error;
        struct shmid_kernel *shp;
        int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        struct file *file;
        char name[13];
        int id;

        if (size < SHMMIN || size > ns->shm_ctlmax)
                return -EINVAL;

        if (ns->shm_tot + numpages > ns->shm_ctlall)
                return -ENOSPC;

        shp = ipc_rcu_alloc(sizeof(*shp));
        if (!shp)
                return -ENOMEM;

        shp->shm_perm.key = key;
        shp->shm_perm.mode = (shmflg & S_IRWXUGO);
        shp->mlock_user = NULL;

        shp->shm_perm.security = NULL;
        error = security_shm_alloc(shp);
        if (error) {
                ipc_rcu_putref(shp);
                return error;
        }

        sprintf(name, "SYSV%08x", key);
        if (shmflg & SHM_HUGETLB) {
                /* hugetlb_file_setup takes care of mlock user accounting */
                file = hugetlb_file_setup(name, size);
                shp->mlock_user = current->user;
        } else {
                int acctflag = VM_ACCOUNT;
                /*
                 * Do not allow segments without memory accounting under
                 * OVERCOMMIT_NEVER, even if SHM_NORESERVE asks for it.
                 */
                if ((shmflg & SHM_NORESERVE) &&
                                sysctl_overcommit_memory != OVERCOMMIT_NEVER)
                        acctflag = 0;
                file = shmem_file_setup(name, size, acctflag);
        }
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto no_file;

        id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
        if (id < 0) {
                error = id;
                goto no_id;
        }

        shp->shm_cprid = task_tgid_vnr(current);
        shp->shm_lprid = 0;
        shp->shm_atim = shp->shm_dtim = 0;
        shp->shm_ctim = get_seconds();
        shp->shm_segsz = size;
        shp->shm_nattch = 0;
        shp->shm_file = file;
        /*
         * shmid gets reported as "inode#" in /proc/pid/maps.
         * proc-ps tools use this. Changing this will break them.
         */
        file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

        ns->shm_tot += numpages;
        error = shp->shm_perm.id;
        shm_unlock(shp);
        return error;

no_id:
        fput(file);
no_file:
        security_shm_free(shp);
        ipc_rcu_putref(shp);
        return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
                                struct ipc_params *params)
{
        struct shmid_kernel *shp;

        shp = container_of(ipcp, struct shmid_kernel, shm_perm);
        if (shp->shm_segsz < params->u.size)
                return -EINVAL;

        return 0;
}

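/*
 * shmget(2): look up (or, with IPC_CREAT, create via newseg()) the
 * segment for @key in the caller's ipc namespace; the generic ipcget()
 * does the locking and existence checks.
 */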
asmlinkage long sys_shmget(key_t key, size_t size, int shmflg)
{
        struct ipc_namespace *ns;
        struct ipc_ops shm_ops;
        struct ipc_params shm_params;

        ns = current->nsproxy->ipc_ns;

        shm_ops.getnew = newseg;
        shm_ops.associate = shm_security;
        shm_ops.more_checks = shm_more_checks;

        shm_params.key = key;
        shm_params.flg = shmflg;
        shm_params.u.size = size;

        return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

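/*
 * Copy a shmid64_ds out to user space, converting to the old shmid_ds
 * layout when the caller used the IPC_OLD interface.
 */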
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shmid_ds out;

                ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
                out.shm_segsz   = in->shm_segsz;
                out.shm_atime   = in->shm_atime;
                out.shm_dtime   = in->shm_dtime;
                out.shm_ctime   = in->shm_ctime;
                out.shm_cpid    = in->shm_cpid;
                out.shm_lpid    = in->shm_lpid;
                out.shm_nattch  = in->shm_nattch;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

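/* The subset of shmid_ds that IPC_SET is allowed to change. */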
struct shm_setbuf {
        uid_t   uid;
        gid_t   gid;
        mode_t  mode;
};

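/*
 * Read the IPC_SET arguments from user space, in either the IPC_64 or
 * the old layout, and pack them into a shm_setbuf.
 */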
static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version)
{
        switch (version) {
        case IPC_64:
            {
                struct shmid64_ds tbuf;

                if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
                        return -EFAULT;

                out->uid        = tbuf.shm_perm.uid;
                out->gid        = tbuf.shm_perm.gid;
                out->mode       = tbuf.shm_perm.mode;

                return 0;
            }
        case IPC_OLD:
            {
                struct shmid_ds tbuf_old;

                if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
                        return -EFAULT;

                out->uid        = tbuf_old.shm_perm.uid;
                out->gid        = tbuf_old.shm_perm.gid;
                out->mode       = tbuf_old.shm_perm.mode;

                return 0;
            }
        default:
                return -EINVAL;
        }
}

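/*
 * Copy the shm limits out to user space, clamping shmmax to INT_MAX
 * for the old shminfo layout.
 */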
static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
            {
                struct shminfo out;

                if (in->shmmax > INT_MAX)
                        out.shmmax = INT_MAX;
                else
                        out.shmmax = (int)in->shmmax;

                out.shmmin      = in->shmmin;
                out.shmmni      = in->shmmni;
                out.shmseg      = in->shmseg;
                out.shmall      = in->shmall;

                return copy_to_user(buf, &out, sizeof(out));
            }
        default:
                return -EINVAL;
        }
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
                unsigned long *swp)
{
        int next_id;
        int total, in_use;

        *rss = 0;
        *swp = 0;

        in_use = shm_ids(ns).in_use;

        for (total = 0, next_id = 0; total < in_use; next_id++) {
                struct shmid_kernel *shp;
                struct inode *inode;

                shp = idr_find(&shm_ids(ns).ipcs_idr, next_id);
                if (shp == NULL)
                        continue;

                inode = shp->shm_file->f_path.dentry->d_inode;

                if (is_file_hugepages(shp->shm_file)) {
                        struct address_space *mapping = inode->i_mapping;
                        *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages;
                } else {
                        struct shmem_inode_info *info = SHMEM_I(inode);
                        spin_lock(&info->lock);
                        *rss += inode->i_mapping->nrpages;
                        *swp += info->swapped;
                        spin_unlock(&info->lock);
                }

                total++;
        }
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: the caller must hold no locks on entry; the rw_mutex is taken
 * inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
                       struct shmid_ds __user *buf, int version)
{
        struct kern_ipc_perm *ipcp;
        struct shm_setbuf setbuf;
        struct shmid_kernel *shp;
        int err;

        if (cmd == IPC_SET) {
                if (copy_shmid_from_user(&setbuf, buf, version))
                        return -EFAULT;
        }

        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock_check_down(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out_up;
        }

        ipcp = &shp->shm_perm;

        err = audit_ipc_obj(ipcp);
        if (err)
                goto out_unlock;

        if (cmd == IPC_SET) {
                err = audit_ipc_set_perm(0, setbuf.uid,
                                         setbuf.gid, setbuf.mode);
                if (err)
                        goto out_unlock;
        }

        if (current->euid != ipcp->uid &&
            current->euid != ipcp->cuid &&
            !capable(CAP_SYS_ADMIN)) {
                err = -EPERM;
                goto out_unlock;
        }

        err = security_shm_shmctl(shp, cmd);
        if (err)
                goto out_unlock;
        switch (cmd) {
        case IPC_RMID:
                do_shm_rmid(ns, ipcp);
                goto out_up;
        case IPC_SET:
                ipcp->uid = setbuf.uid;
                ipcp->gid = setbuf.gid;
                ipcp->mode = (ipcp->mode & ~S_IRWXUGO)
                        | (setbuf.mode & S_IRWXUGO);
                shp->shm_ctim = get_seconds();
                break;
        default:
                err = -EINVAL;
        }
out_unlock:
        shm_unlock(shp);
out_up:
        up_write(&shm_ids(ns).rw_mutex);
        return err;
}

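/*
 * shmctl(2): the read-only queries (IPC_INFO, SHM_INFO, *_STAT) and
 * SHM_LOCK/SHM_UNLOCK are handled here under the segment lock; IPC_RMID
 * and IPC_SET are forwarded to shmctl_down(), which takes the rw_mutex
 * itself.
 */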
asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
        struct shmid_kernel *shp;
        int err, version;
        struct ipc_namespace *ns;

        if (cmd < 0 || shmid < 0) {
                err = -EINVAL;
                goto out;
        }

        version = ipc_parse_version(&cmd);
        ns = current->nsproxy->ipc_ns;

        switch (cmd) { /* replace with proc interface ? */
        case IPC_INFO:
        {
                struct shminfo64 shminfo;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shminfo, 0, sizeof(shminfo));
                shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
                shminfo.shmmax = ns->shm_ctlmax;
                shminfo.shmall = ns->shm_ctlall;

                shminfo.shmmin = SHMMIN;
                if (copy_shminfo_to_user(buf, &shminfo, version))
                        return -EFAULT;

                down_read(&shm_ids(ns).rw_mutex);
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);

                if (err < 0)
                        err = 0;
                goto out;
        }
        case SHM_INFO:
        {
                struct shm_info shm_info;

                err = security_shm_shmctl(NULL, cmd);
                if (err)
                        return err;

                memset(&shm_info, 0, sizeof(shm_info));
                down_read(&shm_ids(ns).rw_mutex);
                shm_info.used_ids = shm_ids(ns).in_use;
                shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
                shm_info.shm_tot = ns->shm_tot;
                shm_info.swap_attempts = 0;
                shm_info.swap_successes = 0;
                err = ipc_get_maxid(&shm_ids(ns));
                up_read(&shm_ids(ns).rw_mutex);
                if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
                        err = -EFAULT;
                        goto out;
                }

                err = err < 0 ? 0 : err;
                goto out;
        }
        case SHM_STAT:
        case IPC_STAT:
        {
                struct shmid64_ds tbuf;
                int result;

                if (!buf) {
                        err = -EFAULT;
                        goto out;
                }

                if (cmd == SHM_STAT) {
                        shp = shm_lock(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = shp->shm_perm.id;
                } else {
                        shp = shm_lock_check(ns, shmid);
                        if (IS_ERR(shp)) {
                                err = PTR_ERR(shp);
                                goto out;
                        }
                        result = 0;
                }
                err = -EACCES;
                if (ipcperms(&shp->shm_perm, S_IRUGO))
                        goto out_unlock;
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
                memset(&tbuf, 0, sizeof(tbuf));
                kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
                tbuf.shm_segsz  = shp->shm_segsz;
                tbuf.shm_atime  = shp->shm_atim;
                tbuf.shm_dtime  = shp->shm_dtim;
                tbuf.shm_ctime  = shp->shm_ctim;
                tbuf.shm_cpid   = shp->shm_cprid;
                tbuf.shm_lpid   = shp->shm_lprid;
                tbuf.shm_nattch = shp->shm_nattch;
                shm_unlock(shp);
                if (copy_shmid_to_user(buf, &tbuf, version))
                        err = -EFAULT;
                else
                        err = result;
                goto out;
        }
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
                        err = PTR_ERR(shp);
                        goto out;
                }

                err = audit_ipc_obj(&(shp->shm_perm));
                if (err)
                        goto out_unlock;

                if (!capable(CAP_IPC_LOCK)) {
                        err = -EPERM;
                        if (current->euid != shp->shm_perm.uid &&
                            current->euid != shp->shm_perm.cuid)
                                goto out_unlock;
                        if (cmd == SHM_LOCK &&
                            !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
                                goto out_unlock;
                }

                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;

                if (cmd == SHM_LOCK) {
                        struct user_struct *user = current->user;
                        if (!is_file_hugepages(shp->shm_file)) {
                                err = shmem_lock(shp->shm_file, 1, user);
                                if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
                                        shp->shm_perm.mode |= SHM_LOCKED;
                                        shp->mlock_user = user;
                                }
                        }
                } else if (!is_file_hugepages(shp->shm_file)) {
                        shmem_lock(shp->shm_file, 0, shp->mlock_user);
                        shp->shm_perm.mode &= ~SHM_LOCKED;
                        shp->mlock_user = NULL;
                }
                shm_unlock(shp);
                goto out;
        }
        case IPC_RMID:
        case IPC_SET:
                err = shmctl_down(ns, shmid, cmd, buf, version);
                return err;
        default:
                return -EINVAL;
        }

out_unlock:
        shm_unlock(shp);
out:
        return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
        struct shmid_kernel *shp;
        unsigned long addr;
        unsigned long size;
        struct file *file;
        int err;
        unsigned long flags;
        unsigned long prot;
        int acc_mode;
        unsigned long user_addr;
        struct ipc_namespace *ns;
        struct shm_file_data *sfd;
        struct path path;
        mode_t f_mode;

        err = -EINVAL;
        if (shmid < 0)
                goto out;
        else if ((addr = (ulong)shmaddr)) {
                if (addr & (SHMLBA-1)) {
                        if (shmflg & SHM_RND)
                                addr &= ~(SHMLBA-1);    /* round down */
                        else
#ifndef __ARCH_FORCE_SHMLBA
                                if (addr & ~PAGE_MASK)
#endif
                                        goto out;
                }
                flags = MAP_SHARED | MAP_FIXED;
        } else {
                if ((shmflg & SHM_REMAP))
                        goto out;

                flags = MAP_SHARED;
        }

        if (shmflg & SHM_RDONLY) {
                prot = PROT_READ;
                acc_mode = S_IRUGO;
                f_mode = FMODE_READ;
        } else {
                prot = PROT_READ | PROT_WRITE;
                acc_mode = S_IRUGO | S_IWUGO;
                f_mode = FMODE_READ | FMODE_WRITE;
        }
        if (shmflg & SHM_EXEC) {
                prot |= PROT_EXEC;
                acc_mode |= S_IXUGO;
        }

        /*
         * We cannot rely on the fs check since SYSV IPC does have an
         * additional creator id...
         */
        ns = current->nsproxy->ipc_ns;
        shp = shm_lock_check(ns, shmid);
        if (IS_ERR(shp)) {
                err = PTR_ERR(shp);
                goto out;
        }

        err = -EACCES;
        if (ipcperms(&shp->shm_perm, acc_mode))
                goto out_unlock;

        err = security_shm_shmat(shp, shmaddr, shmflg);
        if (err)
                goto out_unlock;

        path.dentry = dget(shp->shm_file->f_path.dentry);
        path.mnt    = shp->shm_file->f_path.mnt;
        shp->shm_nattch++;
        size = i_size_read(path.dentry->d_inode);
        shm_unlock(shp);

        err = -ENOMEM;
        sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
        if (!sfd)
                goto out_put_dentry;

        err = -ENOMEM;

        file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations);
        if (!file)
                goto out_free;

        file->private_data = sfd;
        file->f_mapping = shp->shm_file->f_mapping;
        sfd->id = shp->shm_perm.id;
        sfd->ns = get_ipc_ns(ns);
        sfd->file = shp->shm_file;
        sfd->vm_ops = NULL;

        down_write(&current->mm->mmap_sem);
        if (addr && !(shmflg & SHM_REMAP)) {
                err = -EINVAL;
                if (find_vma_intersection(current->mm, addr, addr + size))
                        goto invalid;
                /*
                 * If shm segment goes below stack, make sure there is some
                 * space left for the stack to grow (at least 4 pages).
                 */
                if (addr < current->mm->start_stack &&
                    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
                        goto invalid;
        }

        user_addr = do_mmap(file, addr, size, prot, flags, 0);
        *raddr = user_addr;
        err = 0;
        if (IS_ERR_VALUE(user_addr))
                err = (long)user_addr;
invalid:
        up_write(&current->mm->mmap_sem);

        fput(file);

out_nattch:
        down_write(&shm_ids(ns).rw_mutex);
        shp = shm_lock_down(ns, shmid);
        BUG_ON(IS_ERR(shp));
        shp->shm_nattch--;
        if (shp->shm_nattch == 0 &&
            shp->shm_perm.mode & SHM_DEST)
                shm_destroy(ns, shp);
        else
                shm_unlock(shp);
        up_write(&shm_ids(ns).rw_mutex);

out:
        return err;

out_unlock:
        shm_unlock(shp);
        goto out;

out_free:
        kfree(sfd);
out_put_dentry:
        dput(path.dentry);
        goto out_nattch;
}

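/*
 * shmat(2): thin wrapper around do_shmat() that returns the attach
 * address as the syscall return value.
 */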
asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg)
{
        unsigned long ret;
        long err;

        err = do_shmat(shmid, shmaddr, shmflg, &ret);
        if (err)
                return err;
        force_successful_syscall_return();
        return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
asmlinkage long sys_shmdt(char __user *shmaddr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *next;
        unsigned long addr = (unsigned long)shmaddr;
        loff_t size = 0;
        int retval = -EINVAL;

        if (addr & ~PAGE_MASK)
                return retval;

        down_write(&mm->mmap_sem);

        /*
         * This function tries to be smart and unmap shm segments that
         * were modified by partial mlock or munmap calls:
         * - It first determines the size of the shm segment that should be
         *   unmapped: It searches for a vma that is backed by shm and that
         *   started at address shmaddr. It records its size and then unmaps
         *   it.
         * - Then it unmaps all shm vmas that started at shmaddr and that
         *   are within the initially determined size.
         * Errors from do_munmap are ignored: the function only fails if
         * it's called with invalid parameters or if it's called to unmap
         * a part of a vma. Both calls in this function are for full vmas,
         * the parameters are directly copied from the vma itself and always
         * valid - therefore do_munmap cannot fail. (famous last words?)
         */
        /*
         * If it had been mremap()'d, the starting address would not
         * match the usual checks anyway. So assume all vma's are
         * above the starting address given.
         */
        vma = find_vma(mm, addr);

        while (vma) {
                next = vma->vm_next;

                /*
                 * Check if the starting address would match, i.e. it's
                 * a fragment created by mprotect() and/or munmap(), or it
                 * otherwise starts at this address with no hassles.
                 */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


                        size = vma->vm_file->f_path.dentry->d_inode->i_size;
                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                        /*
                         * We discovered the size of the shm segment, so
                         * break out of here and fall through to the next
                         * loop that uses the size information to stop
                         * searching for matching vma's.
                         */
                        retval = 0;
                        vma = next;
                        break;
                }
                vma = next;
        }

        /*
         * We need look no further than the maximum address a fragment
         * could possibly have landed at. Also cast things to loff_t to
         * prevent overflows and make comparisons vs. equal-width types.
         */
        size = PAGE_ALIGN(size);
        while (vma && (loff_t)(vma->vm_end - addr) <= size) {
                next = vma->vm_next;

                /* finding a matching vma now does not alter retval */
                if ((vma->vm_ops == &shm_vm_ops) &&
                        (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

                        do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
                vma = next;
        }

        up_write(&mm->mmap_sem);
        return retval;
}

#ifdef CONFIG_PROC_FS
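/*
 * Emit one /proc/sysvipc/shm row per segment, picking a wider size
 * column when size_t is larger than int.
 */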
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
        struct shmid_kernel *shp = it;
        char *format;

#define SMALL_STRING "%10d %10d  %4o %10u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"
#define BIG_STRING   "%10d %10d  %4o %21u %5u %5u  %5d %5u %5u %5u %5u %10lu %10lu %10lu\n"

        if (sizeof(size_t) <= sizeof(int))
                format = SMALL_STRING;
        else
                format = BIG_STRING;
        return seq_printf(s, format,
                          shp->shm_perm.key,
                          shp->shm_perm.id,
                          shp->shm_perm.mode,
                          shp->shm_segsz,
                          shp->shm_cprid,
                          shp->shm_lprid,
                          shp->shm_nattch,
                          shp->shm_perm.uid,
                          shp->shm_perm.gid,
                          shp->shm_perm.cuid,
                          shp->shm_perm.cgid,
                          shp->shm_atim,
                          shp->shm_dtim,
                          shp->shm_ctim);
}
#endif