ipc,shm: cleanup do_shmat pasta
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

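/*
 * Each shmat() wraps the segment's backing shmem/hugetlbfs file in a private
 * struct file; shm_file_data hangs off that wrapper's ->private_data so the
 * shm_* file and vm operations below can recover the segment id, the owning
 * ipc namespace, the backing file and its original vm_ops.
 */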
struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}

/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

static int __init ipc_ns_init(void)
{
	shm_init_ns(&init_ipc_ns);
	return 0;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(file_inode(shp->shm_file)->i_size,
				shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

/* Called with shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_current(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_creator != current)
		return 0;

	/*
	 * Mark it as orphaned to destroy the segment when
	 * kernel.shm_rmid_forced is changed.
	 * It is a noop if the following shm_may_destroy() returns true.
	 */
	shp->shm_creator = NULL;

	/*
	 * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID
	 * is not set, it shouldn't be deleted here.
	 */
	if (!ns->shm_rmid_forced)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

/* Called with shm_ids(ns).rw_mutex locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with an already
	 * exited originating process.
	 *
	 * As shp->* are changed under rw_mutex, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

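/*
 * Called from the ipc sysctl handler when kernel.shm_rmid_forced is flipped
 * on, so that segments whose creator has already exited are reaped as soon
 * as the policy changes.
 */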
void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rw_mutex);
}


void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;

	if (shm_ids(ns).in_use == 0)
		return;

	/* Destroy all segments this task created that are not mapped yet */
	down_write(&shm_ids(ns).rw_mutex);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};
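
/*
 * Note the two-level delegation above: each attach maps through a wrapper
 * file exposing shm_vm_ops, whose fault/policy hooks forward to the vm_ops
 * the backing shmem or hugetlbfs file installed at mmap time (saved in
 * sfd->vm_ops by shm_mmap()), while open/close add attach accounting on top.
 */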

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	int id;
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
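		/*
		 * The huge page size is user-selectable: the SHM_HUGE_MASK
		 * bits of shmflg, starting at SHM_HUGE_SHIFT (bit 26 in the
		 * uapi headers), carry log2 of the desired page size, e.g.
		 * (21 << SHM_HUGE_SHIFT) requests 2MB pages; 0 picks the
		 * default huge page size.
		 */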
		struct hstate *hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT)
						& SHM_HUGE_MASK);
		size_t hugesize;

		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				&shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow an unaccounted (SHM_NORESERVE) segment under
		 * OVERCOMMIT_NEVER, even if it's asked for: strict overcommit
		 * implies strict accounting.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

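/*
 * For reference, the classic userspace sequence these syscalls implement
 * (an illustrative sketch; error handling elided):
 *
 *	int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
 *	char *p = shmat(id, NULL, 0);	- attach at a kernel-chosen address
 *	shmctl(id, IPC_RMID, NULL);	- destroy once the last user detaches
 *	p[0] = 42;			- mapping stays valid until shmdt()
 *	shmdt(p);
 */
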
static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);
		spin_lock(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: it must be called with no locks held; the rw_mutex is taken inside
 * this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	down_write(&shm_ids(ns).rw_mutex);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64.shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}

static int shmctl_nolock(struct ipc_namespace *ns, int shmid,
			 int cmd, int version, void __user *buf)
{
	int err;
	struct shmid_kernel *shp;

	/* preliminary security checks for *_INFO */
	if (cmd == IPC_INFO || cmd == SHM_INFO) {
		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;
	}

	switch (cmd) {
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		rcu_read_lock();
		if (cmd == SHM_STAT) {
			shp = shm_obtain_object(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_obtain_object_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out_unlock;
			}
			result = 0;
		}

		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		rcu_read_unlock();

		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	default:
		return -EINVAL;
	}

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case SHM_INFO:
	case SHM_STAT:
	case IPC_STAT:
		return shmctl_nolock(ns, shmid, cmd, version, buf);
	case IPC_RMID:
	case IPC_SET:
		return shmctl_down(ns, shmid, cmd, buf, version);
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *shm_file;

		rcu_read_lock();
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock1;
		}

		audit_ipc_obj(&(shp->shm_perm));
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock1;

		ipc_lock_object(&shp->shm_perm);
		if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
			kuid_t euid = current_euid();
			err = -EPERM;
			if (!uid_eq(euid, shp->shm_perm.uid) &&
			    !uid_eq(euid, shp->shm_perm.cuid))
				goto out_unlock0;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock0;
		}

		shm_file = shp->shm_file;
		if (is_file_hugepages(shm_file))
			goto out_unlock0;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			err = shmem_lock(shm_file, 1, user);
			if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
				shp->shm_perm.mode |= SHM_LOCKED;
				shp->mlock_user = user;
			}
			goto out_unlock0;
		}

		/* SHM_UNLOCK */
		if (!(shp->shm_perm.mode & SHM_LOCKED))
			goto out_unlock0;
		shmem_lock(shm_file, 0, shp->mlock_user);
		shp->shm_perm.mode &= ~SHM_LOCKED;
		shp->mlock_user = NULL;
		get_file(shm_file);
		ipc_unlock_object(&shp->shm_perm);
		rcu_read_unlock();
		shmem_unlock_mapping(shm_file->f_mapping);

		fput(shm_file);
		return err;
	}
	default:
		return -EINVAL;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
	      unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND)
				addr &= ~(shmlba - 1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
	return err;

out_unlock:
	shm_unlock(shp);
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			size = file_inode(vma->vm_file)->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct user_namespace *user_ns = seq_user_ns(s);
	struct shmid_kernel *shp = it;
	unsigned long rss = 0, swp = 0;

	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d  %4o " SIZE_SPEC " %5u %5u  "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu "
			  SIZE_SPEC " " SIZE_SPEC "\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  from_kuid_munged(user_ns, shp->shm_perm.uid),
			  from_kgid_munged(user_ns, shp->shm_perm.gid),
			  from_kuid_munged(user_ns, shp->shm_perm.cuid),
			  from_kgid_munged(user_ns, shp->shm_perm.cgid),
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim,
			  rss * PAGE_SIZE,
			  swp * PAGE_SIZE);
}
#endif