UPSTREAM: capabilities: ambient capabilities
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / kernel / user_namespace.c
1 /*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License as
4 * published by the Free Software Foundation, version 2 of the
5 * License.
6 */
7
8 #include <linux/export.h>
9 #include <linux/nsproxy.h>
10 #include <linux/slab.h>
11 #include <linux/user_namespace.h>
12 #include <linux/proc_ns.h>
13 #include <linux/highuid.h>
14 #include <linux/cred.h>
15 #include <linux/securebits.h>
16 #include <linux/keyctl.h>
17 #include <linux/key-type.h>
18 #include <keys/user-type.h>
19 #include <linux/seq_file.h>
20 #include <linux/fs.h>
21 #include <linux/uaccess.h>
22 #include <linux/ctype.h>
23 #include <linux/projid.h>
24 #include <linux/fs_struct.h>
25
/* Slab cache from which struct user_namespace objects are allocated. */
static struct kmem_cache *user_ns_cachep __read_mostly;
/* Serializes id-map installation and the setgroups allow/deny flag. */
static DEFINE_MUTEX(userns_state_mutex);

static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *map);
/* Point @cred at @user_ns and reset its securebits and capability sets
 * to the pristine state a task has inside that namespace.
 */
static void set_cred_user_ns(struct cred *cred, struct user_namespace *user_ns)
{
	/* Start with the same capabilities as init but useless for doing
	 * anything as the capabilities are bound to the new user namespace.
	 */
	cred->securebits = SECUREBITS_DEFAULT;
	cred->cap_inheritable = CAP_EMPTY_SET;
	cred->cap_permitted = CAP_FULL_SET;
	cred->cap_effective = CAP_FULL_SET;
	cred->cap_ambient = CAP_EMPTY_SET;
	cred->cap_bset = CAP_FULL_SET;
#ifdef CONFIG_KEYS
	/* Drop any request_key() authorisation key held from the old ns */
	key_put(cred->request_key_auth);
	cred->request_key_auth = NULL;
#endif
	/* tgcred will be cleared in our caller bc CLONE_THREAD won't be set */
	cred->user_ns = user_ns;
}
51
/*
 * Create a new user namespace, deriving the creator from the user in the
 * passed credentials, and replacing that user with the new root user for the
 * new namespace.
 *
 * This is called by copy_creds(), which will finish setting the target task's
 * credentials.
 *
 * Returns 0 on success, -EUSERS when the nesting limit is exceeded,
 * -EPERM when policy forbids creation, or -ENOMEM / the proc inum
 * allocation error.
 */
int create_user_ns(struct cred *new)
{
	struct user_namespace *ns, *parent_ns = new->user_ns;
	kuid_t owner = new->euid;
	kgid_t group = new->egid;
	int ret;

	/* Bound the nesting depth of user namespaces */
	if (parent_ns->level > 32)
		return -EUSERS;

	/*
	 * Verify that we can not violate the policy of which files
	 * may be accessed that is specified by the root directory,
	 * by verifying that the root directory is at the root of the
	 * mount namespace which allows all files to be accessed.
	 */
	if (current_chrooted())
		return -EPERM;

	/* The creator needs a mapping in the parent user namespace
	 * or else we won't be able to reasonably tell userspace who
	 * created a user_namespace.
	 */
	if (!kuid_has_mapping(parent_ns, owner) ||
	    !kgid_has_mapping(parent_ns, group))
		return -EPERM;

	ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
	if (!ns)
		return -ENOMEM;

	ret = proc_alloc_inum(&ns->proc_inum);
	if (ret) {
		kmem_cache_free(user_ns_cachep, ns);
		return ret;
	}

	atomic_set(&ns->count, 1);
	/* Leave the new->user_ns reference with the new user namespace. */
	ns->parent = parent_ns;
	ns->level = parent_ns->level + 1;
	ns->owner = owner;
	ns->group = group;

	/* Inherit USERNS_SETGROUPS_ALLOWED from our parent */
	mutex_lock(&userns_state_mutex);
	ns->flags = parent_ns->flags;
	mutex_unlock(&userns_state_mutex);

	set_cred_user_ns(new, ns);

	update_mnt_policy(ns);

	return 0;
}
115
116 int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
117 {
118 struct cred *cred;
119 int err = -ENOMEM;
120
121 if (!(unshare_flags & CLONE_NEWUSER))
122 return 0;
123
124 cred = prepare_creds();
125 if (cred) {
126 err = create_user_ns(cred);
127 if (err)
128 put_cred(cred);
129 else
130 *new_cred = cred;
131 }
132
133 return err;
134 }
135
/*
 * Free @ns (whose last reference has already been dropped) and then walk
 * up the chain: releasing each namespace implicitly drops the reference
 * it held on its parent, so every ancestor whose count reaches zero is
 * freed in the same pass.
 */
void free_user_ns(struct user_namespace *ns)
{
	struct user_namespace *parent;

	do {
		parent = ns->parent;
		proc_free_inum(ns->proc_inum);
		kmem_cache_free(user_ns_cachep, ns);
		ns = parent;
	} while (atomic_dec_and_test(&parent->count));
}
EXPORT_SYMBOL(free_user_ns);
148
149 static u32 map_id_range_down(struct uid_gid_map *map, u32 id, u32 count)
150 {
151 unsigned idx, extents;
152 u32 first, last, id2;
153
154 id2 = id + count - 1;
155
156 /* Find the matching extent */
157 extents = map->nr_extents;
158 smp_rmb();
159 for (idx = 0; idx < extents; idx++) {
160 first = map->extent[idx].first;
161 last = first + map->extent[idx].count - 1;
162 if (id >= first && id <= last &&
163 (id2 >= first && id2 <= last))
164 break;
165 }
166 /* Map the id or note failure */
167 if (idx < extents)
168 id = (id - first) + map->extent[idx].lower_first;
169 else
170 id = (u32) -1;
171
172 return id;
173 }
174
175 static u32 map_id_down(struct uid_gid_map *map, u32 id)
176 {
177 unsigned idx, extents;
178 u32 first, last;
179
180 /* Find the matching extent */
181 extents = map->nr_extents;
182 smp_rmb();
183 for (idx = 0; idx < extents; idx++) {
184 first = map->extent[idx].first;
185 last = first + map->extent[idx].count - 1;
186 if (id >= first && id <= last)
187 break;
188 }
189 /* Map the id or note failure */
190 if (idx < extents)
191 id = (id - first) + map->extent[idx].lower_first;
192 else
193 id = (u32) -1;
194
195 return id;
196 }
197
198 static u32 map_id_up(struct uid_gid_map *map, u32 id)
199 {
200 unsigned idx, extents;
201 u32 first, last;
202
203 /* Find the matching extent */
204 extents = map->nr_extents;
205 smp_rmb();
206 for (idx = 0; idx < extents; idx++) {
207 first = map->extent[idx].lower_first;
208 last = first + map->extent[idx].count - 1;
209 if (id >= first && id <= last)
210 break;
211 }
212 /* Map the id or note failure */
213 if (idx < extents)
214 id = (id - first) + map->extent[idx].first;
215 else
216 id = (u32) -1;
217
218 return id;
219 }
220
/**
 * make_kuid - Map a user-namespace uid pair into a kuid.
 * @ns: User namespace that the uid is in
 * @uid: User identifier
 *
 * Maps a user-namespace uid pair into a kernel internal kuid,
 * and returns that kuid.
 *
 * When there is no mapping defined for the user-namespace uid
 * pair INVALID_UID is returned.  Callers are expected to test
 * for and handle INVALID_UID being returned.  INVALID_UID
 * may be tested for using uid_valid().
 */
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
{
	/* Map the uid to a global kernel uid */
	return KUIDT_INIT(map_id_down(&ns->uid_map, uid));
}
EXPORT_SYMBOL(make_kuid);
240
/**
 * from_kuid - Create a uid from a kuid user-namespace pair.
 * @targ: The user namespace we want a uid in.
 * @kuid: The kernel internal uid to start with.
 *
 * Map @kuid into the user-namespace specified by @targ and
 * return the resulting uid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kuid has no mapping in @targ (uid_t)-1 is returned.
 */
uid_t from_kuid(struct user_namespace *targ, kuid_t kuid)
{
	/* Translate the global kernel uid into @targ's view of it */
	return map_id_up(&targ->uid_map, __kuid_val(kuid));
}
EXPORT_SYMBOL(from_kuid);
259
260 /**
261 * from_kuid_munged - Create a uid from a kuid user-namespace pair.
262 * @targ: The user namespace we want a uid in.
263 * @kuid: The kernel internal uid to start with.
264 *
265 * Map @kuid into the user-namespace specified by @targ and
266 * return the resulting uid.
267 *
268 * There is always a mapping into the initial user_namespace.
269 *
270 * Unlike from_kuid from_kuid_munged never fails and always
271 * returns a valid uid. This makes from_kuid_munged appropriate
272 * for use in syscalls like stat and getuid where failing the
273 * system call and failing to provide a valid uid are not an
274 * options.
275 *
276 * If @kuid has no mapping in @targ overflowuid is returned.
277 */
278 uid_t from_kuid_munged(struct user_namespace *targ, kuid_t kuid)
279 {
280 uid_t uid;
281 uid = from_kuid(targ, kuid);
282
283 if (uid == (uid_t) -1)
284 uid = overflowuid;
285 return uid;
286 }
287 EXPORT_SYMBOL(from_kuid_munged);
288
/**
 * make_kgid - Map a user-namespace gid pair into a kgid.
 * @ns: User namespace that the gid is in
 * @gid: Group identifier
 *
 * Maps a user-namespace gid pair into a kernel internal kgid,
 * and returns that kgid.
 *
 * When there is no mapping defined for the user-namespace gid
 * pair INVALID_GID is returned.  Callers are expected to test
 * for and handle INVALID_GID being returned.  INVALID_GID may be
 * tested for using gid_valid().
 */
kgid_t make_kgid(struct user_namespace *ns, gid_t gid)
{
	/* Map the gid to a global kernel gid */
	return KGIDT_INIT(map_id_down(&ns->gid_map, gid));
}
EXPORT_SYMBOL(make_kgid);
308
/**
 * from_kgid - Create a gid from a kgid user-namespace pair.
 * @targ: The user namespace we want a gid in.
 * @kgid: The kernel internal gid to start with.
 *
 * Map @kgid into the user-namespace specified by @targ and
 * return the resulting gid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kgid has no mapping in @targ (gid_t)-1 is returned.
 */
gid_t from_kgid(struct user_namespace *targ, kgid_t kgid)
{
	/* Translate the global kernel gid into @targ's view of it */
	return map_id_up(&targ->gid_map, __kgid_val(kgid));
}
EXPORT_SYMBOL(from_kgid);
327
328 /**
329 * from_kgid_munged - Create a gid from a kgid user-namespace pair.
330 * @targ: The user namespace we want a gid in.
331 * @kgid: The kernel internal gid to start with.
332 *
333 * Map @kgid into the user-namespace specified by @targ and
334 * return the resulting gid.
335 *
336 * There is always a mapping into the initial user_namespace.
337 *
338 * Unlike from_kgid from_kgid_munged never fails and always
339 * returns a valid gid. This makes from_kgid_munged appropriate
340 * for use in syscalls like stat and getgid where failing the
341 * system call and failing to provide a valid gid are not options.
342 *
343 * If @kgid has no mapping in @targ overflowgid is returned.
344 */
345 gid_t from_kgid_munged(struct user_namespace *targ, kgid_t kgid)
346 {
347 gid_t gid;
348 gid = from_kgid(targ, kgid);
349
350 if (gid == (gid_t) -1)
351 gid = overflowgid;
352 return gid;
353 }
354 EXPORT_SYMBOL(from_kgid_munged);
355
/**
 * make_kprojid - Map a user-namespace projid pair into a kprojid.
 * @ns: User namespace that the projid is in
 * @projid: Project identifier
 *
 * Maps a user-namespace projid pair into a kernel internal kprojid,
 * and returns that kprojid.
 *
 * When there is no mapping defined for the user-namespace projid
 * pair INVALID_PROJID is returned.  Callers are expected to test
 * for and handle INVALID_PROJID being returned.  INVALID_PROJID
 * may be tested for using projid_valid().
 */
kprojid_t make_kprojid(struct user_namespace *ns, projid_t projid)
{
	/* Map the projid to a global kernel projid */
	return KPROJIDT_INIT(map_id_down(&ns->projid_map, projid));
}
EXPORT_SYMBOL(make_kprojid);
375
/**
 * from_kprojid - Create a projid from a kprojid user-namespace pair.
 * @targ: The user namespace we want a projid in.
 * @kprojid: The kernel internal project identifier to start with.
 *
 * Map @kprojid into the user-namespace specified by @targ and
 * return the resulting projid.
 *
 * There is always a mapping into the initial user_namespace.
 *
 * If @kprojid has no mapping in @targ (projid_t)-1 is returned.
 */
projid_t from_kprojid(struct user_namespace *targ, kprojid_t kprojid)
{
	/* Translate the global kernel projid into @targ's view of it */
	return map_id_up(&targ->projid_map, __kprojid_val(kprojid));
}
EXPORT_SYMBOL(from_kprojid);
394
395 /**
396 * from_kprojid_munged - Create a projiid from a kprojid user-namespace pair.
397 * @targ: The user namespace we want a projid in.
398 * @kprojid: The kernel internal projid to start with.
399 *
400 * Map @kprojid into the user-namespace specified by @targ and
401 * return the resulting projid.
402 *
403 * There is always a mapping into the initial user_namespace.
404 *
405 * Unlike from_kprojid from_kprojid_munged never fails and always
406 * returns a valid projid. This makes from_kprojid_munged
407 * appropriate for use in syscalls like stat and where
408 * failing the system call and failing to provide a valid projid are
409 * not an options.
410 *
411 * If @kprojid has no mapping in @targ OVERFLOW_PROJID is returned.
412 */
413 projid_t from_kprojid_munged(struct user_namespace *targ, kprojid_t kprojid)
414 {
415 projid_t projid;
416 projid = from_kprojid(targ, kprojid);
417
418 if (projid == (projid_t) -1)
419 projid = OVERFLOW_PROJID;
420 return projid;
421 }
422 EXPORT_SYMBOL(from_kprojid_munged);
423
424
425 static int uid_m_show(struct seq_file *seq, void *v)
426 {
427 struct user_namespace *ns = seq->private;
428 struct uid_gid_extent *extent = v;
429 struct user_namespace *lower_ns;
430 uid_t lower;
431
432 lower_ns = seq_user_ns(seq);
433 if ((lower_ns == ns) && lower_ns->parent)
434 lower_ns = lower_ns->parent;
435
436 lower = from_kuid(lower_ns, KUIDT_INIT(extent->lower_first));
437
438 seq_printf(seq, "%10u %10u %10u\n",
439 extent->first,
440 lower,
441 extent->count);
442
443 return 0;
444 }
445
446 static int gid_m_show(struct seq_file *seq, void *v)
447 {
448 struct user_namespace *ns = seq->private;
449 struct uid_gid_extent *extent = v;
450 struct user_namespace *lower_ns;
451 gid_t lower;
452
453 lower_ns = seq_user_ns(seq);
454 if ((lower_ns == ns) && lower_ns->parent)
455 lower_ns = lower_ns->parent;
456
457 lower = from_kgid(lower_ns, KGIDT_INIT(extent->lower_first));
458
459 seq_printf(seq, "%10u %10u %10u\n",
460 extent->first,
461 lower,
462 extent->count);
463
464 return 0;
465 }
466
467 static int projid_m_show(struct seq_file *seq, void *v)
468 {
469 struct user_namespace *ns = seq->private;
470 struct uid_gid_extent *extent = v;
471 struct user_namespace *lower_ns;
472 projid_t lower;
473
474 lower_ns = seq_user_ns(seq);
475 if ((lower_ns == ns) && lower_ns->parent)
476 lower_ns = lower_ns->parent;
477
478 lower = from_kprojid(lower_ns, KPROJIDT_INIT(extent->lower_first));
479
480 seq_printf(seq, "%10u %10u %10u\n",
481 extent->first,
482 lower,
483 extent->count);
484
485 return 0;
486 }
487
488 static void *m_start(struct seq_file *seq, loff_t *ppos, struct uid_gid_map *map)
489 {
490 struct uid_gid_extent *extent = NULL;
491 loff_t pos = *ppos;
492
493 if (pos < map->nr_extents)
494 extent = &map->extent[pos];
495
496 return extent;
497 }
498
/* seq_file start op for /proc/<pid>/uid_map */
static void *uid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->uid_map);
}
505
/* seq_file start op for /proc/<pid>/gid_map */
static void *gid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->gid_map);
}
512
/* seq_file start op for /proc/<pid>/projid_map */
static void *projid_m_start(struct seq_file *seq, loff_t *ppos)
{
	struct user_namespace *ns = seq->private;

	return m_start(seq, ppos, &ns->projid_map);
}
519
/* Shared seq_file next op: advance the position and re-seek via the
 * map-specific start op. */
static void *m_next(struct seq_file *seq, void *v, loff_t *pos)
{
	(*pos)++;
	return seq->op->start(seq, pos);
}
525
/* Shared seq_file stop op: nothing to release. */
static void m_stop(struct seq_file *seq, void *v)
{
	return;
}
530
/* seq_file iterators backing /proc/<pid>/{uid_map,gid_map,projid_map} */
struct seq_operations proc_uid_seq_operations = {
	.start = uid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = uid_m_show,
};

struct seq_operations proc_gid_seq_operations = {
	.start = gid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = gid_m_show,
};

struct seq_operations proc_projid_seq_operations = {
	.start = projid_m_start,
	.stop = m_stop,
	.next = m_next,
	.show = projid_m_show,
};
551
552 static bool mappings_overlap(struct uid_gid_map *new_map, struct uid_gid_extent *extent)
553 {
554 u32 upper_first, lower_first, upper_last, lower_last;
555 unsigned idx;
556
557 upper_first = extent->first;
558 lower_first = extent->lower_first;
559 upper_last = upper_first + extent->count - 1;
560 lower_last = lower_first + extent->count - 1;
561
562 for (idx = 0; idx < new_map->nr_extents; idx++) {
563 u32 prev_upper_first, prev_lower_first;
564 u32 prev_upper_last, prev_lower_last;
565 struct uid_gid_extent *prev;
566
567 prev = &new_map->extent[idx];
568
569 prev_upper_first = prev->first;
570 prev_lower_first = prev->lower_first;
571 prev_upper_last = prev_upper_first + prev->count - 1;
572 prev_lower_last = prev_lower_first + prev->count - 1;
573
574 /* Does the upper range intersect a previous extent? */
575 if ((prev_upper_first <= upper_last) &&
576 (prev_upper_last >= upper_first))
577 return true;
578
579 /* Does the lower range intersect a previous extent? */
580 if ((prev_lower_first <= lower_last) &&
581 (prev_lower_last >= lower_first))
582 return true;
583 }
584 return false;
585 }
586
/*
 * map_write - parse and install an id map written to /proc/<pid>/*_map.
 * @cap_setid:  capability required over @ns (CAP_SETUID or CAP_SETGID),
 *              or an invalid capability when none is needed (projid).
 * @map:        the map being established; writable exactly once.
 * @parent_map: the parent namespace's map, used to translate the lower
 *              ids into the kernel global id space.
 *
 * Input is up to one page of "first lower_first count" lines.  Returns
 * the byte count consumed on success or a negative errno.
 */
static ssize_t map_write(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos,
			 int cap_setid,
			 struct uid_gid_map *map,
			 struct uid_gid_map *parent_map)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	struct uid_gid_map new_map;
	unsigned idx;
	struct uid_gid_extent *extent = NULL;
	unsigned long page = 0;
	char *kbuf, *pos, *next_line;
	ssize_t ret = -EINVAL;

	/*
	 * The userns_state_mutex serializes all writes to any given map.
	 *
	 * Any map is only ever written once.
	 *
	 * An id map fits within 1 cache line on most architectures.
	 *
	 * On read nothing needs to be done unless you are on an
	 * architecture with a crazy cache coherency model like alpha.
	 *
	 * There is a one time data dependency between reading the
	 * count of the extents and the values of the extents.  The
	 * desired behavior is to see the values of the extents that
	 * were written before the count of the extents.
	 *
	 * To achieve this smp_wmb() is used to guarantee the write
	 * order and smp_rmb() guarantees that we don't have crazy
	 * architectures returning stale data.
	 */
	mutex_lock(&userns_state_mutex);

	ret = -EPERM;
	/* Only allow one successful write to the map */
	if (map->nr_extents != 0)
		goto out;

	/*
	 * Adjusting namespace settings requires capabilities on the target.
	 */
	if (cap_valid(cap_setid) && !file_ns_capable(file, ns, CAP_SYS_ADMIN))
		goto out;

	/* Get a buffer */
	ret = -ENOMEM;
	page = __get_free_page(GFP_TEMPORARY);
	kbuf = (char *) page;
	if (!page)
		goto out;

	/* Only allow <= page size writes at the beginning of the file */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= PAGE_SIZE))
		goto out;

	/* Slurp in the user data */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';

	/* Parse the user data: one "first lower_first count" triple per line */
	ret = -EINVAL;
	pos = kbuf;
	new_map.nr_extents = 0;
	for (;pos; pos = next_line) {
		extent = &new_map.extent[new_map.nr_extents];

		/* Find the end of line and ensure I don't look past it */
		next_line = strchr(pos, '\n');
		if (next_line) {
			*next_line = '\0';
			next_line++;
			if (*next_line == '\0')
				next_line = NULL;
		}

		pos = skip_spaces(pos);
		extent->first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->lower_first = simple_strtoul(pos, &pos, 10);
		if (!isspace(*pos))
			goto out;

		pos = skip_spaces(pos);
		extent->count = simple_strtoul(pos, &pos, 10);
		if (*pos && !isspace(*pos))
			goto out;

		/* Verify there is not trailing junk on the line */
		pos = skip_spaces(pos);
		if (*pos != '\0')
			goto out;

		/* Verify we have been given valid starting values */
		if ((extent->first == (u32) -1) ||
		    (extent->lower_first == (u32) -1 ))
			goto out;

		/* Verify count is not zero and does not cause the extent to wrap */
		if ((extent->first + extent->count) <= extent->first)
			goto out;
		if ((extent->lower_first + extent->count) <= extent->lower_first)
			goto out;

		/* Do the ranges in extent overlap any previous extents? */
		if (mappings_overlap(&new_map, extent))
			goto out;

		new_map.nr_extents++;

		/* Fail if the file contains too many extents */
		if ((new_map.nr_extents == UID_GID_MAP_MAX_EXTENTS) &&
		    (next_line != NULL))
			goto out;
	}
	/* Be very certain the new map actually exists */
	if (new_map.nr_extents == 0)
		goto out;

	ret = -EPERM;
	/* Validate the user is allowed to use user id's mapped to. */
	if (!new_idmap_permitted(file, ns, cap_setid, &new_map))
		goto out;

	/* Map the lower ids from the parent user namespace to the
	 * kernel global id space.
	 */
	for (idx = 0; idx < new_map.nr_extents; idx++) {
		u32 lower_first;
		extent = &new_map.extent[idx];

		lower_first = map_id_range_down(parent_map,
						extent->lower_first,
						extent->count);

		/* Fail if we can not map the specified extent to
		 * the kernel global id space.
		 */
		if (lower_first == (u32) -1)
			goto out;

		extent->lower_first = lower_first;
	}

	/* Install the map: publish the extent data before the extent
	 * count so lockless readers never see uninitialized extents. */
	memcpy(map->extent, new_map.extent,
	       new_map.nr_extents*sizeof(new_map.extent[0]));
	smp_wmb();
	map->nr_extents = new_map.nr_extents;

	*ppos = count;
	ret = count;
out:
	mutex_unlock(&userns_state_mutex);
	if (page)
		free_page(page);
	return ret;
}
753
754 ssize_t proc_uid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
755 {
756 struct seq_file *seq = file->private_data;
757 struct user_namespace *ns = seq->private;
758 struct user_namespace *seq_ns = seq_user_ns(seq);
759
760 if (!ns->parent)
761 return -EPERM;
762
763 if ((seq_ns != ns) && (seq_ns != ns->parent))
764 return -EPERM;
765
766 return map_write(file, buf, size, ppos, CAP_SETUID,
767 &ns->uid_map, &ns->parent->uid_map);
768 }
769
770 ssize_t proc_gid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
771 {
772 struct seq_file *seq = file->private_data;
773 struct user_namespace *ns = seq->private;
774 struct user_namespace *seq_ns = seq_user_ns(seq);
775
776 if (!ns->parent)
777 return -EPERM;
778
779 if ((seq_ns != ns) && (seq_ns != ns->parent))
780 return -EPERM;
781
782 return map_write(file, buf, size, ppos, CAP_SETGID,
783 &ns->gid_map, &ns->parent->gid_map);
784 }
785
786 ssize_t proc_projid_map_write(struct file *file, const char __user *buf, size_t size, loff_t *ppos)
787 {
788 struct seq_file *seq = file->private_data;
789 struct user_namespace *ns = seq->private;
790 struct user_namespace *seq_ns = seq_user_ns(seq);
791
792 if (!ns->parent)
793 return -EPERM;
794
795 if ((seq_ns != ns) && (seq_ns != ns->parent))
796 return -EPERM;
797
798 /* Anyone can set any valid project id no capability needed */
799 return map_write(file, buf, size, ppos, -1,
800 &ns->projid_map, &ns->parent->projid_map);
801 }
802
/*
 * May the opener of @file install @new_map on @ns?  @cap_setid is
 * CAP_SETUID for uid maps, CAP_SETGID for gid maps, or an invalid
 * capability for project id maps (which require no privilege).
 */
static bool new_idmap_permitted(const struct file *file,
				struct user_namespace *ns, int cap_setid,
				struct uid_gid_map *new_map)
{
	const struct cred *cred = file->f_cred;
	/* Don't allow mappings that would allow anything that wouldn't
	 * be allowed without the establishment of unprivileged mappings.
	 */
	if ((new_map->nr_extents == 1) && (new_map->extent[0].count == 1) &&
	    uid_eq(ns->owner, cred->euid)) {
		/* Unprivileged single-id case: the namespace owner may map
		 * only their own euid/egid. */
		u32 id = new_map->extent[0].lower_first;
		if (cap_setid == CAP_SETUID) {
			kuid_t uid = make_kuid(ns->parent, id);
			if (uid_eq(uid, cred->euid))
				return true;
		} else if (cap_setid == CAP_SETGID) {
			kgid_t gid = make_kgid(ns->parent, id);
			/* Mapping one's own egid is only safe once setgroups
			 * has been disabled in this namespace. */
			if (!(ns->flags & USERNS_SETGROUPS_ALLOWED) &&
			    gid_eq(gid, cred->egid))
				return true;
		}
	}

	/* Allow anyone to set a mapping that doesn't require privilege */
	if (!cap_valid(cap_setid))
		return true;

	/* Allow the specified ids if we have the appropriate capability
	 * (CAP_SETUID or CAP_SETGID) over the parent user namespace.
	 * And the opener of the id file also had the appropriate capability.
	 */
	if (ns_capable(ns->parent, cap_setid) &&
	    file_ns_capable(file, ns->parent, cap_setid))
		return true;

	return false;
}
840
841 int proc_setgroups_show(struct seq_file *seq, void *v)
842 {
843 struct user_namespace *ns = seq->private;
844 unsigned long userns_flags = ACCESS_ONCE(ns->flags);
845
846 seq_printf(seq, "%s\n",
847 (userns_flags & USERNS_SETGROUPS_ALLOWED) ?
848 "allow" : "deny");
849 return 0;
850 }
851
/*
 * Write handler for /proc/<pid>/setgroups.  Accepts exactly "allow" or
 * "deny" (optionally followed by whitespace).  "deny" is permanent and
 * only possible before a gid map has been installed; "allow" merely
 * re-asserts a still-enabled policy.
 */
ssize_t proc_setgroups_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct seq_file *seq = file->private_data;
	struct user_namespace *ns = seq->private;
	char kbuf[8], *pos;
	bool setgroups_allowed;
	ssize_t ret;

	/* Only allow a very narrow range of strings to be written */
	ret = -EINVAL;
	if ((*ppos != 0) || (count >= sizeof(kbuf)))
		goto out;

	/* What was written? */
	ret = -EFAULT;
	if (copy_from_user(kbuf, buf, count))
		goto out;
	kbuf[count] = '\0';
	pos = kbuf;

	/* What is being requested? */
	ret = -EINVAL;
	if (strncmp(pos, "allow", 5) == 0) {
		pos += 5;
		setgroups_allowed = true;
	}
	else if (strncmp(pos, "deny", 4) == 0) {
		pos += 4;
		setgroups_allowed = false;
	}
	else
		goto out;

	/* Verify there is not trailing junk on the line */
	pos = skip_spaces(pos);
	if (*pos != '\0')
		goto out;

	ret = -EPERM;
	mutex_lock(&userns_state_mutex);
	if (setgroups_allowed) {
		/* Enabling setgroups after setgroups has been disabled
		 * is not allowed.
		 */
		if (!(ns->flags & USERNS_SETGROUPS_ALLOWED))
			goto out_unlock;
	} else {
		/* Permanently disabling setgroups after setgroups has
		 * been enabled by writing the gid_map is not allowed.
		 */
		if (ns->gid_map.nr_extents != 0)
			goto out_unlock;
		ns->flags &= ~USERNS_SETGROUPS_ALLOWED;
	}
	mutex_unlock(&userns_state_mutex);

	/* Report a successful write */
	*ppos = count;
	ret = count;
out:
	return ret;
out_unlock:
	mutex_unlock(&userns_state_mutex);
	goto out;
}
918
919 bool userns_may_setgroups(const struct user_namespace *ns)
920 {
921 bool allowed;
922
923 mutex_lock(&userns_state_mutex);
924 /* It is not safe to use setgroups until a gid mapping in
925 * the user namespace has been established.
926 */
927 allowed = ns->gid_map.nr_extents != 0;
928 /* Is setgroups allowed? */
929 allowed = allowed && (ns->flags & USERNS_SETGROUPS_ALLOWED);
930 mutex_unlock(&userns_state_mutex);
931
932 return allowed;
933 }
934
/* proc_ns_operations hook: take a reference on @task's user namespace. */
static void *userns_get(struct task_struct *task)
{
	struct user_namespace *user_ns;

	/* The task's cred may change; pin the namespace under RCU */
	rcu_read_lock();
	user_ns = get_user_ns(__task_cred(task)->user_ns);
	rcu_read_unlock();

	return user_ns;
}
945
/* proc_ns_operations hook: drop the reference taken by userns_get(). */
static void userns_put(void *ns)
{
	put_user_ns(ns);
}
950
/*
 * setns(CLONE_NEWUSER) entry point: move the calling task's credentials
 * into @ns.  Refused for multi-threaded callers, callers sharing their
 * fs_struct, re-entry of the current namespace, and callers lacking
 * CAP_SYS_ADMIN over the target.
 */
static int userns_install(struct nsproxy *nsproxy, void *ns)
{
	struct user_namespace *user_ns = ns;
	struct cred *cred;

	/* Don't allow gaining capabilities by reentering
	 * the same user namespace.
	 */
	if (user_ns == current_user_ns())
		return -EINVAL;

	/* Threaded processes may not enter a different user namespace */
	if (atomic_read(&current->mm->mm_users) > 1)
		return -EINVAL;

	/* Nor may processes sharing their fs_struct with others */
	if (current->fs->users != 1)
		return -EINVAL;

	/* Entering a namespace requires CAP_SYS_ADMIN over it */
	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	cred = prepare_creds();
	if (!cred)
		return -ENOMEM;

	/* Swap the cred's namespace reference for one on @user_ns */
	put_user_ns(cred->user_ns);
	set_cred_user_ns(cred, get_user_ns(user_ns));

	return commit_creds(cred);
}
981
982 static unsigned int userns_inum(void *ns)
983 {
984 struct user_namespace *user_ns = ns;
985 return user_ns->proc_inum;
986 }
987
/* Operations behind /proc/<pid>/ns/user for the setns() machinery. */
const struct proc_ns_operations userns_operations = {
	.name = "user",
	.type = CLONE_NEWUSER,
	.get = userns_get,
	.put = userns_put,
	.install = userns_install,
	.inum = userns_inum,
};
996
/* Boot-time setup: create the user_namespace slab cache. */
static __init int user_namespaces_init(void)
{
	user_ns_cachep = KMEM_CACHE(user_namespace, SLAB_PANIC);
	return 0;
}
module_init(user_namespaces_init);