/*
 * device_cgroup.c - device cgroup subsystem
 *
 * Copyright 2007 IBM Corp
 */

#include <linux/device_cgroup.h>
#include <linux/cgroup.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>

#define ACC_MKNOD 1
#define ACC_READ 2
#define ACC_WRITE 4
#define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE)

#define DEV_BLOCK 1
#define DEV_CHAR 2
#define DEV_ALL 4  /* this represents all devices */

static DEFINE_MUTEX(devcgroup_mutex);

enum devcg_behavior {
        DEVCG_DEFAULT_NONE,
        DEVCG_DEFAULT_ALLOW,
        DEVCG_DEFAULT_DENY,
};

/*
 * exception list locking rules:
 * hold devcgroup_mutex for update/read.
 * hold rcu_read_lock() for read.
 */

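/*
 * A minimal sketch of both patterns (illustrative only; "ex", "new" and
 * "old" are placeholder variables; see may_access() and
 * dev_exception_add()/dev_exception_rm() below for the real users):
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list)
 *		... read-only checks on ex ...
 *	rcu_read_unlock();
 *
 *	mutex_lock(&devcgroup_mutex);
 *	list_add_tail_rcu(&new->list, &dev_cgroup->exceptions);
 *	list_del_rcu(&old->list);
 *	kfree_rcu(old, rcu);
 *	mutex_unlock(&devcgroup_mutex);
 */
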
struct dev_exception_item {
        u32 major, minor;
        short type;
        short access;
        struct list_head list;
        struct rcu_head rcu;
};

struct dev_cgroup {
        struct cgroup_subsys_state css;
        struct list_head exceptions;
        enum devcg_behavior behavior;
        /* temporary list for pending propagation operations */
        struct list_head propagate_pending;
};

static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
        return container_of(s, struct dev_cgroup, css);
}

static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
{
        return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
}

static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
        return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
}

struct cgroup_subsys devices_subsys;

static int devcgroup_can_attach(struct cgroup *new_cgrp,
                                struct cgroup_taskset *set)
{
        struct task_struct *task = cgroup_taskset_first(set);

        if (current != task && !capable(CAP_SYS_ADMIN))
                return -EPERM;
        return 0;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
        struct dev_exception_item *ex, *tmp, *new;

        lockdep_assert_held(&devcgroup_mutex);

        list_for_each_entry(ex, orig, list) {
                new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
                if (!new)
                        goto free_and_exit;
                list_add_tail(&new->list, dest);
        }

        return 0;

free_and_exit:
        list_for_each_entry_safe(ex, tmp, dest, list) {
                list_del(&ex->list);
                kfree(ex);
        }
        return -ENOMEM;
}

/*
 * called under devcgroup_mutex
 */
static int dev_exception_add(struct dev_cgroup *dev_cgroup,
                             struct dev_exception_item *ex)
{
        struct dev_exception_item *excopy, *walk;

        lockdep_assert_held(&devcgroup_mutex);

        excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
        if (!excopy)
                return -ENOMEM;

        list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
                if (walk->type != ex->type)
                        continue;
                if (walk->major != ex->major)
                        continue;
                if (walk->minor != ex->minor)
                        continue;

                walk->access |= ex->access;
                kfree(excopy);
                excopy = NULL;
        }

        if (excopy != NULL)
                list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
        return 0;
}

/*
 * called under devcgroup_mutex
 */
static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
                             struct dev_exception_item *ex)
{
        struct dev_exception_item *walk, *tmp;

        lockdep_assert_held(&devcgroup_mutex);

        list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
                if (walk->type != ex->type)
                        continue;
                if (walk->major != ex->major)
                        continue;
                if (walk->minor != ex->minor)
                        continue;

                walk->access &= ~ex->access;
                if (!walk->access) {
                        list_del_rcu(&walk->list);
                        kfree_rcu(walk, rcu);
                }
        }
}

static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
        struct dev_exception_item *ex, *tmp;

        list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
                list_del_rcu(&ex->list);
                kfree_rcu(ex, rcu);
        }
}

/**
 * dev_exception_clean - frees all entries of the exception list
 * @dev_cgroup: dev_cgroup with the exception list to be cleaned
 *
 * called under devcgroup_mutex
 */
static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
{
        lockdep_assert_held(&devcgroup_mutex);

        __dev_exception_clean(dev_cgroup);
}

static inline bool is_devcg_online(const struct dev_cgroup *devcg)
{
        return (devcg->behavior != DEVCG_DEFAULT_NONE);
}

/**
 * devcgroup_online - initializes devcgroup's behavior and exceptions based on
 *                    parent's
 * @cgroup: cgroup getting online
 * returns 0 in case of success, error code otherwise
 */
static int devcgroup_online(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup, *parent_dev_cgroup = NULL;
        int ret = 0;

        mutex_lock(&devcgroup_mutex);
        dev_cgroup = cgroup_to_devcgroup(cgroup);
        if (cgroup->parent)
                parent_dev_cgroup = cgroup_to_devcgroup(cgroup->parent);

        if (parent_dev_cgroup == NULL)
                dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
        else {
                ret = dev_exceptions_copy(&dev_cgroup->exceptions,
                                          &parent_dev_cgroup->exceptions);
                if (!ret)
                        dev_cgroup->behavior = parent_dev_cgroup->behavior;
        }
        mutex_unlock(&devcgroup_mutex);

        return ret;
}

static void devcgroup_offline(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup = cgroup_to_devcgroup(cgroup);

        mutex_lock(&devcgroup_mutex);
        dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
        mutex_unlock(&devcgroup_mutex);
}

/*
 * called from kernel/cgroup.c with cgroup_lock() held.
 */
static struct cgroup_subsys_state *devcgroup_css_alloc(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup;

        dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
        if (!dev_cgroup)
                return ERR_PTR(-ENOMEM);
        INIT_LIST_HEAD(&dev_cgroup->exceptions);
        INIT_LIST_HEAD(&dev_cgroup->propagate_pending);
        dev_cgroup->behavior = DEVCG_DEFAULT_NONE;

        return &dev_cgroup->css;
}

static void devcgroup_css_free(struct cgroup *cgroup)
{
        struct dev_cgroup *dev_cgroup;

        dev_cgroup = cgroup_to_devcgroup(cgroup);
        __dev_exception_clean(dev_cgroup);
        kfree(dev_cgroup);
}

#define DEVCG_ALLOW 1
#define DEVCG_DENY 2
#define DEVCG_LIST 3

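/*
 * Buffer sizes used when formatting the "list" file: MAJMINLEN holds a
 * decimal u32 ("4294967295") or "*" plus a NUL, with some slack; ACCLEN
 * fits the longest access string ("rwm") plus a NUL.
 */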
#define MAJMINLEN 13
#define ACCLEN 4

static void set_access(char *acc, short access)
{
        int idx = 0;
        memset(acc, 0, ACCLEN);
        if (access & ACC_READ)
                acc[idx++] = 'r';
        if (access & ACC_WRITE)
                acc[idx++] = 'w';
        if (access & ACC_MKNOD)
                acc[idx++] = 'm';
}

static char type_to_char(short type)
{
        if (type == DEV_ALL)
                return 'a';
        if (type == DEV_CHAR)
                return 'c';
        if (type == DEV_BLOCK)
                return 'b';
        return 'X';
}

static void set_majmin(char *str, unsigned m)
{
        if (m == ~0)
                strcpy(str, "*");
        else
                sprintf(str, "%u", m);
}

static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
                              struct seq_file *m)
{
        struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
        struct dev_exception_item *ex;
        char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];

        rcu_read_lock();
        /*
         * To preserve compatibility:
         * - only show the "all devices" entry when the default policy is
         *   to allow
         * - list the exceptions when the default policy is to deny
         * This way, the file remains a "whitelist of devices".
         */
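        /*
         * For illustration, the lines emitted below look like (the values
         * here are arbitrary examples):
         *
         *	a *:* rwm
         *	c 1:3 rw
         *	b 8:0 rwm
         *
         * the first form under an allow default, the others as exceptions
         * under a deny default.
         */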
        if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                set_access(acc, ACC_MASK);
                set_majmin(maj, ~0);
                set_majmin(min, ~0);
                seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
                           maj, min, acc);
        } else {
                list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
                        set_access(acc, ex->access);
                        set_majmin(maj, ex->major);
                        set_majmin(min, ex->minor);
                        seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
                                   maj, min, acc);
                }
        }
        rcu_read_unlock();

        return 0;
}

/**
 * may_access - verifies if a new exception is part of what is allowed
 *              by a dev cgroup based on the default policy +
 *              exceptions. This is used to make sure a child cgroup
 *              won't have more privileges than its parent or to
 *              verify if a certain access is allowed.
 * @dev_cgroup: dev cgroup to be tested against
 * @refex: new exception
 * @behavior: behavior of the exception
 */
static bool may_access(struct dev_cgroup *dev_cgroup,
                       struct dev_exception_item *refex,
                       enum devcg_behavior behavior)
{
        struct dev_exception_item *ex;
        bool match = false;

        rcu_lockdep_assert(rcu_read_lock_held() ||
                           lockdep_is_held(&devcgroup_mutex),
                           "device_cgroup::may_access() called without proper synchronization");

        list_for_each_entry_rcu(ex, &dev_cgroup->exceptions, list) {
                if ((refex->type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
                        continue;
                if ((refex->type & DEV_CHAR) && !(ex->type & DEV_CHAR))
                        continue;
                if (ex->major != ~0 && ex->major != refex->major)
                        continue;
                if (ex->minor != ~0 && ex->minor != refex->minor)
                        continue;
                if (refex->access & (~ex->access))
                        continue;
                match = true;
                break;
        }

        if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                if (behavior == DEVCG_DEFAULT_ALLOW) {
                        /* the exception will deny access to certain devices */
                        return true;
                } else {
                        /* the exception will allow access to certain devices */
                        if (match)
                                /*
                                 * a new exception allowing access shouldn't
                                 * match a parent's exception
                                 */
                                return false;
                        return true;
                }
        } else {
                /* only behavior == DEVCG_DEFAULT_DENY allowed here */
                if (match)
                        /* parent has an exception that matches the proposed */
                        return true;
                else
                        return false;
        }
        return false;
}

/*
 * parent_has_perm:
 * when adding a new allow rule to a device exception list, the rule
 * must be allowed in the parent device
 */
static int parent_has_perm(struct dev_cgroup *childcg,
                           struct dev_exception_item *ex)
{
        struct cgroup *pcg = childcg->css.cgroup->parent;
        struct dev_cgroup *parent;

        if (!pcg)
                return 1;
        parent = cgroup_to_devcgroup(pcg);
        return may_access(parent, ex, childcg->behavior);
}

/**
 * may_allow_all - checks if it's possible to change the behavior to
 *                 allow based on parent's rules.
 * @parent: device cgroup's parent
 * returns: != 0 in case it's allowed, 0 otherwise
 */
static inline int may_allow_all(struct dev_cgroup *parent)
{
        if (!parent)
                return 1;
        return parent->behavior == DEVCG_DEFAULT_ALLOW;
}

/**
 * revalidate_active_exceptions - walks through the active exception list and
 *                                revalidates the exceptions based on parent's
 *                                behavior and exceptions. The exceptions that
 *                                are no longer valid will be removed.
 *                                Called with devcgroup_mutex held.
 * @devcg: cgroup whose exceptions will be checked
 *
 * This is one of the three key functions for hierarchy implementation.
 * This function is responsible for re-evaluating all the cgroup's active
 * exceptions due to a parent's exception change.
 * Refer to Documentation/cgroups/devices.txt for more details.
 */
static void revalidate_active_exceptions(struct dev_cgroup *devcg)
{
        struct dev_exception_item *ex;
        struct list_head *this, *tmp;

        list_for_each_safe(this, tmp, &devcg->exceptions) {
                ex = container_of(this, struct dev_exception_item, list);
                if (!parent_has_perm(devcg, ex))
                        dev_exception_rm(devcg, ex);
        }
}

/**
 * get_online_devcg - walks the cgroup tree and fills a list with the online
 *                    groups
 * @root: cgroup used as starting point
 * @online: list that will be filled with online groups
 *
 * Must be called with devcgroup_mutex held. Grabs RCU lock.
 * Because devcgroup_mutex is held, no devcg will become online or offline
 * during the tree walk (see devcgroup_online, devcgroup_offline).
 * A separate list is needed because propagate_behavior() and
 * propagate_exception() need to allocate memory and can block.
 */
static void get_online_devcg(struct cgroup *root, struct list_head *online)
{
        struct cgroup *pos;
        struct dev_cgroup *devcg;

        lockdep_assert_held(&devcgroup_mutex);

        rcu_read_lock();
        cgroup_for_each_descendant_pre(pos, root) {
                devcg = cgroup_to_devcgroup(pos);
                if (is_devcg_online(devcg))
                        list_add_tail(&devcg->propagate_pending, online);
        }
        rcu_read_unlock();
}

/**
 * propagate_exception - propagates a new exception to the children
 * @devcg_root: device cgroup that added a new exception
 * @ex: new exception to be propagated
 *
 * returns: 0 in case of success, != 0 in case of error
 */
static int propagate_exception(struct dev_cgroup *devcg_root,
                               struct dev_exception_item *ex)
{
        struct cgroup *root = devcg_root->css.cgroup;
        struct dev_cgroup *devcg, *parent, *tmp;
        int rc = 0;
        LIST_HEAD(pending);

        get_online_devcg(root, &pending);

        list_for_each_entry_safe(devcg, tmp, &pending, propagate_pending) {
                parent = cgroup_to_devcgroup(devcg->css.cgroup->parent);

                /*
                 * in case both root's and devcg's behavior is allow, a new
                 * restriction means adding to the exception list
                 */
                if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
                    devcg->behavior == DEVCG_DEFAULT_ALLOW) {
                        rc = dev_exception_add(devcg, ex);
                        if (rc)
                                break;
                } else {
                        /*
                         * in the other possible cases:
                         * root's behavior: allow, devcg's: deny
                         * root's behavior: deny, devcg's: deny
                         * the exception will be removed
                         */
                        dev_exception_rm(devcg, ex);
                }
                revalidate_active_exceptions(devcg);

                list_del_init(&devcg->propagate_pending);
        }
        return rc;
}

static inline bool has_children(struct dev_cgroup *devcgroup)
{
        struct cgroup *cgrp = devcgroup->css.cgroup;

        return !list_empty(&cgrp->children);
}

/*
 * Modify the exception list using allow/deny rules.
 * CAP_SYS_ADMIN is needed for this.  It's at least separate from CAP_MKNOD
 * so we can give a container CAP_MKNOD to let it create devices but not
 * modify the exception list.
 * It seems likely we'll want to add a CAP_CONTAINER capability to allow
 * us to also grant CAP_SYS_ADMIN to containers without giving away the
 * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
 *
 * Taking rules away is always allowed (given CAP_SYS_ADMIN).  Granting
 * new access is only allowed if you're in the top-level cgroup, or your
 * parent cgroup has the access you're asking for.
 */
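
/*
 * For reference, the rule strings parsed below follow
 * Documentation/cgroups/devices.txt; a few illustrative examples (the
 * device numbers are arbitrary):
 *
 *	a		switch the default behavior (allow or deny all)
 *	c 1:3 mr	char device 1:3, mknod and read access
 *	b 8:* rwm	any minor of block major 8, read/write/mknod
 */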
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
                                   int filetype, const char *buffer)
{
        const char *b;
        char temp[12];          /* 11 + 1 characters needed for a u32 */
        int count, rc = 0;
        struct dev_exception_item ex;
        struct cgroup *p = devcgroup->css.cgroup;
        struct dev_cgroup *parent = NULL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (p->parent)
                parent = cgroup_to_devcgroup(p->parent);

        memset(&ex, 0, sizeof(ex));
        b = buffer;

        switch (*b) {
        case 'a':
                switch (filetype) {
                case DEVCG_ALLOW:
                        if (has_children(devcgroup))
                                return -EINVAL;

                        if (!may_allow_all(parent))
                                return -EPERM;
                        dev_exception_clean(devcgroup);
                        devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
                        if (!parent)
                                break;

                        rc = dev_exceptions_copy(&devcgroup->exceptions,
                                                 &parent->exceptions);
                        if (rc)
                                return rc;
                        break;
                case DEVCG_DENY:
                        if (has_children(devcgroup))
                                return -EINVAL;

                        dev_exception_clean(devcgroup);
                        devcgroup->behavior = DEVCG_DEFAULT_DENY;
                        break;
                default:
                        return -EINVAL;
                }
                return 0;
        case 'b':
                ex.type = DEV_BLOCK;
                break;
        case 'c':
                ex.type = DEV_CHAR;
                break;
        default:
                return -EINVAL;
        }
        b++;
        if (!isspace(*b))
                return -EINVAL;
        b++;
        if (*b == '*') {
                ex.major = ~0;
                b++;
        } else if (isdigit(*b)) {
                memset(temp, 0, sizeof(temp));
                for (count = 0; count < sizeof(temp) - 1; count++) {
                        temp[count] = *b;
                        b++;
                        if (!isdigit(*b))
                                break;
                }
                rc = kstrtou32(temp, 10, &ex.major);
                if (rc)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }
        if (*b != ':')
                return -EINVAL;
        b++;

        /* read minor */
        if (*b == '*') {
                ex.minor = ~0;
                b++;
        } else if (isdigit(*b)) {
                memset(temp, 0, sizeof(temp));
                for (count = 0; count < sizeof(temp) - 1; count++) {
                        temp[count] = *b;
                        b++;
                        if (!isdigit(*b))
                                break;
                }
                rc = kstrtou32(temp, 10, &ex.minor);
                if (rc)
                        return -EINVAL;
        } else {
                return -EINVAL;
        }
        if (!isspace(*b))
                return -EINVAL;
        for (b++, count = 0; count < 3; count++, b++) {
                switch (*b) {
                case 'r':
                        ex.access |= ACC_READ;
                        break;
                case 'w':
                        ex.access |= ACC_WRITE;
                        break;
                case 'm':
                        ex.access |= ACC_MKNOD;
                        break;
                case '\n':
                case '\0':
                        count = 3;
                        break;
                default:
                        return -EINVAL;
                }
        }

        switch (filetype) {
        case DEVCG_ALLOW:
                if (!parent_has_perm(devcgroup, &ex))
                        return -EPERM;
                /*
                 * If the default policy is to allow, try to remove a
                 * matching exception instead.  And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
                        dev_exception_rm(devcgroup, &ex);
                        return 0;
                }
                rc = dev_exception_add(devcgroup, &ex);
                break;
        case DEVCG_DENY:
                /*
                 * If the default policy is to deny, try to remove a
                 * matching exception instead.  And be silent about it: we
                 * don't want to break compatibility
                 */
                if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
                        dev_exception_rm(devcgroup, &ex);
                else
                        rc = dev_exception_add(devcgroup, &ex);

                if (rc)
                        break;
                /* we only propagate new restrictions */
                rc = propagate_exception(devcgroup, &ex);
                break;
        default:
                rc = -EINVAL;
        }
        return rc;
}

static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
                                  const char *buffer)
{
        int retval;

        mutex_lock(&devcgroup_mutex);
        retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
                                         cft->private, buffer);
        mutex_unlock(&devcgroup_mutex);
        return retval;
}

static struct cftype dev_cgroup_files[] = {
        {
                .name = "allow",
                .write_string = devcgroup_access_write,
                .private = DEVCG_ALLOW,
        },
        {
                .name = "deny",
                .write_string = devcgroup_access_write,
                .private = DEVCG_DENY,
        },
        {
                .name = "list",
                .read_seq_string = devcgroup_seq_read,
                .private = DEVCG_LIST,
        },
        { }     /* terminate */
};

struct cgroup_subsys devices_subsys = {
        .name = "devices",
        .can_attach = devcgroup_can_attach,
        .css_alloc = devcgroup_css_alloc,
        .css_free = devcgroup_css_free,
        .css_online = devcgroup_online,
        .css_offline = devcgroup_offline,
        .subsys_id = devices_subsys_id,
        .base_cftypes = dev_cgroup_files,
};

/**
 * __devcgroup_check_permission - checks if an inode operation is permitted
 * @type: device type
 * @major: device major number
 * @minor: device minor number
 * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
 *
 * returns 0 on success, -EPERM in case the operation is not permitted
 */
static int __devcgroup_check_permission(short type, u32 major, u32 minor,
                                        short access)
{
        struct dev_cgroup *dev_cgroup;
        struct dev_exception_item ex;
        int rc;

        memset(&ex, 0, sizeof(ex));
        ex.type = type;
        ex.major = major;
        ex.minor = minor;
        ex.access = access;

        rcu_read_lock();
        dev_cgroup = task_devcgroup(current);
        rc = may_access(dev_cgroup, &ex, dev_cgroup->behavior);
        rcu_read_unlock();

        if (!rc)
                return -EPERM;

        return 0;
}

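/*
 * Callers normally reach the two helpers below through the static inline
 * wrappers in include/linux/device_cgroup.h (e.g.
 * devcgroup_inode_permission()), which skip inodes that are not block or
 * character devices before calling in here.  Roughly (a sketch of that
 * wrapper, not code from this file):
 *
 *	static inline int devcgroup_inode_permission(struct inode *inode,
 *						     int mask)
 *	{
 *		if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
 *			return 0;
 *		return __devcgroup_inode_permission(inode, mask);
 *	}
 */
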
int __devcgroup_inode_permission(struct inode *inode, int mask)
{
        short type, access = 0;

        if (S_ISBLK(inode->i_mode))
                type = DEV_BLOCK;
        if (S_ISCHR(inode->i_mode))
                type = DEV_CHAR;
        if (mask & MAY_WRITE)
                access |= ACC_WRITE;
        if (mask & MAY_READ)
                access |= ACC_READ;

        return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
                                            access);
}

int devcgroup_inode_mknod(int mode, dev_t dev)
{
        short type;

        if (!S_ISBLK(mode) && !S_ISCHR(mode))
                return 0;

        if (S_ISBLK(mode))
                type = DEV_BLOCK;
        else
                type = DEV_CHAR;

        return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
                                            ACC_MKNOD);
}