/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
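
/*
 * Usage note (editorial, not part of the original source): the three
 * knobs above are exposed as regular files under /proc/sys/fs/inotify/.
 * A minimal userspace sketch for reading one of them -- the path is the
 * only assumption, each file holds one decimal integer:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *		int max_watches;
 *
 *		if (f && fscanf(f, "%d", &max_watches) == 1)
 *			printf("max_user_watches = %d\n", max_watches);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 *
 * The defaults set in inotify_user_setup() at the bottom of this file are
 * 16384 queued events, 128 instances per user and 8192 watches per user.
 */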

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * Every mark should accept its own IN_IGNORED, care about its
	 * children, and receive events when the inode is unmounted.
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* keep only the event bits the caller asked for */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get one fsnotify_event from the queue if one exists and is small
 * enough to fit in "count".  Return an error pointer if the buffer
 * is not large enough for the event.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}
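
/*
 * Worked example (editorial note, not in the original source): assuming
 * sizeof(struct inotify_event) == 16 (four 32-bit fields, no name), an
 * event for a file named "a.txt" has name_len == 5, so the check above
 * computes roundup(5 + 1, 16) == 16 and event_size == 16 + 16 == 32.
 * A read() with count < 32 therefore fails with -EINVAL rather than
 * returning a truncated record.
 */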

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len + 1 (the extra byte is for the
	 * terminating '\0') to a multiple of event_size
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad it out to a multiple of
	 * sizeof(inotify_event) with zeros, via clear_user() below.
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
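
/*
 * On-the-wire layout (editorial illustration): with a 16-byte header and
 * the name "a.txt" (name_len == 5), the 32 bytes handed to userspace are
 *
 *	bytes  0..15	wd, mask, cookie, len (len == 16 here)
 *	bytes 16..20	'a' '.' 't' 'x' 't'
 *	bytes 21..31	'\0' padding from clear_user()
 *
 * so the next record, if any, starts at buf + 16 + len.
 */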

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}
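
/*
 * A minimal userspace consumer of the record stream produced above
 * (editorial sketch; the watched path "/tmp" and the chosen event bits
 * are arbitrary examples, and error handling is omitted):
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *	#include <sys/inotify.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t len;
 *		int fd = inotify_init();
 *
 *		inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *		len = read(fd, buf, sizeof(buf));	// blocks for events
 *		for (char *p = buf; p < buf + len;
 *		     p += sizeof(struct inotify_event) +
 *			  ((struct inotify_event *)p)->len) {
 *			struct inotify_event *ev = (struct inotify_event *)p;
 *
 *			printf("wd=%d mask=%#x name=%s\n",
 *			       ev->wd, ev->mask, ev->len ? ev->name : "");
 *		}
 *		return 0;
 *	}
 */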

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	/* free this group, matching the get in inotify_init1()->inotify_new_group() */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
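
/*
 * Userspace view of the FIONREAD case above (editorial sketch; "fd" is
 * assumed to be an inotify descriptor): the reported count is the total
 * number of bytes currently queued, including the padded names, so it
 * can be used to size a read buffer exactly:
 *
 *	#include <stdlib.h>
 *	#include <sys/ioctl.h>
 *
 *	int queued = 0;
 *	char *buf = NULL;
 *
 *	if (ioctl(fd, FIONREAD, &queued) == 0 && queued > 0)
 *		buf = malloc(queued);	// then read(fd, buf, queued)
 */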

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
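
/*
 * Editorial note on the allocation pattern above: idr_preload() fills a
 * per-CPU cache with GFP_KERNEL (which may sleep) *before* the spinlock
 * is taken, so that idr_alloc_cyclic() can run with GFP_NOWAIT while the
 * lock is held.  idr_alloc_cyclic() also hands out increasing watch
 * descriptors before wrapping, so a recently freed wd is not immediately
 * reused by a racing inotify_add_watch().
 */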

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							  int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * Does this i_mark think it is in the idr?  We shouldn't get
	 * called if it wasn't.
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/* Let's look in the idr to see if we can find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove.  Something went
	 * seriously wrong somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover with bad refcounting... */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_event *ignored_event, *notify_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		goto skip_send_ignore;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsnotify_get_group(group);
	fsn_event_priv->group = group;
	event_priv->wd = i_mark->wd;

	notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (notify_event) {
		if (IS_ERR(notify_event))
			ret = PTR_ERR(notify_event);
		else
			fsnotify_put_event(notify_event);
		inotify_free_event_priv(fsn_event_priv);
	}

skip_send_ignore:
	/* matches the reference taken when the event was created */
	if (ignored_event)
		fsnotify_put_event(ignored_event);

	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch() could race with another thread that added a
	 * watch between our update_existing and new_watch calls above; in
	 * that case, go back and try to update the existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}
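
/*
 * Userspace semantics of the update path above (editorial sketch; "fd"
 * and the watched path are assumptions, not taken from this file):
 * calling inotify_add_watch() twice on the same inode does not create a
 * second watch -- it replaces the mask, unless IN_MASK_ADD is passed, in
 * which case the masks are OR-ed together:
 *
 *	int wd1 = inotify_add_watch(fd, "/tmp", IN_CREATE);
 *	int wd2 = inotify_add_watch(fd, "/tmp", IN_DELETE | IN_MASK_ADD);
 *	// wd1 == wd2; the watch now reports both IN_CREATE and IN_DELETE
 */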

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency. */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group() took a reference to the group; it is dropped when the file is released */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}
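
/*
 * Flag note (editorial): because IN_CLOEXEC == O_CLOEXEC and
 * IN_NONBLOCK == O_NONBLOCK (enforced by the BUILD_BUG_ONs above), the
 * flags pass straight through to anon_inode_getfd().  A non-blocking
 * instance makes the read path above return -EAGAIN instead of sleeping;
 * a minimal userspace sketch (assumes <unistd.h> and <errno.h>):
 *
 *	int fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC);
 *	char buf[4096];
 *
 *	if (read(fd, buf, sizeof(buf)) < 0 && errno == EAGAIN)
 *		;	// queue is empty; poll() or try again later
 */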

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	/* reject a mask that contains none of the valid inotify event bits */
	if (unlikely(!(mask & ALL_INOTIFY_BITS)))
		return -EINVAL;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fdget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}
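
/*
 * Userspace view of the lookup flags above (editorial sketch): IN_ONLYDIR
 * maps to LOOKUP_DIRECTORY and IN_DONT_FOLLOW suppresses LOOKUP_FOLLOW,
 * so a watch can be restricted to directories and refuse to chase a
 * symlink:
 *
 *	int wd = inotify_add_watch(fd, "/etc",
 *				   IN_MODIFY | IN_ONLYDIR | IN_DONT_FOLLOW);
 *	if (wd < 0 && errno == ENOTDIR)
 *		;	// path exists but is not a directory
 */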

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
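
/*
 * Editorial note: destroying the mark above ends up in
 * inotify_ignored_and_remove_idr(), so userspace sees one final
 * IN_IGNORED event for the removed wd.  A consumer that keeps a
 * wd-to-path table should drop the entry on IN_IGNORED rather than on
 * the inotify_rm_watch() return, since the event can also arrive
 * unsolicited (IN_ONESHOT fired, watched file deleted, or unmount).
 */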

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);