[PATCH] add a vfs_permission helper
fs/inotify.c

/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;
static atomic_t inotify_watches;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 *	iprune_sem (synchronize shrink_icache_memory())
 *		inode_lock (protects the super_block->s_inodes list)
 *			inode->inotify_sem (protects inode->inotify_watches and
 *					    watches->i_list)
 *				inotify_dev->sem (protects inotify_device and
 *						  watches->d_list)
 */
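
/*
 * For illustration only (a sketch, not part of the original patch): a path
 * that needs both of the innermost locks must take them in the order given
 * above, as inotify_ignore() and sys_inotify_add_watch() do below:
 *
 *	down(&inode->inotify_sem);
 *	down(&dev->sem);
 *	...
 *	up(&dev->sem);
 *	up(&inode->inotify_sem);
 */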

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to destroy_watch().
 * Additional references can bump the count via get_inotify_watch() and drop
 * the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */
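
/*
 * Illustrative sketch (not in the original file) of the reference pairing in
 * practice: the event-delivery path below pins a watch across the
 * device-lock section exactly this way:
 *
 *	get_inotify_watch(watch);
 *	down(&dev->sem);
 *	inotify_dev_queue_event(dev, watch, mask, cookie, name);
 *	up(&dev->sem);
 *	put_inotify_watch(watch);
 */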

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
	u32			last_wd;	/* the last wd allocated */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
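
/*
 * Example: with sysctl support compiled in, the limits above are tunable at
 * runtime from user space, e.g.
 *
 *	echo 32768 > /proc/sys/fs/inotify/max_queued_events
 *	echo 16384 > /proc/sys/fs/inotify/max_user_watches
 */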

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		idr_destroy(&dev->idr);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
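		/*
		 * Worked example (assuming a 16-byte struct inotify_event):
		 * name "foo" gives len = 4, so rem = 12 and event.len = 16.
		 * A 19-character name gives len = 20 > 16, so
		 * rem = 16 - (20 % 16) = 12 and event.len = 32.
		 */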
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the previous */
	last = inotify_dev_get_event(dev);
	if (last && last->event.mask == mask && last->event.wd == watch->wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			return;
		if (name && lastname && !strcmp(lastname, name))
			return;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}

/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&dev->idr, watch,
					dev->last_wd + 1, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return a nd
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = vfs_permission(nd, MAY_READ);
	if (error)
		path_release(nd);
	return error;
}
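
/*
 * For reference, a sketch of the vfs_permission() helper this patch
 * introduces: it lives in fs/namei.c, not this file, and is roughly the
 * following thin wrapper:
 *
 *	int vfs_permission(struct nameidata *nd, int mask)
 *	{
 *		return permission(nd->dentry->d_inode, mask, nd);
 *	}
 */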

/*
 * create_watch - creates a watch on the given device.
 *
 * Callers must hold dev->sem.  Calls inotify_dev_get_wd() so may sleep.
 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
 */
static struct inotify_watch *create_watch(struct inotify_device *dev,
					  u32 mask, struct inode *inode)
{
	struct inotify_watch *watch;
	int ret;

	if (atomic_read(&dev->user->inotify_watches) >=
			inotify_max_user_watches)
		return ERR_PTR(-ENOSPC);

	watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
	if (unlikely(!watch))
		return ERR_PTR(-ENOMEM);

	ret = inotify_dev_get_wd(dev, watch);
	if (unlikely(ret)) {
		kmem_cache_free(watch_cachep, watch);
		return ERR_PTR(ret);
	}

	dev->last_wd = watch->wd;
	watch->mask = mask;
	atomic_set(&watch->count, 0);
	INIT_LIST_HEAD(&watch->d_list);
	INIT_LIST_HEAD(&watch->i_list);

	/* save a reference to device and bump the count to make it official */
	get_inotify_dev(dev);
	watch->dev = dev;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* bump our own count, corresponding to our entry in dev->watches */
	get_inotify_watch(watch);

	atomic_inc(&dev->user->inotify_watches);
	atomic_inc(&inotify_watches);

	return watch;
}

/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	atomic_dec(&inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  We drop a
 * reference to the inode before returning.
 */
static void remove_watch(struct inotify_watch *watch,
			 struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
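
/*
 * Sketch of a typical caller, modeled on the fsnotify helpers in
 * <linux/fsnotify.h> (simplified; any dnotify calls omitted):
 *
 *	static inline void fsnotify_create(struct inode *dir,
 *					   const char *name)
 *	{
 *		inotify_inode_queue_event(dir, IN_CREATE, 0, name);
 *	}
 */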

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!atomic_read(&inotify_watches))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
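
/*
 * Illustration (a sketch, modeled on how rename notification uses this):
 * a rename emits a cookie-paired couple of events so user space can
 * correlate the two halves:
 *
 *	u32 cookie = inotify_get_cookie();
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name);
 */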

/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}
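
/*
 * User-space view (a sketch, not kernel code): read() returns events packed
 * back to back, each struct inotify_event followed by event.len bytes of
 * padded, NUL-terminated name, so a reader walks the buffer like this;
 * inotify_fd and handle_event() are hypothetical:
 *
 *	char buf[4096];
 *	ssize_t n = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (p < buf + n) {
 *		struct inotify_event *ev = (struct inotify_event *) p;
 *		handle_event(ev->wd, ev->mask, ev->len ? ev->name : NULL);
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 */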

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch, *found;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/* make sure that we did not race */
	found = idr_find(&dev->idr, wd);
	if (likely(found))
		remove_watch(found, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	/* drop the reference we took above, even if we lost the race */
	put_inotify_watch(watch);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	dev->last_wd = 0;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}
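
/*
 * Usage from user space (a sketch; if the C library lacks wrappers for
 * these syscalls, syscall(2) is needed instead):
 *
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/etc", IN_MODIFY | IN_CREATE);
 *	... read(fd, ...) and parse events as sketched above ...
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */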

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	int mask_add = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	ret = find_inode(path, &nd);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	down(&inode->inotify_sem);
	down(&dev->sem);

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		if (mask_add)
			old->mask |= mask;
		else
			old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}
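
/*
 * Example of the mask semantics above: a second add_watch on the same inode
 * replaces the mask unless IN_MASK_ADD is set, in which case the new bits
 * are OR'd in:
 *
 *	wd = inotify_add_watch(fd, path, IN_MODIFY);
 *	wd = inotify_add_watch(fd, path, IN_CREATE | IN_MASK_ADD);
 *
 * returns the same wd with an effective mask of IN_MODIFY | IN_CREATE.
 */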

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);
	atomic_set(&inotify_watches, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);