fs/inotify.c

/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

#include <asm/ioctls.h>

static atomic_t inotify_cookie;
static atomic_t inotify_watches;

static kmem_cache_t *watch_cachep;
static kmem_cache_t *event_cachep;

static struct vfsmount *inotify_mnt;

/* these are configurable via /proc/sys/fs/inotify/ */
int inotify_max_user_instances;
int inotify_max_user_watches;
int inotify_max_queued_events;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_sem (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_sem (protects inode->inotify_watches and watches->i_list)
 * inotify_dev->sem (protects inotify_device and watches->d_list)
 */
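
/*
 * Illustrative nesting (a sketch following the ordering above, not new
 * code): a path that needs both an inode's watch list and a device's event
 * queue takes the locks outermost-first and releases in reverse:
 *
 *	down(&inode->inotify_sem);
 *	down(&dev->sem);
 *	...queue events, walk watch lists...
 *	up(&dev->sem);
 *	up(&inode->inotify_sem);
 *
 * inotify_inode_queue_event() and inotify_ignore() below both follow this
 * pattern.
 */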

/*
 * Lifetimes of the three main data structures--inotify_device, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_device: Lifetime is from inotify_init() until release.  Additional
 * references can bump the count via get_inotify_dev() and drop the count via
 * put_inotify_dev().
 *
 * inotify_watch: Lifetime is from create_watch() to the final
 * put_inotify_watch().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * create_watch() to put_inotify_watch().
 */
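
/*
 * Illustrative pairing (a sketch of the convention above, not a new API):
 * a caller that needs a watch to stay alive across a lock drop takes its
 * own reference and releases it when done, e.g.
 *
 *	get_inotify_watch(watch);
 *	up(&dev->sem);
 *	...
 *	put_inotify_watch(watch);
 *
 * inotify_inode_queue_event() below uses exactly this pattern.
 */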

/*
 * struct inotify_device - represents an inotify instance
 *
 * This structure is protected by the semaphore 'sem'.
 */
struct inotify_device {
	wait_queue_head_t	wq;		/* wait queue for i/o */
	struct idr		idr;		/* idr mapping wd -> watch */
	struct semaphore	sem;		/* protects this bad boy */
	struct list_head	events;		/* list of queued events */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	struct user_struct	*user;		/* user who opened this dev */
	unsigned int		queue_size;	/* size of the queue (bytes) */
	unsigned int		event_count;	/* number of pending events */
	unsigned int		max_events;	/* maximum number of events */
	u32			last_wd;	/* the last wd allocated */
};

/*
 * struct inotify_kernel_event - An inotify event, originating from a watch and
 * queued for user-space.  A list of these is attached to each instance of the
 * device.  In read(), this list is walked and all events that can fit in the
 * buffer are returned.
 *
 * Protected by dev->sem of the device in which we are queued.
 */
struct inotify_kernel_event {
	struct inotify_event	event;	/* the user-space event */
	struct list_head	list;	/* entry in inotify_device's list */
	char			*name;	/* filename, if any */
};

/*
 * struct inotify_watch - represents a watch request on a specific inode
 *
 * d_list is protected by dev->sem of the associated watch->dev.
 * i_list and mask are protected by inode->inotify_sem of the associated inode.
 * dev, inode, and wd are never written to once the watch is created.
 */
struct inotify_watch {
	struct list_head	d_list;	/* entry in inotify_device's list */
	struct list_head	i_list;	/* entry in inode's list */
	atomic_t		count;	/* reference count */
	struct inotify_device	*dev;	/* associated device */
	struct inode		*inode;	/* associated inode */
	s32			wd;	/* watch descriptor */
	u32			mask;	/* event mask for this watch */
};

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.ctl_name	= INOTIFY_MAX_USER_INSTANCES,
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_USER_WATCHES,
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero,
	},
	{
		.ctl_name	= INOTIFY_MAX_QUEUED_EVENTS,
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		.strategy	= &sysctl_intvec,
		.extra1		= &zero
	},
	{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */

static inline void get_inotify_dev(struct inotify_device *dev)
{
	atomic_inc(&dev->count);
}

static inline void put_inotify_dev(struct inotify_device *dev)
{
	if (atomic_dec_and_test(&dev->count)) {
		atomic_dec(&dev->user->inotify_devs);
		free_uid(dev->user);
		kfree(dev);
	}
}

static inline void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}

/*
 * put_inotify_watch - decrements the ref count on a given watch.  Cleans up
 * the watch and its references if the count reaches zero.
 */
static inline void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		put_inotify_dev(watch->dev);
		iput(watch->inode);
		kmem_cache_free(watch_cachep, watch);
	}
}

/*
 * kernel_event - create a new kernel event with the given parameters
 *
 * This function can sleep.
 */
static struct inotify_kernel_event * kernel_event(s32 wd, u32 mask, u32 cookie,
						  const char *name)
{
	struct inotify_kernel_event *kevent;

	kevent = kmem_cache_alloc(event_cachep, GFP_KERNEL);
	if (unlikely(!kevent))
		return NULL;

	/* we hand this out to user-space, so zero it just in case */
	memset(&kevent->event, 0, sizeof(struct inotify_event));

	kevent->event.wd = wd;
	kevent->event.mask = mask;
	kevent->event.cookie = cookie;

	INIT_LIST_HEAD(&kevent->list);

	if (name) {
		size_t len, rem, event_size = sizeof(struct inotify_event);

		/*
		 * We need to pad the filename so as to properly align an
		 * array of inotify_event structures.  Because the structure is
		 * small and the common case is a small filename, we just round
		 * up to the next multiple of the structure's sizeof.  This is
		 * simple and safe for all architectures.
		 */
		len = strlen(name) + 1;
		rem = event_size - len;
		if (len > event_size) {
			rem = event_size - (len % event_size);
			if (len % event_size == 0)
				rem = 0;
		}
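
		/*
		 * Worked example (assuming a 16-byte struct inotify_event,
		 * the common case of four 32-bit fields): name "foo" gives
		 * len = 4 and rem = 12, so 16 bytes total; a 20-character
		 * name gives len = 21 and rem = 16 - (21 % 16) = 11, i.e.
		 * 32 bytes total.
		 */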

		kevent->name = kmalloc(len + rem, GFP_KERNEL);
		if (unlikely(!kevent->name)) {
			kmem_cache_free(event_cachep, kevent);
			return NULL;
		}
		memcpy(kevent->name, name, len);
		if (rem)
			memset(kevent->name + len, 0, rem);
		kevent->event.len = len + rem;
	} else {
		kevent->event.len = 0;
		kevent->name = NULL;
	}

	return kevent;
}

/*
 * inotify_dev_get_event - return the next event in the given dev's queue
 *
 * Caller must hold dev->sem.
 */
static inline struct inotify_kernel_event *
inotify_dev_get_event(struct inotify_device *dev)
{
	return list_entry(dev->events.next, struct inotify_kernel_event, list);
}

/*
 * inotify_dev_queue_event - add a new event to the given device
 *
 * Caller must hold dev->sem.  Can sleep (calls kernel_event()).
 */
static void inotify_dev_queue_event(struct inotify_device *dev,
				    struct inotify_watch *watch, u32 mask,
				    u32 cookie, const char *name)
{
	struct inotify_kernel_event *kevent, *last;

	/* coalescing: drop this event if it is a dupe of the previous */
	last = inotify_dev_get_event(dev);
	if (last && last->event.mask == mask && last->event.wd == watch->wd &&
			last->event.cookie == cookie) {
		const char *lastname = last->name;

		if (!name && !lastname)
			return;
		if (name && lastname && !strcmp(lastname, name))
			return;
	}

	/* the queue overflowed and we already sent the Q_OVERFLOW event */
	if (unlikely(dev->event_count > dev->max_events))
		return;

	/* if the queue overflows, we need to notify user space */
	if (unlikely(dev->event_count == dev->max_events))
		kevent = kernel_event(-1, IN_Q_OVERFLOW, cookie, NULL);
	else
		kevent = kernel_event(watch->wd, mask, cookie, name);

	if (unlikely(!kevent))
		return;

	/* queue the event and wake up anyone waiting */
	dev->event_count++;
	dev->queue_size += sizeof(struct inotify_event) + kevent->event.len;
	list_add_tail(&kevent->list, &dev->events);
	wake_up_interruptible(&dev->wq);
}

/*
 * remove_kevent - cleans up and ultimately frees the given kevent
 *
 * Caller must hold dev->sem.
 */
static void remove_kevent(struct inotify_device *dev,
			  struct inotify_kernel_event *kevent)
{
	list_del(&kevent->list);

	dev->event_count--;
	dev->queue_size -= sizeof(struct inotify_event) + kevent->event.len;

	kfree(kevent->name);
	kmem_cache_free(event_cachep, kevent);
}

/*
 * inotify_dev_event_dequeue - destroy an event on the given device
 *
 * Caller must hold dev->sem.
 */
static void inotify_dev_event_dequeue(struct inotify_device *dev)
{
	if (!list_empty(&dev->events)) {
		struct inotify_kernel_event *kevent;
		kevent = inotify_dev_get_event(dev);
		remove_kevent(dev, kevent);
	}
}

/*
 * inotify_dev_get_wd - returns the next WD for use by the given dev
 *
 * Callers must hold dev->sem.  This function can sleep.
 */
static int inotify_dev_get_wd(struct inotify_device *dev,
			      struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&dev->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&dev->idr, watch, dev->last_wd+1, &watch->wd);
	} while (ret == -EAGAIN);

	return ret;
}

/*
 * find_inode - resolve a user-given path to a specific inode and return the
 * nameidata
 */
static int find_inode(const char __user *dirname, struct nameidata *nd)
{
	int error;

	error = __user_walk(dirname, LOOKUP_FOLLOW, nd);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = permission(nd->dentry->d_inode, MAY_READ, NULL);
	if (error)
		path_release(nd);
	return error;
}
379
380/*
381 * create_watch - creates a watch on the given device.
382 *
383 * Callers must hold dev->sem. Calls inotify_dev_get_wd() so may sleep.
384 * Both 'dev' and 'inode' (by way of nameidata) need to be pinned.
385 */
386static struct inotify_watch *create_watch(struct inotify_device *dev,
387 u32 mask, struct inode *inode)
388{
389 struct inotify_watch *watch;
390 int ret;
391
b680716e
RL
392 if (atomic_read(&dev->user->inotify_watches) >=
393 inotify_max_user_watches)
0eeca283
RL
394 return ERR_PTR(-ENOSPC);
395
396 watch = kmem_cache_alloc(watch_cachep, GFP_KERNEL);
397 if (unlikely(!watch))
398 return ERR_PTR(-ENOMEM);
399
400 ret = inotify_dev_get_wd(dev, watch);
401 if (unlikely(ret)) {
402 kmem_cache_free(watch_cachep, watch);
403 return ERR_PTR(ret);
404 }
405
0bf955ce 406 dev->last_wd = watch->wd;
0eeca283
RL
407 watch->mask = mask;
408 atomic_set(&watch->count, 0);
409 INIT_LIST_HEAD(&watch->d_list);
410 INIT_LIST_HEAD(&watch->i_list);
411
412 /* save a reference to device and bump the count to make it official */
413 get_inotify_dev(dev);
414 watch->dev = dev;
415
416 /*
417 * Save a reference to the inode and bump the ref count to make it
418 * official. We hold a reference to nameidata, which makes this safe.
419 */
420 watch->inode = igrab(inode);
421
422 /* bump our own count, corresponding to our entry in dev->watches */
423 get_inotify_watch(watch);
424
425 atomic_inc(&dev->user->inotify_watches);
820249ba 426 atomic_inc(&inotify_watches);
0eeca283
RL
427
428 return watch;
429}

/*
 * inode_find_dev - find the watch associated with the given inode and dev
 *
 * Callers must hold inode->inotify_sem.
 */
static struct inotify_watch *inode_find_dev(struct inode *inode,
					    struct inotify_device *dev)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->dev == dev)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove_watch() without the IN_IGNORED event.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_device *dev)
{
	list_del(&watch->i_list);
	list_del(&watch->d_list);

	atomic_dec(&dev->user->inotify_watches);
	atomic_dec(&inotify_watches);
	idr_remove(&dev->idr, watch->wd);
	put_inotify_watch(watch);
}

/*
 * remove_watch - Remove a watch from both the device and the inode.  Sends
 * the IN_IGNORED event to the given device signifying that the inode is no
 * longer watched.
 *
 * Callers must hold both inode->inotify_sem and dev->sem.  We drop the
 * watch's reference: if it is the last one, put_inotify_watch() frees the
 * watch and iput()s the associated inode.
 */
static void remove_watch(struct inotify_watch *watch, struct inotify_device *dev)
{
	inotify_dev_queue_event(dev, watch, IN_IGNORED, 0, NULL);
	remove_watch_no_event(watch, dev);
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless, we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/* Kernel API */

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_device *dev = watch->dev;
			get_inotify_watch(watch);
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, mask, cookie, name);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, dev);
			up(&dev->sem);
			put_inotify_watch(watch);
		}
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);
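
/*
 * Illustrative caller (a sketch, not code from this file): a VFS hook that
 * reports creation of a directory entry would queue IN_CREATE against the
 * parent directory, passing the new name so user-space can tell which
 * entry changed:
 *
 *	void example_notify_create(struct inode *dir, struct dentry *dentry)
 *	{
 *		inotify_inode_queue_event(dir, IN_CREATE, 0,
 *					  dentry->d_name.name);
 *	}
 *
 * The real hooks of this kind live in include/linux/fsnotify.h.
 */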

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!atomic_read(&inotify_watches))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);

/**
 * inotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case the remove_watch() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
				atomic_read(&next_i->i_count) &&
				!(next_i->i_state & (I_CLEAR | I_FREEING |
					I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_sem keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		down(&inode->inotify_sem);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_device *dev = watch->dev;
			down(&dev->sem);
			inotify_dev_queue_event(dev, watch, IN_UNMOUNT, 0, NULL);
			remove_watch(watch, dev);
			up(&dev->sem);
		}
		up(&inode->inotify_sem);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	down(&inode->inotify_sem);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_device *dev = watch->dev;
		down(&dev->sem);
		remove_watch(watch, dev);
		up(&dev->sem);
	}
	up(&inode->inotify_sem);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Device Interface */

static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct inotify_device *dev = file->private_data;
	int ret = 0;

	poll_wait(file, &dev->wq, wait);
	down(&dev->sem);
	if (!list_empty(&dev->events))
		ret = POLLIN | POLLRDNORM;
	up(&dev->sem);

	return ret;
}

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	size_t event_size = sizeof(struct inotify_event);
	struct inotify_device *dev;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	dev = file->private_data;

	while (1) {
		int events;

		prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);

		down(&dev->sem);
		events = !list_empty(&dev->events);
		up(&dev->sem);
		if (events) {
			ret = 0;
			break;
		}

		if (file->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			break;
		}

		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		schedule();
	}

	finish_wait(&dev->wq, &wait);
	if (ret)
		return ret;

	down(&dev->sem);
	while (1) {
		struct inotify_kernel_event *kevent;

		ret = buf - start;
		if (list_empty(&dev->events))
			break;

		kevent = inotify_dev_get_event(dev);
		if (event_size + kevent->event.len > count)
			break;

		if (copy_to_user(buf, &kevent->event, event_size)) {
			ret = -EFAULT;
			break;
		}
		buf += event_size;
		count -= event_size;

		if (kevent->name) {
			if (copy_to_user(buf, kevent->name, kevent->event.len)) {
				ret = -EFAULT;
				break;
			}
			buf += kevent->event.len;
			count -= kevent->event.len;
		}

		remove_kevent(dev, kevent);
	}
	up(&dev->sem);

	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct inotify_device *dev = file->private_data;

	/*
	 * Destroy all of the watches on this device.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_sem before dev->sem.  The following works.
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct inode *inode;

		down(&dev->sem);
		watches = &dev->watches;
		if (list_empty(watches)) {
			up(&dev->sem);
			break;
		}
		watch = list_entry(watches->next, struct inotify_watch, d_list);
		get_inotify_watch(watch);
		up(&dev->sem);

		inode = watch->inode;
		down(&inode->inotify_sem);
		down(&dev->sem);
		remove_watch_no_event(watch, dev);
		up(&dev->sem);
		up(&inode->inotify_sem);
		put_inotify_watch(watch);
	}

	/* destroy all of the events on this device */
	down(&dev->sem);
	while (!list_empty(&dev->events))
		inotify_dev_event_dequeue(dev);
	up(&dev->sem);

	/* free this device: the put matching the get in inotify_init() */
	put_inotify_dev(dev);

	return 0;
}

/*
 * inotify_ignore - remove a given wd from this inotify instance.
 *
 * Can sleep.
 */
static int inotify_ignore(struct inotify_device *dev, s32 wd)
{
	struct inotify_watch *watch;
	struct inode *inode;

	down(&dev->sem);
	watch = idr_find(&dev->idr, wd);
	if (unlikely(!watch)) {
		up(&dev->sem);
		return -EINVAL;
	}
	get_inotify_watch(watch);
	inode = watch->inode;
	up(&dev->sem);

	down(&inode->inotify_sem);
	down(&dev->sem);

	/*
	 * Make sure that we did not race: only remove the watch if the idr
	 * still maps wd to it.  Keep using our pinned pointer so the final
	 * put below is safe even if another thread already removed the watch.
	 */
	if (likely(idr_find(&dev->idr, wd) == watch))
		remove_watch(watch, dev);

	up(&dev->sem);
	up(&inode->inotify_sem);
	put_inotify_watch(watch);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct inotify_device *dev;
	void __user *p;
	int ret = -ENOTTY;

	dev = file->private_data;
	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		ret = put_user(dev->queue_size, (int __user *) p);
		break;
	}

	return ret;
}

static struct file_operations inotify_fops = {
	.poll		= inotify_poll,
	.read		= inotify_read,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
};

asmlinkage long sys_inotify_init(void)
{
	struct inotify_device *dev;
	struct user_struct *user;
	struct file *filp;
	int fd, ret;

	fd = get_unused_fd();
	if (fd < 0)
		return fd;

	filp = get_empty_filp();
	if (!filp) {
		ret = -ENFILE;
		goto out_put_fd;
	}

	user = get_uid(current->user);
	if (unlikely(atomic_read(&user->inotify_devs) >=
			inotify_max_user_instances)) {
		ret = -EMFILE;
		goto out_free_uid;
	}

	dev = kmalloc(sizeof(struct inotify_device), GFP_KERNEL);
	if (unlikely(!dev)) {
		ret = -ENOMEM;
		goto out_free_uid;
	}

	filp->f_op = &inotify_fops;
	filp->f_vfsmnt = mntget(inotify_mnt);
	filp->f_dentry = dget(inotify_mnt->mnt_root);
	filp->f_mapping = filp->f_dentry->d_inode->i_mapping;
	filp->f_mode = FMODE_READ;
	filp->f_flags = O_RDONLY;
	filp->private_data = dev;

	idr_init(&dev->idr);
	INIT_LIST_HEAD(&dev->events);
	INIT_LIST_HEAD(&dev->watches);
	init_waitqueue_head(&dev->wq);
	sema_init(&dev->sem, 1);
	dev->event_count = 0;
	dev->queue_size = 0;
	dev->max_events = inotify_max_queued_events;
	dev->user = user;
	dev->last_wd = 0;
	atomic_set(&dev->count, 0);

	get_inotify_dev(dev);
	atomic_inc(&user->inotify_devs);
	fd_install(fd, filp);

	return fd;
out_free_uid:
	free_uid(user);
	put_filp(filp);
out_put_fd:
	put_unused_fd(fd);
	return ret;
}

asmlinkage long sys_inotify_add_watch(int fd, const char __user *path, u32 mask)
{
	struct inotify_watch *watch, *old;
	struct inode *inode;
	struct inotify_device *dev;
	struct nameidata nd;
	struct file *filp;
	int ret, fput_needed;
	int mask_add = 0;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	ret = find_inode(path, &nd);
	if (unlikely(ret))
		goto fput_and_out;

	/* inode held in place by reference to nd; dev by fget on fd */
	inode = nd.dentry->d_inode;
	dev = filp->private_data;

	down(&inode->inotify_sem);
	down(&dev->sem);

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't let user-space set invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS;
	if (unlikely(!mask)) {
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Handle the case of re-adding a watch on an (inode,dev) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_dev(inode, dev);
	if (unlikely(old)) {
		if (mask_add)
			old->mask |= mask;
		else
			old->mask = mask;
		ret = old->wd;
		goto out;
	}

	watch = create_watch(dev, mask, inode);
	if (unlikely(IS_ERR(watch))) {
		ret = PTR_ERR(watch);
		goto out;
	}

	/* Add the watch to the device's and the inode's list */
	list_add(&watch->d_list, &dev->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	ret = watch->wd;
out:
	up(&dev->sem);
	up(&inode->inotify_sem);
	path_release(&nd);
fput_and_out:
	fput_light(filp, fput_needed);
	return ret;
}

asmlinkage long sys_inotify_rm_watch(int fd, u32 wd)
{
	struct file *filp;
	struct inotify_device *dev;
	int ret, fput_needed;

	filp = fget_light(fd, &fput_needed);
	if (unlikely(!filp))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(filp->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto out;
	}

	dev = filp->private_data;
	ret = inotify_ignore(dev, wd);

out:
	fput_light(filp, fput_needed);
	return ret;
}
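
/*
 * Illustrative user-space usage of the three syscalls above (a sketch,
 * assuming wrappers named inotify_init(), inotify_add_watch() and
 * inotify_rm_watch() plus the event layout from <linux/inotify.h>; error
 * handling trimmed).  Events are variable-length: each record is
 * sizeof(struct inotify_event) plus event->len bytes of padded name.
 *
 *	char buf[4096];
 *	int fd = inotify_init();
 *	int wd = inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE);
 *	ssize_t len = read(fd, buf, sizeof(buf)), i;
 *	for (i = 0; i < len; ) {
 *		struct inotify_event *ev = (struct inotify_event *) &buf[i];
 *		printf("wd=%d mask=%x name=%s\n", ev->wd, ev->mask,
 *		       ev->len ? ev->name : "");
 *		i += sizeof(struct inotify_event) + ev->len;
 *	}
 *	inotify_rm_watch(fd, wd);
 *	close(fd);
 */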

static struct super_block *
inotify_get_sb(struct file_system_type *fs_type, int flags,
	       const char *dev_name, void *data)
{
	return get_sb_pseudo(fs_type, "inotify", NULL, 0xBAD1DEA);
}

static struct file_system_type inotify_fs_type = {
	.name		= "inotifyfs",
	.get_sb		= inotify_get_sb,
	.kill_sb	= kill_anon_super,
};

/*
 * inotify_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_setup(void)
{
	int ret;

	ret = register_filesystem(&inotify_fs_type);
	if (unlikely(ret))
		panic("inotify: register_filesystem returned %d!\n", ret);

	inotify_mnt = kern_mount(&inotify_fs_type);
	if (IS_ERR(inotify_mnt))
		panic("inotify: kern_mount ret %ld!\n", PTR_ERR(inotify_mnt));

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	atomic_set(&inotify_cookie, 0);
	atomic_set(&inotify_watches, 0);

	watch_cachep = kmem_cache_create("inotify_watch_cache",
					 sizeof(struct inotify_watch),
					 0, SLAB_PANIC, NULL, NULL);
	event_cachep = kmem_cache_create("inotify_event_cache",
					 sizeof(struct inotify_kernel_event),
					 0, SLAB_PANIC, NULL, NULL);

	return 0;
}

module_init(inotify_setup);