/*
 * fs/inotify.c - inode-based file event notifications
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Kernel API added by: Amy Griffis <amy.griffis@hp.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/writeback.h>
#include <linux/inotify.h>

static atomic_t inotify_cookie;

/*
 * Lock ordering:
 *
 * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
 * iprune_mutex (synchronize shrink_icache_memory())
 * inode_lock (protects the super_block->s_inodes list)
 * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
 * inotify_handle->mutex (protects inotify_handle and watches->h_list)
 *
 * The inode->inotify_mutex and inotify_handle->mutex are held during execution
 * of a caller's event handler.  Thus, the caller must not hold any locks
 * taken in their event handler while calling any of the published inotify
 * interfaces.
 */
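
/*
 * For instance, the tail of inotify_add_watch() below acquires and releases
 * the two mutexes in exactly the order the list above prescribes:
 *
 *	mutex_lock(&inode->inotify_mutex);
 *	mutex_lock(&ih->mutex);
 *	...
 *	mutex_unlock(&ih->mutex);
 *	mutex_unlock(&inode->inotify_mutex);
 */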

/*
 * Lifetimes of the three main data structures--inotify_handle, inode, and
 * inotify_watch--are managed by reference count.
 *
 * inotify_handle: Lifetime is from inotify_init() to inotify_destroy().
 * Additional references can bump the count via get_inotify_handle() and drop
 * the count via put_inotify_handle().
 *
 * inotify_watch: for inotify's purposes, lifetime is from inotify_add_watch()
 * to remove_watch_no_event().  Additional references can bump the count via
 * get_inotify_watch() and drop the count via put_inotify_watch().  The caller
 * is responsible for the final put after receiving IN_IGNORED, or when using
 * IN_ONESHOT after receiving the first event.  Inotify does the final put if
 * inotify_destroy() is called.
 *
 * inode: Pinned so long as the inode is associated with a watch, from
 * inotify_add_watch() to the final put_inotify_watch().
 */
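
/*
 * A minimal sketch of that lifecycle from a consumer's point of view, using
 * the interfaces defined below (my_ops, my_watch and my_inode are
 * hypothetical caller-side names).  inotify_init_watch() leaves the count
 * at 1; inotify_rm_wd() queues IN_IGNORED, after which the handler owes the
 * final put; inotify_destroy() instead tears everything down in one call:
 *
 *	struct inotify_handle *ih = inotify_init(&my_ops);
 *	if (IS_ERR(ih))
 *		return PTR_ERR(ih);
 *	inotify_init_watch(&my_watch);
 *	s32 wd = inotify_add_watch(ih, &my_watch, my_inode, IN_MODIFY);
 *	...
 *	inotify_rm_wd(ih, wd);
 *	inotify_destroy(ih);
 */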

/*
 * struct inotify_handle - represents an inotify instance
 *
 * This structure is protected by the mutex 'mutex'.
 */
struct inotify_handle {
	struct idr		idr;		/* idr mapping wd -> watch */
	struct mutex		mutex;		/* protects this bad boy */
	struct list_head	watches;	/* list of watches */
	atomic_t		count;		/* reference count */
	u32			last_wd;	/* the last wd allocated */
	const struct inotify_operations *in_ops; /* inotify caller operations */
};

static inline void get_inotify_handle(struct inotify_handle *ih)
{
	atomic_inc(&ih->count);
}

static inline void put_inotify_handle(struct inotify_handle *ih)
{
	if (atomic_dec_and_test(&ih->count)) {
		idr_destroy(&ih->idr);
		kfree(ih);
	}
}

/**
 * get_inotify_watch - grab a reference to an inotify_watch
 * @watch: watch to grab
 */
void get_inotify_watch(struct inotify_watch *watch)
{
	atomic_inc(&watch->count);
}
EXPORT_SYMBOL_GPL(get_inotify_watch);

/*
 * pin_inotify_watch - grab a reference to a watch along with an active
 * reference to its superblock, so that neither can go away under us.
 * Returns 1 on success, 0 if the superblock is already on its way down.
 */
int pin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		atomic_inc(&watch->count);
		return 1;
	}
	spin_unlock(&sb_lock);
	return 0;
}

/**
 * put_inotify_watch - decrements the ref count on a given watch.  cleans up
 * watch references if the count reaches zero.  inotify_watch is freed by
 * inotify callers via the destroy_watch() op.
 * @watch: watch to release
 */
void put_inotify_watch(struct inotify_watch *watch)
{
	if (atomic_dec_and_test(&watch->count)) {
		struct inotify_handle *ih = watch->ih;

		iput(watch->inode);
		ih->in_ops->destroy_watch(watch);
		put_inotify_handle(ih);
	}
}
EXPORT_SYMBOL_GPL(put_inotify_watch);

/*
 * unpin_inotify_watch - undo pin_inotify_watch(): drop the watch reference
 * and the active superblock reference taken there.
 */
void unpin_inotify_watch(struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	put_inotify_watch(watch);
	deactivate_super(sb);
}
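
/*
 * A hedged usage sketch for the pin/unpin pair above (do_something() is a
 * hypothetical stand-in): a caller that must work on a watch while holding
 * neither ih->mutex nor a reference of its own can try to pin it first,
 * which also keeps the superblock from being unmounted underneath it:
 *
 *	if (pin_inotify_watch(watch)) {
 *		do_something(watch);
 *		unpin_inotify_watch(watch);
 *	}
 */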

/*
 * inotify_handle_get_wd - returns the next WD for use by the given handle
 *
 * Callers must hold ih->mutex.  This function can sleep.
 */
static int inotify_handle_get_wd(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	int ret;

	do {
		if (unlikely(!idr_pre_get(&ih->idr, GFP_KERNEL)))
			return -ENOSPC;
		ret = idr_get_new_above(&ih->idr, watch, ih->last_wd+1, &watch->wd);
	} while (ret == -EAGAIN);

	if (likely(!ret))
		ih->last_wd = watch->wd;

	return ret;
}

/*
 * inotify_inode_watched - returns nonzero if there are watches on this inode
 * and zero otherwise.  We call this lockless; we do not care if we race.
 */
static inline int inotify_inode_watched(struct inode *inode)
{
	return !list_empty(&inode->inotify_watches);
}

/*
 * Get the child dentry flag into sync with the parent inode.
 * The flag should always be clear for negative dentries.
 */
static void set_dentry_child_flags(struct inode *inode, int watched)
{
	struct dentry *alias;

	spin_lock(&dcache_lock);
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct dentry *child;

		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {
			if (!child->d_inode)
				continue;

			spin_lock(&child->d_lock);
			if (watched)
				child->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
			else
				child->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
			spin_unlock(&child->d_lock);
		}
	}
	spin_unlock(&dcache_lock);
}

/*
 * inode_find_handle - find the watch associated with the given inode and
 * handle
 *
 * Callers must hold inode->inotify_mutex.
 */
static struct inotify_watch *inode_find_handle(struct inode *inode,
					       struct inotify_handle *ih)
{
	struct inotify_watch *watch;

	list_for_each_entry(watch, &inode->inotify_watches, i_list) {
		if (watch->ih == ih)
			return watch;
	}

	return NULL;
}

/*
 * remove_watch_no_event - remove watch without the IN_IGNORED event.
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
static void remove_watch_no_event(struct inotify_watch *watch,
				  struct inotify_handle *ih)
{
	list_del(&watch->i_list);
	list_del(&watch->h_list);

	if (!inotify_inode_watched(watch->inode))
		set_dentry_child_flags(watch->inode, 0);

	idr_remove(&ih->idr, watch->wd);
}

/**
 * inotify_remove_watch_locked - Remove a watch from both the handle and the
 * inode.  Sends the IN_IGNORED event signifying that the inode is no longer
 * watched.  May be invoked from a caller's event handler.
 * @ih: inotify handle associated with watch
 * @watch: watch to remove
 *
 * Callers must hold both inode->inotify_mutex and ih->mutex.
 */
void inotify_remove_watch_locked(struct inotify_handle *ih,
				 struct inotify_watch *watch)
{
	remove_watch_no_event(watch, ih);
	ih->in_ops->handle_event(watch, watch->wd, IN_IGNORED, 0, NULL, NULL);
}
EXPORT_SYMBOL_GPL(inotify_remove_watch_locked);
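
/*
 * Since both mutexes are already held while a caller's event handler runs,
 * inotify_remove_watch_locked() may be called directly from there.  A
 * hypothetical sketch (note that the IN_IGNORED it generates re-enters the
 * same handler, so the condition must not match a second time):
 *
 *	static void my_handle_event(struct inotify_watch *watch, u32 wd,
 *				    u32 mask, u32 cookie, const char *name,
 *				    struct inode *inode)
 *	{
 *		if (mask & IN_DELETE_SELF)
 *			inotify_remove_watch_locked(watch->ih, watch);
 *	}
 */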

/* Kernel API for producing events */

/*
 * inotify_d_instantiate - instantiate dcache entry for inode
 */
void inotify_d_instantiate(struct dentry *entry, struct inode *inode)
{
	struct dentry *parent;

	if (!inode)
		return;

	spin_lock(&entry->d_lock);
	parent = entry->d_parent;
	if (parent->d_inode && inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	spin_unlock(&entry->d_lock);
}

/*
 * inotify_d_move - dcache entry has been moved
 */
void inotify_d_move(struct dentry *entry)
{
	struct dentry *parent;

	parent = entry->d_parent;
	if (inotify_inode_watched(parent->d_inode))
		entry->d_flags |= DCACHE_INOTIFY_PARENT_WATCHED;
	else
		entry->d_flags &= ~DCACHE_INOTIFY_PARENT_WATCHED;
}

/**
 * inotify_inode_queue_event - queue an event to all watches on this inode
 * @inode: inode event is originating from
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 * @n_inode: inode associated with name
 */
void inotify_inode_queue_event(struct inode *inode, u32 mask, u32 cookie,
			       const char *name, struct inode *n_inode)
{
	struct inotify_watch *watch, *next;

	if (!inotify_inode_watched(inode))
		return;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		u32 watch_mask = watch->mask;
		if (watch_mask & mask) {
			struct inotify_handle *ih = watch->ih;
			mutex_lock(&ih->mutex);
			if (watch_mask & IN_ONESHOT)
				remove_watch_no_event(watch, ih);
			ih->in_ops->handle_event(watch, watch->wd, mask, cookie,
						 name, n_inode);
			mutex_unlock(&ih->mutex);
		}
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_queue_event);

/**
 * inotify_dentry_parent_queue_event - queue an event to a dentry's parent
 * @dentry: the dentry in question, we queue against this dentry's parent
 * @mask: event mask describing this event
 * @cookie: cookie for synchronization, or zero
 * @name: filename, if any
 */
void inotify_dentry_parent_queue_event(struct dentry *dentry, u32 mask,
				       u32 cookie, const char *name)
{
	struct dentry *parent;
	struct inode *inode;

	if (!(dentry->d_flags & DCACHE_INOTIFY_PARENT_WATCHED))
		return;

	spin_lock(&dentry->d_lock);
	parent = dentry->d_parent;
	inode = parent->d_inode;

	if (inotify_inode_watched(inode)) {
		dget(parent);
		spin_unlock(&dentry->d_lock);
		inotify_inode_queue_event(inode, mask, cookie, name,
					  dentry->d_inode);
		dput(parent);
	} else
		spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL_GPL(inotify_dentry_parent_queue_event);

/**
 * inotify_get_cookie - return a unique cookie for use in synchronizing events.
 */
u32 inotify_get_cookie(void)
{
	return atomic_inc_return(&inotify_cookie);
}
EXPORT_SYMBOL_GPL(inotify_get_cookie);
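
/*
 * A sketch of how an event producer uses the cookie to pair the two halves
 * of a rename (roughly what the VFS rename path does; the variable names
 * here are illustrative):
 *
 *	u32 cookie = inotify_get_cookie();
 *	inotify_inode_queue_event(old_dir, IN_MOVED_FROM, cookie, old_name,
 *				  moved_inode);
 *	inotify_inode_queue_event(new_dir, IN_MOVED_TO, cookie, new_name,
 *				  moved_inode);
 */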

/**
 * inotify_unmount_inodes - an sb is unmounting; handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void inotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inotify_watch *watch, *next_w;
		struct inode *need_iput_tmp;
		struct list_head *watches;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING, or
		 * I_WILL_FREE which is fine because by that point the inode
		 * cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;
		/* In case inotify_remove_watch_locked() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;
		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_CLEAR | I_FREEING |
					 I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send IN_UNMOUNT and then remove it */
		mutex_lock(&inode->inotify_mutex);
		watches = &inode->inotify_watches;
		list_for_each_entry_safe(watch, next_w, watches, i_list) {
			struct inotify_handle *ih = watch->ih;
			get_inotify_watch(watch);
			mutex_lock(&ih->mutex);
			ih->in_ops->handle_event(watch, watch->wd, IN_UNMOUNT, 0,
						 NULL, NULL);
			inotify_remove_watch_locked(ih, watch);
			mutex_unlock(&ih->mutex);
			put_inotify_watch(watch);
		}
		mutex_unlock(&inode->inotify_mutex);
		iput(inode);

		spin_lock(&inode_lock);
	}
}
EXPORT_SYMBOL_GPL(inotify_unmount_inodes);

/**
 * inotify_inode_is_dead - an inode has been deleted, cleanup any watches
 * @inode: inode that is about to be removed
 */
void inotify_inode_is_dead(struct inode *inode)
{
	struct inotify_watch *watch, *next;

	mutex_lock(&inode->inotify_mutex);
	list_for_each_entry_safe(watch, next, &inode->inotify_watches, i_list) {
		struct inotify_handle *ih = watch->ih;
		mutex_lock(&ih->mutex);
		inotify_remove_watch_locked(ih, watch);
		mutex_unlock(&ih->mutex);
	}
	mutex_unlock(&inode->inotify_mutex);
}
EXPORT_SYMBOL_GPL(inotify_inode_is_dead);

/* Kernel Consumer API */

/**
 * inotify_init - allocate and initialize an inotify instance
 * @ops: caller's inotify operations
 */
struct inotify_handle *inotify_init(const struct inotify_operations *ops)
{
	struct inotify_handle *ih;

	ih = kmalloc(sizeof(struct inotify_handle), GFP_KERNEL);
	if (unlikely(!ih))
		return ERR_PTR(-ENOMEM);

	idr_init(&ih->idr);
	INIT_LIST_HEAD(&ih->watches);
	mutex_init(&ih->mutex);
	ih->last_wd = 0;
	ih->in_ops = ops;
	atomic_set(&ih->count, 0);
	get_inotify_handle(ih);

	return ih;
}
EXPORT_SYMBOL_GPL(inotify_init);

/**
 * inotify_init_watch - initialize an inotify watch
 * @watch: watch to initialize
 */
void inotify_init_watch(struct inotify_watch *watch)
{
	INIT_LIST_HEAD(&watch->h_list);
	INIT_LIST_HEAD(&watch->i_list);
	atomic_set(&watch->count, 0);
	get_inotify_watch(watch); /* initial get */
}
EXPORT_SYMBOL_GPL(inotify_init_watch);
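
/*
 * A sketch of the embedding pattern the consumer API expects (the my_*
 * names are hypothetical): a caller wraps struct inotify_watch in its own
 * structure, recovers it with container_of() in its operations, and frees
 * it from its destroy_watch() op once the final put happens:
 *
 *	struct my_watch {
 *		struct inotify_watch	wdata;
 *		int			private_state;
 *	};
 *
 *	static void my_destroy_watch(struct inotify_watch *w)
 *	{
 *		kfree(container_of(w, struct my_watch, wdata));
 *	}
 *
 *	static const struct inotify_operations my_ops = {
 *		.handle_event	= my_handle_event,
 *		.destroy_watch	= my_destroy_watch,
 *	};
 */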

/*
 * Watch removals suck violently.  To kick the watch out we need (in this
 * order) inode->inotify_mutex and ih->mutex.  That's fine if we have
 * a hold on inode; however, for all other cases we need to make damn sure
 * we don't race with umount.  We can *NOT* just grab a reference to a
 * watch - inotify_unmount_inodes() will happily sail past it and we'll end
 * up with a reference to an inode potentially outliving its superblock.
 * Ideally we just want to grab an active reference to superblock if we can;
 * that will make sure we won't go into inotify_unmount_inodes() until we
 * are done.  Cleanup is just deactivate_super().  However, that leaves a
 * messy case - what if we *are* racing with umount() and active references
 * to superblock can't be acquired anymore?  We can bump ->s_count, grab
 * ->s_umount, which will almost certainly wait until the superblock is shut
 * down and the watch in question is pining for fjords.  That's fine, but
 * there is a problem - we might have hit the window between ->s_active
 * getting to 0 / ->s_count - below S_BIAS (i.e. the moment when superblock
 * is past the point of no return and is heading for shutdown) and the
 * moment when deactivate_super() acquires ->s_umount.  We could just do
 * drop_super() yield() and retry, but that's rather antisocial and this
 * stuff is luser-triggerable.  OTOH, having grabbed ->s_umount and having
 * found that we'd got there first (i.e. that ->s_root is non-NULL) we know
 * that we won't race with inotify_unmount_inodes().  So we could grab a
 * reference to watch and do the rest as above, just with drop_super()
 * instead of deactivate_super(), right?  Wrong.  We had to drop ih->mutex
 * before we could grab ->s_umount.  So the watch could've been gone already.
 *
 * That still can be dealt with - we need to save watch->wd, do idr_find()
 * and compare its result with our pointer.  If they match, we either have
 * the damn thing still alive or we'd lost not one but two races at once,
 * the watch had been killed and a new one got created with the same ->wd
 * at the same address.  That couldn't have happened in inotify_destroy(),
 * but inotify_rm_wd() could run into that.  Still, "new one got created"
 * is not a problem - we have every right to kill it or leave it alone,
 * whatever's more convenient.
 *
 * So we can use idr_find(...) == watch && watch->inode->i_sb == sb as
 * "grab it and kill it" check.  If it's been our original watch, we are
 * fine, if it's a newcomer - nevermind, just pretend that we'd won the
 * race and kill the fscker anyway; we are safe since we know that its
 * superblock won't be going away.
 *
 * And yes, this is far beyond mere "not very pretty"; so's the entire
 * concept of inotify to start with.
 */

/**
 * pin_to_kill - pin the watch down for removal
 * @ih: inotify handle
 * @watch: watch to kill
 *
 * Called with ih->mutex held, drops it.  Possible return values:
 * 0 - nothing to do, it has died
 * 1 - remove it, drop the reference and deactivate_super()
 * 2 - remove it, drop the reference and drop_super(); we tried hard to avoid
 * that variant, since it involved a lot of PITA, but that's the best that
 * could've been done.
 */
static int pin_to_kill(struct inotify_handle *ih, struct inotify_watch *watch)
{
	struct super_block *sb = watch->inode->i_sb;
	s32 wd = watch->wd;

	spin_lock(&sb_lock);
	if (sb->s_count >= S_BIAS) {
		atomic_inc(&sb->s_active);
		spin_unlock(&sb_lock);
		get_inotify_watch(watch);
		mutex_unlock(&ih->mutex);
		return 1;	/* the best outcome */
	}
	sb->s_count++;
	spin_unlock(&sb_lock);
	mutex_unlock(&ih->mutex);	/* can't grab ->s_umount under it */
	down_read(&sb->s_umount);
	if (likely(!sb->s_root)) {
		/* fs is already shut down; the watch is dead */
		drop_super(sb);
		return 0;
	}
	/* raced with the final deactivate_super() */
	mutex_lock(&ih->mutex);
	if (idr_find(&ih->idr, wd) != watch || watch->inode->i_sb != sb) {
		/* the watch is dead */
		mutex_unlock(&ih->mutex);
		drop_super(sb);
		return 0;
	}
	/* still alive or freed and reused with the same sb and wd; kill */
	get_inotify_watch(watch);
	mutex_unlock(&ih->mutex);
	return 2;
}

static void unpin_and_kill(struct inotify_watch *watch, int how)
{
	struct super_block *sb = watch->inode->i_sb;
	put_inotify_watch(watch);
	switch (how) {
	case 1:
		deactivate_super(sb);
		break;
	case 2:
		drop_super(sb);
	}
}

/**
 * inotify_destroy - clean up and destroy an inotify instance
 * @ih: inotify handle
 */
void inotify_destroy(struct inotify_handle *ih)
{
	/*
	 * Destroy all of the watches for this handle.  Unfortunately, not very
	 * pretty.  We cannot do a simple iteration over the list, because we
	 * do not know the inode until we iterate to the watch.  But we need to
	 * hold inode->inotify_mutex before ih->mutex.  The following works.
	 *
	 * AV: it had to become even uglier to start working ;-/
	 */
	while (1) {
		struct inotify_watch *watch;
		struct list_head *watches;
		struct super_block *sb;
		struct inode *inode;
		int how;

		mutex_lock(&ih->mutex);
		watches = &ih->watches;
		if (list_empty(watches)) {
			mutex_unlock(&ih->mutex);
			break;
		}
		watch = list_first_entry(watches, struct inotify_watch, h_list);
		sb = watch->inode->i_sb;
		how = pin_to_kill(ih, watch);
		if (!how)
			continue;

		inode = watch->inode;
		mutex_lock(&inode->inotify_mutex);
		mutex_lock(&ih->mutex);

		/* make sure we didn't race with another list removal */
		if (likely(idr_find(&ih->idr, watch->wd))) {
			remove_watch_no_event(watch, ih);
			put_inotify_watch(watch);
		}

		mutex_unlock(&ih->mutex);
		mutex_unlock(&inode->inotify_mutex);
		unpin_and_kill(watch, how);
	}

	/* free this handle: the put matching the get in inotify_init() */
	put_inotify_handle(ih);
}
EXPORT_SYMBOL_GPL(inotify_destroy);

/**
 * inotify_find_watch - find an existing watch for an (ih,inode) pair
 * @ih: inotify handle
 * @inode: inode to watch
 * @watchp: pointer to existing inotify_watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_watch(struct inotify_handle *ih, struct inode *inode,
		       struct inotify_watch **watchp)
{
	struct inotify_watch *old;
	int ret = -ENOENT;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	old = inode_find_handle(inode, ih);
	if (unlikely(old)) {
		get_inotify_watch(old); /* caller must put watch */
		*watchp = old;
		ret = old->wd;
	}

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_watch);

/**
 * inotify_find_update_watch - find and update the mask of an existing watch
 * @ih: inotify handle
 * @inode: inode's watch to update
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 */
s32 inotify_find_update_watch(struct inotify_handle *ih, struct inode *inode,
			      u32 mask)
{
	struct inotify_watch *old;
	int mask_add = 0;
	int ret;

	if (mask & IN_MASK_ADD)
		mask_add = 1;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/*
	 * Handle the case of re-adding a watch on an (inode,ih) pair that we
	 * are already watching.  We just update the mask and return its wd.
	 */
	old = inode_find_handle(inode, ih);
	if (unlikely(!old)) {
		ret = -ENOENT;
		goto out;
	}

	if (mask_add)
		old->mask |= mask;
	else
		old->mask = mask;
	ret = old->wd;
out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_find_update_watch);
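
/*
 * A hedged sketch of the find-or-create pattern this helper supports
 * (my_create_and_add_watch() is a hypothetical caller-side function that
 * allocates a watch and calls inotify_add_watch() below):
 *
 *	ret = inotify_find_update_watch(ih, inode, mask);
 *	if (ret == -ENOENT)
 *		ret = my_create_and_add_watch(ih, inode, mask);
 */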

/**
 * inotify_add_watch - add a watch to an inotify instance
 * @ih: inotify handle
 * @watch: caller allocated watch structure
 * @inode: inode to watch
 * @mask: mask of events to watch
 *
 * Caller must pin given inode (via nameidata).
 * Caller must ensure it only calls inotify_add_watch() once per watch.
 * Calls inotify_handle_get_wd() so may sleep.
 */
s32 inotify_add_watch(struct inotify_handle *ih, struct inotify_watch *watch,
		      struct inode *inode, u32 mask)
{
	int ret = 0;
	int newly_watched;

	/* don't allow invalid bits: we don't want flags set */
	mask &= IN_ALL_EVENTS | IN_ONESHOT;
	if (unlikely(!mask))
		return -EINVAL;
	watch->mask = mask;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, watch);
	if (unlikely(ret))
		goto out;
	ret = watch->wd;

	/* save a reference to handle and bump the count to make it official */
	get_inotify_handle(ih);
	watch->ih = ih;

	/*
	 * Save a reference to the inode and bump the ref count to make it
	 * official.  We hold a reference to nameidata, which makes this safe.
	 */
	watch->inode = igrab(inode);

	/* Add the watch to the handle's and the inode's list */
	newly_watched = !inotify_inode_watched(inode);
	list_add(&watch->h_list, &ih->watches);
	list_add(&watch->i_list, &inode->inotify_watches);
	/*
	 * Set child flags _after_ adding the watch, so there is no race
	 * window where newly instantiated children could miss their parent's
	 * watched flag.
	 */
	if (newly_watched)
		set_dentry_child_flags(inode, 1);

out:
	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(inotify_add_watch);

/**
 * inotify_clone_watch - put the watch next to an existing one
 * @old: already installed watch
 * @new: new watch
 *
 * Caller must hold the inotify_mutex of the inode we are dealing with;
 * it is expected to remove the old watch before unlocking the inode.
 */
s32 inotify_clone_watch(struct inotify_watch *old, struct inotify_watch *new)
{
	struct inotify_handle *ih = old->ih;
	int ret = 0;

	new->mask = old->mask;
	new->ih = ih;

	mutex_lock(&ih->mutex);

	/* Initialize a new watch */
	ret = inotify_handle_get_wd(ih, new);
	if (unlikely(ret))
		goto out;
	ret = new->wd;

	get_inotify_handle(ih);

	new->inode = igrab(old->inode);

	list_add(&new->h_list, &ih->watches);
	list_add(&new->i_list, &old->inode->inotify_watches);
out:
	mutex_unlock(&ih->mutex);
	return ret;
}

void inotify_evict_watch(struct inotify_watch *watch)
{
	get_inotify_watch(watch);
	mutex_lock(&watch->ih->mutex);
	inotify_remove_watch_locked(watch->ih, watch);
	mutex_unlock(&watch->ih->mutex);
}

/**
 * inotify_rm_wd - remove a watch from an inotify instance
 * @ih: inotify handle
 * @wd: watch descriptor to remove
 *
 * Can sleep.
 */
int inotify_rm_wd(struct inotify_handle *ih, u32 wd)
{
	struct inotify_watch *watch;
	struct super_block *sb;
	struct inode *inode;
	int how;

	mutex_lock(&ih->mutex);
	watch = idr_find(&ih->idr, wd);
	if (unlikely(!watch)) {
		mutex_unlock(&ih->mutex);
		return -EINVAL;
	}
	sb = watch->inode->i_sb;
	how = pin_to_kill(ih, watch);
	if (!how)
		return 0;

	inode = watch->inode;

	mutex_lock(&inode->inotify_mutex);
	mutex_lock(&ih->mutex);

	/* make sure that we did not race */
	if (likely(idr_find(&ih->idr, wd) == watch))
		inotify_remove_watch_locked(ih, watch);

	mutex_unlock(&ih->mutex);
	mutex_unlock(&inode->inotify_mutex);
	unpin_and_kill(watch, how);

	return 0;
}
EXPORT_SYMBOL_GPL(inotify_rm_wd);

/**
 * inotify_rm_watch - remove a watch from an inotify instance
 * @ih: inotify handle
 * @watch: watch to remove
 *
 * Can sleep.
 */
int inotify_rm_watch(struct inotify_handle *ih,
		     struct inotify_watch *watch)
{
	return inotify_rm_wd(ih, watch->wd);
}
EXPORT_SYMBOL_GPL(inotify_rm_watch);

/*
 * inotify_setup - core initialization function
 */
static int __init inotify_setup(void)
{
	atomic_set(&inotify_cookie, 0);

	return 0;
}

module_init(inotify_setup);