inotify: convert to idr_alloc()
/*
 * fs/inotify_user.c - inotify support for userspace
 *
 * Authors:
 *	John McCutchan	<ttb@tentacle.dhs.org>
 *	Robert Love	<rml@novell.com>
 *
 * Copyright (C) 2005 John McCutchan
 * Copyright 2006 Hewlett-Packard Development Company, L.P.
 *
 * Copyright (C) 2009 Eric Paris <Red Hat Inc>
 * inotify was largely rewritten to make use of the fsnotify infrastructure
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/file.h>
#include <linux/fs.h> /* struct inode */
#include <linux/fsnotify_backend.h>
#include <linux/idr.h>
#include <linux/init.h> /* module_init */
#include <linux/inotify.h>
#include <linux/kernel.h> /* roundup() */
#include <linux/namei.h> /* LOOKUP_FOLLOW */
#include <linux/sched.h> /* struct user */
#include <linux/slab.h> /* struct kmem_cache */
#include <linux/syscalls.h>
#include <linux/types.h>
#include <linux/anon_inodes.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/wait.h>

#include "inotify.h"
#include "../fdinfo.h"

#include <asm/ioctls.h>

/* these are configurable via /proc/sys/fs/inotify/ */
static int inotify_max_user_instances __read_mostly;
static int inotify_max_queued_events __read_mostly;
static int inotify_max_user_watches __read_mostly;

static struct kmem_cache *inotify_inode_mark_cachep __read_mostly;
struct kmem_cache *event_priv_cachep __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int zero;

ctl_table inotify_table[] = {
	{
		.procname	= "max_user_instances",
		.data		= &inotify_max_user_instances,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_user_watches",
		.data		= &inotify_max_user_watches,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero,
	},
	{
		.procname	= "max_queued_events",
		.data		= &inotify_max_queued_events,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};
#endif /* CONFIG_SYSCTL */
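
/*
 * Illustrative userspace sketch (an assumption for documentation, not part of
 * this file): the three limits above are exposed as
 * /proc/sys/fs/inotify/{max_user_instances,max_user_watches,max_queued_events}
 * and can be read like any other procfs file, e.g. to check the watch limit:
 *
 *	FILE *f = fopen("/proc/sys/fs/inotify/max_user_watches", "r");
 *	int max_watches = 0;
 *	if (f && fscanf(f, "%d", &max_watches) == 1)
 *		printf("watch limit: %d\n", max_watches);
 *	if (f)
 *		fclose(f);
 */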

static inline __u32 inotify_arg_to_mask(u32 arg)
{
	__u32 mask;

	/*
	 * every mark should accept its own FS_IN_IGNORED, care about children,
	 * and receive events when the inode is unmounted
	 */
	mask = (FS_IN_IGNORED | FS_EVENT_ON_CHILD | FS_UNMOUNT);

	/* mask off the flags used to open the fd */
	mask |= (arg & (IN_ALL_EVENTS | IN_ONESHOT | IN_EXCL_UNLINK));

	return mask;
}

static inline u32 inotify_mask_to_arg(__u32 mask)
{
	return mask & (IN_ALL_EVENTS | IN_ISDIR | IN_UNMOUNT | IN_IGNORED |
		       IN_Q_OVERFLOW);
}

/* inotify userspace file descriptor functions */
static unsigned int inotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

/*
 * Get one event from the notification queue if it exists and is small
 * enough to fit in "count".  Return an error pointer if the buffer is
 * not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	size_t event_size = sizeof(struct inotify_event);
	struct fsnotify_event *event;

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	event = fsnotify_peek_notify_event(group);

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	if (event->name_len)
		event_size += roundup(event->name_len + 1, event_size);

	if (event_size > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	fsnotify_remove_notify_event(group);

	return event;
}

/*
 * Copy an event to user space, returning how much we copied.
 *
 * We already checked that the event size is smaller than the
 * buffer we had in "get_one_event()" above.
 */
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct inotify_event inotify_event;
	struct fsnotify_event_private_data *fsn_priv;
	struct inotify_event_private_data *priv;
	size_t event_size = sizeof(struct inotify_event);
	size_t name_len = 0;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	/* we get the inotify watch descriptor from the event private data */
	spin_lock(&event->lock);
	fsn_priv = fsnotify_remove_priv_from_event(group, event);
	spin_unlock(&event->lock);

	if (!fsn_priv)
		inotify_event.wd = -1;
	else {
		priv = container_of(fsn_priv, struct inotify_event_private_data,
				    fsnotify_event_priv_data);
		inotify_event.wd = priv->wd;
		inotify_free_event_priv(fsn_priv);
	}

	/*
	 * round up event->name_len, plus an extra byte for the terminating
	 * '\0', so it is a multiple of event_size.
	 */
	if (event->name_len)
		name_len = roundup(event->name_len + 1, event_size);
	inotify_event.len = name_len;

	inotify_event.mask = inotify_mask_to_arg(event->mask);
	inotify_event.cookie = event->sync_cookie;

	/* send the main event */
	if (copy_to_user(buf, &inotify_event, event_size))
		return -EFAULT;

	buf += event_size;

	/*
	 * fsnotify only stores the pathname, so here we have to send the
	 * pathname and then pad it out to a multiple of
	 * sizeof(struct inotify_event) with zeros (via clear_user()).
	 */
	if (name_len) {
		unsigned int len_to_zero = name_len - event->name_len;
		/* copy the path name */
		if (copy_to_user(buf, event->file_name, event->name_len))
			return -EFAULT;
		buf += event->name_len;

		/* fill userspace with 0's */
		if (clear_user(buf, len_to_zero))
			return -EFAULT;
		buf += len_to_zero;
		event_size += name_len;
	}

	return event_size;
}
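
/*
 * Illustrative userspace sketch (an assumption for documentation, not part of
 * this kernel file): how a consumer would walk the variable-length records
 * that copy_event_to_user() lays out.  Each record is a struct inotify_event
 * header followed by ->len bytes of NUL-padded name.  "inotify_fd" is assumed
 * to come from inotify_init1(); <sys/inotify.h>, <stdio.h> and <unistd.h>
 * would be needed.
 *
 *	char buf[4096];
 *	ssize_t len = read(inotify_fd, buf, sizeof(buf));
 *	char *p = buf;
 *	while (len > 0 && p < buf + len) {
 *		const struct inotify_event *ev = (const struct inotify_event *)p;
 *		printf("wd=%d mask=%#x name=%s\n",
 *		       ev->wd, ev->mask, ev->len ? ev->name : "");
 *		p += sizeof(struct inotify_event) + ev->len;
 *	}
 */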

static ssize_t inotify_read(struct file *file, char __user *buf,
			    size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		pr_debug("%s: group=%p kevent=%p\n", __func__, group, kevent);

		if (kevent) {
			ret = PTR_ERR(kevent);
			if (IS_ERR(kevent))
				break;
			ret = copy_event_to_user(group, kevent, buf);
			fsnotify_put_event(kevent);
			if (ret < 0)
				break;
			buf += ret;
			count -= ret;
			continue;
		}

		ret = -EAGAIN;
		if (file->f_flags & O_NONBLOCK)
			break;
		ret = -ERESTARTSYS;
		if (signal_pending(current))
			break;

		if (start != buf)
			break;

		schedule();
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

static int inotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	if (file->f_flags & FASYNC)
		fsnotify_fasync(-1, file, 0);

	/* free this group, matching the get in inotify_init->inotify_new_group */
	fsnotify_destroy_group(group);

	return 0;
}

static long inotify_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event_holder *holder;
	struct fsnotify_event *event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;
	p = (void __user *) arg;

	pr_debug("%s: group=%p cmd=%u\n", __func__, group, cmd);

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(holder, &group->notification_list, event_list) {
			event = holder->event;
			send_len += sizeof(struct inotify_event);
			if (event->name_len)
				send_len += roundup(event->name_len + 1,
						sizeof(struct inotify_event));
		}
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}
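
/*
 * Illustrative userspace sketch (not taken from this file): FIONREAD on an
 * inotify fd reports how many bytes of queued events a subsequent read()
 * would need, which is exactly the sum computed in inotify_ioctl() above.
 * "inotify_fd" is assumed to come from inotify_init1().
 *
 *	int avail = 0;
 *	if (ioctl(inotify_fd, FIONREAD, &avail) == 0 && avail > 0)
 *		printf("%d bytes of events pending\n", avail);
 */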

static const struct file_operations inotify_fops = {
	.show_fdinfo	= inotify_show_fdinfo,
	.poll		= inotify_poll,
	.read		= inotify_read,
	.fasync		= fsnotify_fasync,
	.release	= inotify_release,
	.unlocked_ioctl	= inotify_ioctl,
	.compat_ioctl	= inotify_ioctl,
	.llseek		= noop_llseek,
};


/*
 * inotify_find_inode - resolve a user-given path to a specific inode
 */
static int inotify_find_inode(const char __user *dirname, struct path *path, unsigned flags)
{
	int error;

	error = user_path_at(AT_FDCWD, dirname, flags, path);
	if (error)
		return error;
	/* you can only watch an inode if you have read permissions on it */
	error = inode_permission(path->dentry->d_inode, MAY_READ);
	if (error)
		path_put(path);
	return error;
}

static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock,
			      int *last_wd,
			      struct inotify_inode_mark *i_mark)
{
	int ret;

	idr_preload(GFP_KERNEL);
	spin_lock(idr_lock);

	ret = idr_alloc(idr, i_mark, *last_wd + 1, 0, GFP_NOWAIT);
	if (ret >= 0) {
		/* we added the mark to the idr, take a reference */
		i_mark->wd = ret;
		*last_wd = i_mark->wd;
		fsnotify_get_mark(&i_mark->fsn_mark);
	}

	spin_unlock(idr_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
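
/*
 * The preload pairing above is the general idr_alloc() pattern this file was
 * converted to: idr_preload() pre-allocates per-cpu idr layers with the
 * "real" gfp mask outside the spinlock, so the GFP_NOWAIT allocation under
 * the lock can succeed without sleeping.  A minimal, hedged sketch of the
 * same pattern for some other object and lock (my_idr, my_lock and my_ptr
 * are illustrative names only; end == 0 means no upper bound on the id):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, my_ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *	if (id < 0)
 *		return id;
 */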

static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group,
							   int wd)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *i_mark;

	assert_spin_locked(idr_lock);

	i_mark = idr_find(idr, wd);
	if (i_mark) {
		struct fsnotify_mark *fsn_mark = &i_mark->fsn_mark;

		fsnotify_get_mark(fsn_mark);
		/* One ref for being in the idr, one ref we just took */
		BUG_ON(atomic_read(&fsn_mark->refcnt) < 2);
	}

	return i_mark;
}

static struct inotify_inode_mark *inotify_idr_find(struct fsnotify_group *group,
						   int wd)
{
	struct inotify_inode_mark *i_mark;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	spin_lock(idr_lock);
	i_mark = inotify_idr_find_locked(group, wd);
	spin_unlock(idr_lock);

	return i_mark;
}

static void do_inotify_remove_from_idr(struct fsnotify_group *group,
				       struct inotify_inode_mark *i_mark)
{
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	int wd = i_mark->wd;

	assert_spin_locked(idr_lock);

	idr_remove(idr, wd);

	/* removed from the idr, drop that ref */
	fsnotify_put_mark(&i_mark->fsn_mark);
}

/*
 * Remove the mark from the idr (if present) and drop the reference
 * on the mark because it was in the idr.
 */
static void inotify_remove_from_idr(struct fsnotify_group *group,
				    struct inotify_inode_mark *i_mark)
{
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;
	struct inotify_inode_mark *found_i_mark = NULL;
	int wd;

	spin_lock(idr_lock);
	wd = i_mark->wd;

	/*
	 * does this i_mark think it is in the idr? we shouldn't get called
	 * if it wasn't....
	 */
	if (wd == -1) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/* Let's look in the idr to see if we find it */
	found_i_mark = inotify_idr_find_locked(group, wd);
	if (unlikely(!found_i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * We found a mark in the idr at the right wd, but it's
	 * not the mark we were told to remove. eparis seriously
	 * fucked up somewhere.
	 */
	if (unlikely(found_i_mark != i_mark)) {
		WARN_ONCE(1, "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p "
			"mark->inode=%p found_i_mark=%p found_i_mark->wd=%d "
			"found_i_mark->group=%p found_i_mark->inode=%p\n",
			__func__, i_mark, i_mark->wd, i_mark->fsn_mark.group,
			i_mark->fsn_mark.i.inode, found_i_mark, found_i_mark->wd,
			found_i_mark->fsn_mark.group,
			found_i_mark->fsn_mark.i.inode);
		goto out;
	}

	/*
	 * One ref for being in the idr
	 * one ref held by the caller trying to kill us
	 * one ref grabbed by inotify_idr_find
	 */
	if (unlikely(atomic_read(&i_mark->fsn_mark.refcnt) < 3)) {
		printk(KERN_ERR "%s: i_mark=%p i_mark->wd=%d i_mark->group=%p"
			" i_mark->inode=%p\n", __func__, i_mark, i_mark->wd,
			i_mark->fsn_mark.group, i_mark->fsn_mark.i.inode);
		/* we can't really recover with bad ref counting.. */
		BUG();
	}

	do_inotify_remove_from_idr(group, i_mark);
out:
	/* match the ref taken by inotify_idr_find_locked() */
	if (found_i_mark)
		fsnotify_put_mark(&found_i_mark->fsn_mark);
	i_mark->wd = -1;
	spin_unlock(idr_lock);
}

/*
 * Send IN_IGNORED for this wd, remove this wd from the idr.
 */
void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
				    struct fsnotify_group *group)
{
	struct inotify_inode_mark *i_mark;
	struct fsnotify_event *ignored_event, *notify_event;
	struct inotify_event_private_data *event_priv;
	struct fsnotify_event_private_data *fsn_event_priv;
	int ret;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL,
					      FSNOTIFY_EVENT_NONE, NULL, 0,
					      GFP_NOFS);
	if (!ignored_event)
		goto skip_send_ignore;

	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS);
	if (unlikely(!event_priv))
		goto skip_send_ignore;

	fsn_event_priv = &event_priv->fsnotify_event_priv_data;

	fsnotify_get_group(group);
	fsn_event_priv->group = group;
	event_priv->wd = i_mark->wd;

	notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL);
	if (notify_event) {
		if (IS_ERR(notify_event))
			ret = PTR_ERR(notify_event);
		else
			fsnotify_put_event(notify_event);
		inotify_free_event_priv(fsn_event_priv);
	}

skip_send_ignore:
	/* matches the reference taken when the event was created */
	if (ignored_event)
		fsnotify_put_event(ignored_event);

	/* remove this mark from the idr */
	inotify_remove_from_idr(group, i_mark);

	atomic_dec(&group->inotify_data.user->inotify_watches);
}

/* ding dong the mark is dead */
static void inotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	struct inotify_inode_mark *i_mark;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	kmem_cache_free(inotify_inode_mark_cachep, i_mark);
}

static int inotify_update_existing_watch(struct fsnotify_group *group,
					 struct inode *inode,
					 u32 arg)
{
	struct fsnotify_mark *fsn_mark;
	struct inotify_inode_mark *i_mark;
	__u32 old_mask, new_mask;
	__u32 mask;
	int add = (arg & IN_MASK_ADD);
	int ret;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);

	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark)
		return -ENOENT;

	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);

	spin_lock(&fsn_mark->lock);

	old_mask = fsn_mark->mask;
	if (add)
		fsnotify_set_mark_mask_locked(fsn_mark, (fsn_mark->mask | mask));
	else
		fsnotify_set_mark_mask_locked(fsn_mark, mask);
	new_mask = fsn_mark->mask;

	spin_unlock(&fsn_mark->lock);

	if (old_mask != new_mask) {
		/* more bits in old than in new? */
		int dropped = (old_mask & ~new_mask);
		/* more bits in this fsn_mark than the inode's mask? */
		int do_inode = (new_mask & ~inode->i_fsnotify_mask);

		/* update the inode with this new fsn_mark */
		if (dropped || do_inode)
			fsnotify_recalc_inode_mask(inode);

	}

	/* return the wd */
	ret = i_mark->wd;

	/* match the get from fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);

	return ret;
}

static int inotify_new_watch(struct fsnotify_group *group,
			     struct inode *inode,
			     u32 arg)
{
	struct inotify_inode_mark *tmp_i_mark;
	__u32 mask;
	int ret;
	struct idr *idr = &group->inotify_data.idr;
	spinlock_t *idr_lock = &group->inotify_data.idr_lock;

	/* don't allow invalid bits: we don't want flags set */
	mask = inotify_arg_to_mask(arg);

	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);
	if (unlikely(!tmp_i_mark))
		return -ENOMEM;

	fsnotify_init_mark(&tmp_i_mark->fsn_mark, inotify_free_mark);
	tmp_i_mark->fsn_mark.mask = mask;
	tmp_i_mark->wd = -1;

	ret = -ENOSPC;
	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)
		goto out_err;

	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd,
				 tmp_i_mark);
	if (ret)
		goto out_err;

	/* we are on the idr, now get on the inode */
	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0);
	if (ret) {
		/* we failed to get on the inode, get off the idr */
		inotify_remove_from_idr(group, tmp_i_mark);
		goto out_err;
	}

	/* increment the number of watches the user has */
	atomic_inc(&group->inotify_data.user->inotify_watches);

	/* return the watch descriptor for this new mark */
	ret = tmp_i_mark->wd;

out_err:
	/* match the ref from fsnotify_init_mark() */
	fsnotify_put_mark(&tmp_i_mark->fsn_mark);

	return ret;
}

static int inotify_update_watch(struct fsnotify_group *group, struct inode *inode, u32 arg)
{
	int ret = 0;

retry:
	/* try to update an existing watch with the new arg */
	ret = inotify_update_existing_watch(group, inode, arg);
	/* no mark present, try to add a new one */
	if (ret == -ENOENT)
		ret = inotify_new_watch(group, inode, arg);
	/*
	 * inotify_new_watch could race with another thread which did an
	 * inotify_new_watch between our update_existing and add_watch calls;
	 * if so, go back and try to update the existing mark again.
	 */
	if (ret == -EEXIST)
		goto retry;

	return ret;
}

static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
	struct fsnotify_group *group;

	group = fsnotify_alloc_group(&inotify_fsnotify_ops);
	if (IS_ERR(group))
		return group;

	group->max_events = max_events;

	spin_lock_init(&group->inotify_data.idr_lock);
	idr_init(&group->inotify_data.idr);
	group->inotify_data.last_wd = 0;
	group->inotify_data.user = get_current_user();

	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) >
	    inotify_max_user_instances) {
		fsnotify_destroy_group(group);
		return ERR_PTR(-EMFILE);
	}

	return group;
}


/* inotify syscalls */
SYSCALL_DEFINE1(inotify_init1, int, flags)
{
	struct fsnotify_group *group;
	int ret;

	/* Check the IN_* constants for consistency.  */
	BUILD_BUG_ON(IN_CLOEXEC != O_CLOEXEC);
	BUILD_BUG_ON(IN_NONBLOCK != O_NONBLOCK);

	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))
		return -EINVAL;

	/* inotify_new_group took a reference to the group; we put it when we kill the file in the end */
	group = inotify_new_group(inotify_max_queued_events);
	if (IS_ERR(group))
		return PTR_ERR(group);

	ret = anon_inode_getfd("inotify", &inotify_fops, group,
			       O_RDONLY | flags);
	if (ret < 0)
		fsnotify_destroy_group(group);

	return ret;
}

SYSCALL_DEFINE0(inotify_init)
{
	return sys_inotify_init1(0);
}

SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
		u32, mask)
{
	struct fsnotify_group *group;
	struct inode *inode;
	struct path path;
	struct fd f;
	int ret;
	unsigned flags = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	if (unlikely(f.file->f_op != &inotify_fops)) {
		ret = -EINVAL;
		goto fput_and_out;
	}

	if (!(mask & IN_DONT_FOLLOW))
		flags |= LOOKUP_FOLLOW;
	if (mask & IN_ONLYDIR)
		flags |= LOOKUP_DIRECTORY;

	ret = inotify_find_inode(pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fdget on fd */
	inode = path.dentry->d_inode;
	group = f.file->private_data;

	/* create/update an inode mark */
	ret = inotify_update_watch(group, inode, mask);
	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)
{
	struct fsnotify_group *group;
	struct inotify_inode_mark *i_mark;
	struct fd f;
	int ret = 0;

	f = fdget(fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an inotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &inotify_fops))
		goto out;

	group = f.file->private_data;

	ret = -EINVAL;
	i_mark = inotify_idr_find(group, wd);
	if (unlikely(!i_mark))
		goto out;

	ret = 0;

	fsnotify_destroy_mark(&i_mark->fsn_mark, group);

	/* match ref taken by inotify_idr_find */
	fsnotify_put_mark(&i_mark->fsn_mark);

out:
	fdput(f);
	return ret;
}
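
/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * normal lifecycle of the syscalls above -- create an instance, add a watch,
 * read events, and remove the watch again.  The path and mask are examples
 * only.
 *
 *	int ifd = inotify_init1(IN_CLOEXEC);
 *	int wd = inotify_add_watch(ifd, "/tmp", IN_CREATE | IN_DELETE);
 *	if (wd >= 0) {
 *		// ... read() events as sketched after copy_event_to_user() ...
 *		inotify_rm_watch(ifd, wd);
 *	}
 *	close(ifd);
 */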

/*
 * inotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init inotify_user_setup(void)
{
	BUILD_BUG_ON(IN_ACCESS != FS_ACCESS);
	BUILD_BUG_ON(IN_MODIFY != FS_MODIFY);
	BUILD_BUG_ON(IN_ATTRIB != FS_ATTRIB);
	BUILD_BUG_ON(IN_CLOSE_WRITE != FS_CLOSE_WRITE);
	BUILD_BUG_ON(IN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE);
	BUILD_BUG_ON(IN_OPEN != FS_OPEN);
	BUILD_BUG_ON(IN_MOVED_FROM != FS_MOVED_FROM);
	BUILD_BUG_ON(IN_MOVED_TO != FS_MOVED_TO);
	BUILD_BUG_ON(IN_CREATE != FS_CREATE);
	BUILD_BUG_ON(IN_DELETE != FS_DELETE);
	BUILD_BUG_ON(IN_DELETE_SELF != FS_DELETE_SELF);
	BUILD_BUG_ON(IN_MOVE_SELF != FS_MOVE_SELF);
	BUILD_BUG_ON(IN_UNMOUNT != FS_UNMOUNT);
	BUILD_BUG_ON(IN_Q_OVERFLOW != FS_Q_OVERFLOW);
	BUILD_BUG_ON(IN_IGNORED != FS_IN_IGNORED);
	BUILD_BUG_ON(IN_EXCL_UNLINK != FS_EXCL_UNLINK);
	BUILD_BUG_ON(IN_ISDIR != FS_ISDIR);
	BUILD_BUG_ON(IN_ONESHOT != FS_IN_ONESHOT);

	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);

	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC);
	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);

	inotify_max_queued_events = 16384;
	inotify_max_user_instances = 128;
	inotify_max_user_watches = 8192;

	return 0;
}
module_init(inotify_user_setup);