/* fs/notify/fanotify/fanotify_user.c */
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"
#include "fanotify.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS	16384
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
struct kmem_cache *fanotify_event_cachep __read_mostly;
struct kmem_cache *fanotify_perm_event_cachep __read_mostly;

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
					    size_t count)
{
	BUG_ON(!mutex_is_locked(&group->notification_mutex));

	pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

	if (fsnotify_notify_queue_is_empty(group))
		return NULL;

	if (FAN_EVENT_METADATA_LEN > count)
		return ERR_PTR(-EINVAL);

	/* held the notification_mutex the whole time, so this is the
	 * same event we peeked above */
	return fsnotify_remove_notify_event(group);
}

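/*
 * Open a new struct file for the object the event was generated on, using the
 * f_flags the listener passed to fanotify_init(), and reserve an fd for it.
 * Returns the fd (the file itself is handed back through *file and installed
 * later by copy_event_to_user()), or a negative error code.
 */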
static int create_fd(struct fsnotify_group *group,
		     struct fanotify_event_info *event,
		     struct file **file)
{
	int client_fd;
	struct file *new_file;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	client_fd = get_unused_fd();
	if (client_fd < 0)
		return client_fd;

	/*
	 * We need a new file handle for the userspace program so it can read
	 * even if it was originally opened O_WRONLY.
	 */
	/*
	 * It's possible this event was an overflow event.  In that case dentry
	 * and mnt are NULL; that's fine, just don't call dentry_open().
	 */
	if (event->path.dentry && event->path.mnt)
		new_file = dentry_open(&event->path,
				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
				       current_cred());
	else
		new_file = ERR_PTR(-EOVERFLOW);
	if (IS_ERR(new_file)) {
		/*
		 * We still send an event even if we can't open the file.  This
		 * can happen when say tasks are gone and we try to open their
		 * /proc files or we try to open a WRONLY file like in sysfs;
		 * we just send the errno to userspace since there isn't much
		 * else we can do.
		 */
		put_unused_fd(client_fd);
		client_fd = PTR_ERR(new_file);
	} else {
		*file = new_file;
	}

	return client_fd;
}

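/*
 * Translate a queued fsnotify event into the fanotify_event_metadata record
 * that will be copied to the listener: version, mask, pid of the originating
 * task and, unless this is the overflow event, a freshly created fd for the
 * object (FAN_NOFD otherwise).
 */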
static int fill_event_metadata(struct fsnotify_group *group,
			       struct fanotify_event_metadata *metadata,
			       struct fsnotify_event *fsn_event,
			       struct file **file)
{
	int ret = 0;
	struct fanotify_event_info *event;

	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
		 group, metadata, fsn_event);

	*file = NULL;
	event = container_of(fsn_event, struct fanotify_event_info, fse);
	metadata->event_len = FAN_EVENT_METADATA_LEN;
	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
	metadata->vers = FANOTIFY_METADATA_VERSION;
	metadata->reserved = 0;
	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
	metadata->pid = pid_vnr(event->tgid);
	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
		metadata->fd = FAN_NOFD;
	else {
		metadata->fd = create_fd(group, event, file);
		if (metadata->fd < 0)
			ret = metadata->fd;
	}

	return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
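/*
 * Find the permission event whose fd matches the one userspace wrote back to
 * us, remove it from the group's access_list and return it (NULL if no such
 * event is outstanding).
 */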
static struct fanotify_perm_event_info *dequeue_event(
				struct fsnotify_group *group, int fd)
{
	struct fanotify_perm_event_info *event, *return_e = NULL;

	spin_lock(&group->fanotify_data.access_lock);
	list_for_each_entry(event, &group->fanotify_data.access_list,
			    fae.fse.list) {
		if (event->fd != fd)
			continue;

		list_del_init(&event->fae.fse.list);
		return_e = event;
		break;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	pr_debug("%s: found return_e=%p\n", __func__, return_e);

	return return_e;
}

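/*
 * Handle a FAN_ALLOW/FAN_DENY verdict written by the listener: look up the
 * matching permission event by fd, record the response and wake up whoever
 * is waiting on access_waitq for the verdict.
 */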
static int process_access_response(struct fsnotify_group *group,
				   struct fanotify_response *response_struct)
{
	struct fanotify_perm_event_info *event;
	int fd = response_struct->fd;
	int response = response_struct->response;

	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
		 fd, response);
	/*
	 * make sure the response is valid, if invalid we do nothing and either
	 * userspace can send a valid response or we will clean it up after the
	 * timeout
	 */
	switch (response) {
	case FAN_ALLOW:
	case FAN_DENY:
		break;
	default:
		return -EINVAL;
	}

	if (fd < 0)
		return -EINVAL;

	event = dequeue_event(group, fd);
	if (!event)
		return -ENOENT;

	event->response = response;
	wake_up(&group->fanotify_data.access_waitq);

	return 0;
}
#endif

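/*
 * Copy one event to the listener's buffer: build the metadata (which creates
 * the fd), copy it out and only then install the fd in the caller's file
 * table, so a failed copy_to_user() can still back out with put_unused_fd().
 */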
static ssize_t copy_event_to_user(struct fsnotify_group *group,
				  struct fsnotify_event *event,
				  char __user *buf)
{
	struct fanotify_event_metadata fanotify_event_metadata;
	struct file *f;
	int fd, ret;

	pr_debug("%s: group=%p event=%p\n", __func__, group, event);

	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
	if (ret < 0)
		return ret;

	fd = fanotify_event_metadata.fd;
	ret = -EFAULT;
	if (copy_to_user(buf, &fanotify_event_metadata,
			 fanotify_event_metadata.event_len))
		goto out_close_fd;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (event->mask & FAN_ALL_PERM_EVENTS)
		FANOTIFY_PE(event)->fd = fd;
#endif

	if (fd != FAN_NOFD)
		fd_install(fd, f);
	return fanotify_event_metadata.event_len;

out_close_fd:
	if (fd != FAN_NOFD) {
		put_unused_fd(fd);
		fput(f);
	}
	return ret;
}

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
	struct fsnotify_group *group = file->private_data;
	int ret = 0;

	poll_wait(file, &group->notification_waitq, wait);
	mutex_lock(&group->notification_mutex);
	if (!fsnotify_notify_queue_is_empty(group))
		ret = POLLIN | POLLRDNORM;
	mutex_unlock(&group->notification_mutex);

	return ret;
}

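/*
 * Drain queued events into the user buffer, one metadata record at a time.
 * Blocks (unless O_NONBLOCK) until at least one event has been copied.
 * Ordinary events are destroyed once copied; permission events are parked on
 * access_list until the listener writes back a response.
 */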
static ssize_t fanotify_read(struct file *file, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct fsnotify_group *group;
	struct fsnotify_event *kevent;
	char __user *start;
	int ret;
	DEFINE_WAIT(wait);

	start = buf;
	group = file->private_data;

	pr_debug("%s: group=%p\n", __func__, group);

	while (1) {
		prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&group->notification_mutex);
		kevent = get_one_event(group, count);
		mutex_unlock(&group->notification_mutex);

		if (IS_ERR(kevent)) {
			ret = PTR_ERR(kevent);
			break;
		}

		if (!kevent) {
			ret = -EAGAIN;
			if (file->f_flags & O_NONBLOCK)
				break;

			ret = -ERESTARTSYS;
			if (signal_pending(current))
				break;

			if (start != buf)
				break;
			schedule();
			continue;
		}

		ret = copy_event_to_user(group, kevent, buf);
		/*
		 * Permission events get queued to wait for response. Other
		 * events can be destroyed now.
		 */
		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
			fsnotify_destroy_event(group, kevent);
			if (ret < 0)
				break;
		} else {
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
			if (ret < 0) {
				FANOTIFY_PE(kevent)->response = FAN_DENY;
				wake_up(&group->fanotify_data.access_waitq);
				break;
			}
			spin_lock(&group->fanotify_data.access_lock);
			list_add_tail(&kevent->list,
				      &group->fanotify_data.access_list);
			spin_unlock(&group->fanotify_data.access_lock);
#endif
		}
		buf += ret;
		count -= ret;
	}

	finish_wait(&group->notification_waitq, &wait);
	if (start != buf && ret != -EFAULT)
		ret = buf - start;
	return ret;
}

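/*
 * Writes to a fanotify fd are only meaningful for permission events: the
 * listener writes a struct fanotify_response carrying the fd of the event and
 * a FAN_ALLOW or FAN_DENY verdict.
 */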
static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_response response = { .fd = -1, .response = -1 };
	struct fsnotify_group *group;
	int ret;

	group = file->private_data;

	if (count > sizeof(response))
		count = sizeof(response);

	pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

	if (copy_from_user(&response, buf, count))
		return -EFAULT;

	ret = process_access_response(group, &response);
	if (ret < 0)
		count = ret;

	return count;
#else
	return -EINVAL;
#endif
}

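/*
 * Tear down the group when the last reference to the fanotify fd goes away.
 * Any permission events still waiting for a verdict are answered FAN_ALLOW
 * (and bypass_perm is raised) so that blocked tasks are not stuck forever.
 */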
static int fanotify_release(struct inode *ignored, struct file *file)
{
	struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	struct fanotify_perm_event_info *event, *next;

	spin_lock(&group->fanotify_data.access_lock);

	atomic_inc(&group->fanotify_data.bypass_perm);

	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
				 fae.fse.list) {
		pr_debug("%s: found group=%p event=%p\n", __func__, group,
			 event);

		list_del_init(&event->fae.fse.list);
		event->response = FAN_ALLOW;
	}
	spin_unlock(&group->fanotify_data.access_lock);

	wake_up(&group->fanotify_data.access_waitq);
#endif

	/* matches the fanotify_init->fsnotify_alloc_group */
	fsnotify_destroy_group(group);

	return 0;
}

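/*
 * Only FIONREAD is supported: report how many bytes a read() would currently
 * return, i.e. one FAN_EVENT_METADATA_LEN per queued event.
 */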
static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct fsnotify_group *group;
	struct fsnotify_event *fsn_event;
	void __user *p;
	int ret = -ENOTTY;
	size_t send_len = 0;

	group = file->private_data;

	p = (void __user *) arg;

	switch (cmd) {
	case FIONREAD:
		mutex_lock(&group->notification_mutex);
		list_for_each_entry(fsn_event, &group->notification_list, list)
			send_len += FAN_EVENT_METADATA_LEN;
		mutex_unlock(&group->notification_mutex);
		ret = put_user(send_len, (int __user *) p);
		break;
	}

	return ret;
}

static const struct file_operations fanotify_fops = {
	.show_fdinfo	= fanotify_show_fdinfo,
	.poll		= fanotify_poll,
	.read		= fanotify_read,
	.write		= fanotify_write,
	.fasync		= NULL,
	.release	= fanotify_release,
	.unlocked_ioctl	= fanotify_ioctl,
	.compat_ioctl	= fanotify_ioctl,
	.llseek		= noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
	kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

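/*
 * Resolve the dfd/filename pair passed to fanotify_mark() into a struct path,
 * honouring FAN_MARK_DONT_FOLLOW and FAN_MARK_ONLYDIR, and check that the
 * caller has read permission on the resulting inode.
 */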
static int fanotify_find_path(int dfd, const char __user *filename,
			      struct path *path, unsigned int flags)
{
	int ret;

	pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
		 dfd, filename, flags);

	if (filename == NULL) {
		struct fd f = fdget(dfd);

		ret = -EBADF;
		if (!f.file)
			goto out;

		ret = -ENOTDIR;
		if ((flags & FAN_MARK_ONLYDIR) &&
		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
			fdput(f);
			goto out;
		}

		*path = f.file->f_path;
		path_get(path);
		fdput(f);
	} else {
		unsigned int lookup_flags = 0;

		if (!(flags & FAN_MARK_DONT_FOLLOW))
			lookup_flags |= LOOKUP_FOLLOW;
		if (flags & FAN_MARK_ONLYDIR)
			lookup_flags |= LOOKUP_DIRECTORY;

		ret = user_path_at(dfd, filename, lookup_flags, path);
		if (ret)
			goto out;
	}

	/* you can only watch an inode if you have read permissions on it */
	ret = inode_permission(path->dentry->d_inode, MAY_READ);
	if (ret)
		path_put(path);
out:
	return ret;
}

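/*
 * Clear the given bits from the mark's event mask or ignored mask (depending
 * on FAN_MARK_IGNORED_MASK).  Returns the bits that were actually cleared and
 * tells the caller, via *destroy, whether the mark is now empty and should be
 * destroyed.
 */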
static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
					    __u32 mask,
					    unsigned int flags,
					    int *destroy)
{
	__u32 oldmask;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
	} else {
		oldmask = fsn_mark->ignored_mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
	}
	spin_unlock(&fsn_mark->lock);

	*destroy = !(oldmask & ~mask);

	return mask & oldmask;
}

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
					 struct vfsmount *mnt, __u32 mask,
					 unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);
	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark = NULL;
	__u32 removed;
	int destroy_mark;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		mutex_unlock(&group->mark_mutex);
		return -ENOENT;
	}

	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
						 &destroy_mark);
	if (destroy_mark)
		fsnotify_destroy_mark_locked(fsn_mark, group);
	mutex_unlock(&group->mark_mutex);

	/* matches the fsnotify_find_inode_mark() */
	fsnotify_put_mark(fsn_mark);
	if (removed & inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	return 0;
}

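/*
 * Add bits to the mark's event mask or ignored mask.  If FAN_MARK_ONDIR was
 * not requested, FAN_ONDIR is added to the ignored mask so that events on
 * directories are suppressed.  Returns the bits that were newly set.
 */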
static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
				       __u32 mask,
				       unsigned int flags)
{
	__u32 oldmask = -1;

	spin_lock(&fsn_mark->lock);
	if (!(flags & FAN_MARK_IGNORED_MASK)) {
		oldmask = fsn_mark->mask;
		fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
	} else {
		__u32 tmask = fsn_mark->ignored_mask | mask;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
		if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
			fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
	}

	if (!(flags & FAN_MARK_ONDIR)) {
		__u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
		fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
	}

	spin_unlock(&fsn_mark->lock);

	return mask & ~oldmask;
}

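/*
 * Allocate a new mark and attach it to either an inode or a vfsmount,
 * enforcing the per-group limit on the number of marks.  Called with
 * group->mark_mutex held.
 */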
static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
						   struct inode *inode,
						   struct vfsmount *mnt)
{
	struct fsnotify_mark *mark;
	int ret;

	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
		return ERR_PTR(-ENOSPC);

	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
	if (!mark)
		return ERR_PTR(-ENOMEM);

	fsnotify_init_mark(mark, fanotify_free_mark);
	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
	if (ret) {
		fsnotify_put_mark(mark);
		return ERR_PTR(ret);
	}

	return mark;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
				      struct vfsmount *mnt, __u32 mask,
				      unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
		fsnotify_recalc_vfsmount_mask(mnt);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
				   struct inode *inode, __u32 mask,
				   unsigned int flags)
{
	struct fsnotify_mark *fsn_mark;
	__u32 added;

	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

	/*
	 * If some other task has this inode open for write we should not add
	 * an ignored mark, unless that ignored mark is supposed to survive
	 * modification changes anyway.
	 */
	if ((flags & FAN_MARK_IGNORED_MASK) &&
	    !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
	    (atomic_read(&inode->i_writecount) > 0))
		return 0;

	mutex_lock(&group->mark_mutex);
	fsn_mark = fsnotify_find_inode_mark(group, inode);
	if (!fsn_mark) {
		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
		if (IS_ERR(fsn_mark)) {
			mutex_unlock(&group->mark_mutex);
			return PTR_ERR(fsn_mark);
		}
	}
	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
	mutex_unlock(&group->mark_mutex);

	if (added & ~inode->i_fsnotify_mask)
		fsnotify_recalc_inode_mask(inode);

	fsnotify_put_mark(fsn_mark);
	return 0;
}

/* fanotify syscalls */
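/*
 * Illustrative userspace usage of the two syscalls implemented below (a rough
 * sketch only, not part of this file; error handling omitted):
 *
 *	int fan_fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *	fanotify_mark(fan_fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE_WRITE, AT_FDCWD, "/");
 *	struct fanotify_event_metadata events[16];
 *	ssize_t len = read(fan_fd, events, sizeof(events));
 *	// walk the buffer with FAN_EVENT_OK()/FAN_EVENT_NEXT() and close each
 *	// metadata->fd when done
 *
 * fanotify_init() creates the fsnotify group, charges it against the calling
 * user's listener limit and returns an anonymous fd backed by fanotify_fops.
 */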
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
	struct fsnotify_group *group;
	int f_flags, fd;
	struct user_struct *user;
	struct fanotify_event_info *oevent;

	pr_debug("%s: flags=%d event_f_flags=%d\n",
		 __func__, flags, event_f_flags);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (flags & ~FAN_ALL_INIT_FLAGS)
		return -EINVAL;

	user = get_current_user();
	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
		free_uid(user);
		return -EMFILE;
	}

	f_flags = O_RDWR | FMODE_NONOTIFY;
	if (flags & FAN_CLOEXEC)
		f_flags |= O_CLOEXEC;
	if (flags & FAN_NONBLOCK)
		f_flags |= O_NONBLOCK;

	/* fsnotify_alloc_group takes a ref.  Dropped in fanotify_release */
	group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
	if (IS_ERR(group)) {
		free_uid(user);
		return PTR_ERR(group);
	}

	group->fanotify_data.user = user;
	atomic_inc(&user->fanotify_listeners);

	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
	if (unlikely(!oevent)) {
		fd = -ENOMEM;
		goto out_destroy_group;
	}
	group->overflow_event = &oevent->fse;

	if (force_o_largefile())
		event_f_flags |= O_LARGEFILE;
	group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	spin_lock_init(&group->fanotify_data.access_lock);
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
	switch (flags & FAN_ALL_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
		break;
	case FAN_CLASS_CONTENT:
		group->priority = FS_PRIO_1;
		break;
	case FAN_CLASS_PRE_CONTENT:
		group->priority = FS_PRIO_2;
		break;
	default:
		fd = -EINVAL;
		goto out_destroy_group;
	}

	if (flags & FAN_UNLIMITED_QUEUE) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->max_events = UINT_MAX;
	} else {
		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
	}

	if (flags & FAN_UNLIMITED_MARKS) {
		fd = -EPERM;
		if (!capable(CAP_SYS_ADMIN))
			goto out_destroy_group;
		group->fanotify_data.max_marks = UINT_MAX;
	} else {
		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
	}

	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
	if (fd < 0)
		goto out_destroy_group;

	return fd;

out_destroy_group:
	fsnotify_destroy_group(group);
	return fd;
}

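/*
 * Add, remove or flush fanotify marks.  The object to mark is resolved from
 * dfd/pathname and, depending on FAN_MARK_MOUNT, the mark is attached either
 * to the resolved inode or to its vfsmount.
 */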
SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
			      __u64, mask, int, dfd,
			      const char  __user *, pathname)
{
	struct inode *inode = NULL;
	struct vfsmount *mnt = NULL;
	struct fsnotify_group *group;
	struct fd f;
	struct path path;
	int ret;

	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
		 __func__, fanotify_fd, flags, dfd, pathname, mask);

	/* we only use the lower 32 bits as of right now. */
	if (mask & ((__u64)0xffffffff << 32))
		return -EINVAL;

	if (flags & ~FAN_ALL_MARK_FLAGS)
		return -EINVAL;
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:		/* fallthrough */
	case FAN_MARK_REMOVE:
		if (!mask)
			return -EINVAL;
	case FAN_MARK_FLUSH:
		break;
	default:
		return -EINVAL;
	}

	if (mask & FAN_ONDIR) {
		flags |= FAN_MARK_ONDIR;
		mask &= ~FAN_ONDIR;
	}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
	if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
		return -EINVAL;

	f = fdget(fanotify_fd);
	if (unlikely(!f.file))
		return -EBADF;

	/* verify that this is indeed an fanotify instance */
	ret = -EINVAL;
	if (unlikely(f.file->f_op != &fanotify_fops))
		goto fput_and_out;
	group = f.file->private_data;

	/*
	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not
	 * allowed to set permissions events.
	 */
	ret = -EINVAL;
	if (mask & FAN_ALL_PERM_EVENTS &&
	    group->priority == FS_PRIO_0)
		goto fput_and_out;

	ret = fanotify_find_path(dfd, pathname, &path, flags);
	if (ret)
		goto fput_and_out;

	/* inode held in place by reference to path; group by fget on fd */
	if (!(flags & FAN_MARK_MOUNT))
		inode = path.dentry->d_inode;
	else
		mnt = path.mnt;

	/* create/update an inode or vfsmount mark */
	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
	case FAN_MARK_ADD:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_add_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_REMOVE:
		if (flags & FAN_MARK_MOUNT)
			ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
		else
			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
		break;
	case FAN_MARK_FLUSH:
		if (flags & FAN_MARK_MOUNT)
			fsnotify_clear_vfsmount_marks_by_group(group);
		else
			fsnotify_clear_inode_marks_by_group(group);
		break;
	default:
		ret = -EINVAL;
	}

	path_put(&path);
fput_and_out:
	fdput(f);
	return ret;
}

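/*
 * The compat entry point receives the 64-bit mask as two 32-bit halves and
 * reassembles it in the right order for the native syscall.
 */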
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
				int, fanotify_fd, unsigned int, flags,
				__u32, mask0, __u32, mask1, int, dfd,
				const char  __user *, pathname)
{
	return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
				((__u64)mask0 << 32) | mask1,
#else
				((__u64)mask1 << 32) | mask0,
#endif
				 dfd, pathname);
}
#endif

/*
 * fanotify_user_setup - Our initialization function.  Note that we cannot return
 * error because we have compiled-in VFS hooks.  So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
						SLAB_PANIC);
#endif

	return 0;
}
device_initcall(fanotify_user_setup);