unify compat fanotify_mark(2), switch to COMPAT_SYSCALL_DEFINE
[deliverable/linux.git] fs/notify/fanotify/fanotify_user.c
#include <linux/fanotify.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/fsnotify_backend.h>
#include <linux/init.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/poll.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/compat.h>

#include <asm/ioctls.h>

#include "../../mount.h"
#include "../fdinfo.h"

#define FANOTIFY_DEFAULT_MAX_EVENTS    16384
#define FANOTIFY_DEFAULT_MAX_MARKS     8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS 128

extern const struct fsnotify_ops fanotify_fsnotify_ops;

static struct kmem_cache *fanotify_mark_cache __read_mostly;
static struct kmem_cache *fanotify_response_event_cache __read_mostly;

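/*
 * Bookkeeping for permission events: one of these is queued per
 * outstanding permission event, pairing the fd handed to userspace
 * with the in-kernel event so that a later write() of a struct
 * fanotify_response can be matched back up to it.
 */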
struct fanotify_response_event {
        struct list_head list;
        __s32 fd;
        struct fsnotify_event *event;
};

/*
 * Get an fsnotify notification event if one exists and is small
 * enough to fit in "count". Return an error pointer if the count
 * is not large enough.
 *
 * Called with the group->notification_mutex held.
 */
static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
                                            size_t count)
{
        BUG_ON(!mutex_is_locked(&group->notification_mutex));

        pr_debug("%s: group=%p count=%zd\n", __func__, group, count);

        if (fsnotify_notify_queue_is_empty(group))
                return NULL;

        if (FAN_EVENT_METADATA_LEN > count)
                return ERR_PTR(-EINVAL);

        /* the notification_mutex has been held since the queue-empty check
         * above, so the event we remove is the one we just checked for */
        return fsnotify_remove_notify_event(group);
}

static int create_fd(struct fsnotify_group *group,
                     struct fsnotify_event *event,
                     struct file **file)
{
        int client_fd;
        struct file *new_file;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        client_fd = get_unused_fd();
        if (client_fd < 0)
                return client_fd;

        if (event->data_type != FSNOTIFY_EVENT_PATH) {
                WARN_ON(1);
                put_unused_fd(client_fd);
                return -EINVAL;
        }

        /*
         * We need a new file handle for the userspace program so it can
         * read even if the file was originally opened O_WRONLY.
         */
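        /*
         * Note: the open below passes FMODE_NONOTIFY, so operations
         * performed through the fd we hand out do not generate further
         * fanotify events and cannot feed back into this group.
         */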
        /*
         * This may have been an overflow event, in which case dentry and
         * mnt are NULL. That's fine; just don't call dentry_open().
         */
        if (event->path.dentry && event->path.mnt)
                new_file = dentry_open(&event->path,
                                       group->fanotify_data.f_flags | FMODE_NONOTIFY,
                                       current_cred());
        else
                new_file = ERR_PTR(-EOVERFLOW);
        if (IS_ERR(new_file)) {
                /*
                 * We still send an event even if we can't open the file.
                 * This can happen when, say, a task is gone and we try to
                 * open its /proc files, or when we try to open a WRONLY
                 * file like in sysfs. We just send the errno to userspace
                 * since there isn't much else we can do.
                 */
                put_unused_fd(client_fd);
                client_fd = PTR_ERR(new_file);
        } else {
                *file = new_file;
        }

        return client_fd;
}

static int fill_event_metadata(struct fsnotify_group *group,
                               struct fanotify_event_metadata *metadata,
                               struct fsnotify_event *event,
                               struct file **file)
{
        int ret = 0;

        pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
                 group, metadata, event);

        *file = NULL;
        metadata->event_len = FAN_EVENT_METADATA_LEN;
        metadata->metadata_len = FAN_EVENT_METADATA_LEN;
        metadata->vers = FANOTIFY_METADATA_VERSION;
        metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
        metadata->pid = pid_vnr(event->tgid);
        if (unlikely(event->mask & FAN_Q_OVERFLOW))
                metadata->fd = FAN_NOFD;
        else {
                metadata->fd = create_fd(group, event, file);
                if (metadata->fd < 0)
                        ret = metadata->fd;
        }

        return ret;
}

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
                                                  __s32 fd)
{
        struct fanotify_response_event *re, *return_re = NULL;

        mutex_lock(&group->fanotify_data.access_mutex);
        list_for_each_entry(re, &group->fanotify_data.access_list, list) {
                if (re->fd != fd)
                        continue;

                list_del_init(&re->list);
                return_re = re;
                break;
        }
        mutex_unlock(&group->fanotify_data.access_mutex);

        pr_debug("%s: found return_re=%p\n", __func__, return_re);

        return return_re;
}

static int process_access_response(struct fsnotify_group *group,
                                   struct fanotify_response *response_struct)
{
        struct fanotify_response_event *re;
        __s32 fd = response_struct->fd;
        __u32 response = response_struct->response;

        pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
                 fd, response);
        /*
         * Make sure the response is valid. If it is invalid we do nothing;
         * either userspace can send a valid response later or we will clean
         * the entry up after the timeout.
         */
        switch (response) {
        case FAN_ALLOW:
        case FAN_DENY:
                break;
        default:
                return -EINVAL;
        }

        if (fd < 0)
                return -EINVAL;

        re = dequeue_re(group, fd);
        if (!re)
                return -ENOENT;

        re->event->response = response;

        wake_up(&group->fanotify_data.access_waitq);

        kmem_cache_free(fanotify_response_event_cache, re);

        return 0;
}
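
/*
 * For reference, userspace answers a permission event by writing a
 * struct fanotify_response back to the fanotify fd. A minimal sketch
 * (md is the struct fanotify_event_metadata just read):
 *
 *	struct fanotify_response resp = {
 *		.fd = md->fd,
 *		.response = FAN_ALLOW,	(or FAN_DENY)
 *	};
 *	write(fanotify_fd, &resp, sizeof(resp));
 */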

static int prepare_for_access_response(struct fsnotify_group *group,
                                       struct fsnotify_event *event,
                                       __s32 fd)
{
        struct fanotify_response_event *re;

        if (!(event->mask & FAN_ALL_PERM_EVENTS))
                return 0;

        re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
        if (!re)
                return -ENOMEM;

        re->event = event;
        re->fd = fd;

        mutex_lock(&group->fanotify_data.access_mutex);

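        /*
         * If the group is already being torn down, fanotify_release() has
         * bumped bypass_perm: don't queue the entry, auto-allow instead,
         * so the event submitter isn't left waiting for a response that
         * will never arrive.
         */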
        if (atomic_read(&group->fanotify_data.bypass_perm)) {
                mutex_unlock(&group->fanotify_data.access_mutex);
                kmem_cache_free(fanotify_response_event_cache, re);
                event->response = FAN_ALLOW;
                return 0;
        }

        list_add_tail(&re->list, &group->fanotify_data.access_list);
        mutex_unlock(&group->fanotify_data.access_mutex);

        return 0;
}

#else
static int prepare_for_access_response(struct fsnotify_group *group,
                                       struct fsnotify_event *event,
                                       __s32 fd)
{
        return 0;
}

#endif

static ssize_t copy_event_to_user(struct fsnotify_group *group,
                                  struct fsnotify_event *event,
                                  char __user *buf)
{
        struct fanotify_event_metadata fanotify_event_metadata;
        struct file *f;
        int fd, ret;

        pr_debug("%s: group=%p event=%p\n", __func__, group, event);

        ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
        if (ret < 0)
                goto out;

        fd = fanotify_event_metadata.fd;
        ret = -EFAULT;
        if (copy_to_user(buf, &fanotify_event_metadata,
                         fanotify_event_metadata.event_len))
                goto out_close_fd;

        ret = prepare_for_access_response(group, event, fd);
        if (ret)
                goto out_close_fd;

        if (fd != FAN_NOFD)
                fd_install(fd, f);
        return fanotify_event_metadata.event_len;

out_close_fd:
        if (fd != FAN_NOFD) {
                put_unused_fd(fd);
                fput(f);
        }
out:
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (event->mask & FAN_ALL_PERM_EVENTS) {
                event->response = FAN_DENY;
                wake_up(&group->fanotify_data.access_waitq);
        }
#endif
        return ret;
}
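
/*
 * Note the ordering in copy_event_to_user(): the metadata (including
 * the fd number) is copied out before fd_install(), so if copy_to_user()
 * faults, the reserved descriptor slot can still be taken back with
 * put_unused_fd(); after fd_install() that would no longer be safe.
 */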

/* fanotify userspace file descriptor functions */
static unsigned int fanotify_poll(struct file *file, poll_table *wait)
{
        struct fsnotify_group *group = file->private_data;
        int ret = 0;

        poll_wait(file, &group->notification_waitq, wait);
        mutex_lock(&group->notification_mutex);
        if (!fsnotify_notify_queue_is_empty(group))
                ret = POLLIN | POLLRDNORM;
        mutex_unlock(&group->notification_mutex);

        return ret;
}

static ssize_t fanotify_read(struct file *file, char __user *buf,
                             size_t count, loff_t *pos)
{
        struct fsnotify_group *group;
        struct fsnotify_event *kevent;
        char __user *start;
        int ret;
        DEFINE_WAIT(wait);

        start = buf;
        group = file->private_data;

        pr_debug("%s: group=%p\n", __func__, group);

        while (1) {
                prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);

                mutex_lock(&group->notification_mutex);
                kevent = get_one_event(group, count);
                mutex_unlock(&group->notification_mutex);

                if (kevent) {
                        ret = PTR_ERR(kevent);
                        if (IS_ERR(kevent))
                                break;
                        ret = copy_event_to_user(group, kevent, buf);
                        fsnotify_put_event(kevent);
                        if (ret < 0)
                                break;
                        buf += ret;
                        count -= ret;
                        continue;
                }

                ret = -EAGAIN;
                if (file->f_flags & O_NONBLOCK)
                        break;
                ret = -ERESTARTSYS;
                if (signal_pending(current))
                        break;

                if (start != buf)
                        break;

                schedule();
        }

        finish_wait(&group->notification_waitq, &wait);
        if (start != buf && ret != -EFAULT)
                ret = buf - start;
        return ret;
}
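
/*
 * A matching userspace read loop looks roughly like this (a sketch:
 * handle() stands in for application logic; FAN_EVENT_OK and
 * FAN_EVENT_NEXT come from <linux/fanotify.h>):
 *
 *	char buf[4096];
 *	ssize_t len = read(fanotify_fd, buf, sizeof(buf));
 *	struct fanotify_event_metadata *md = (void *)buf;
 *
 *	while (FAN_EVENT_OK(md, len)) {
 *		handle(md->mask, md->pid);
 *		if (md->fd >= 0)
 *			close(md->fd);
 *		md = FAN_EVENT_NEXT(md, len);
 *	}
 */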

static ssize_t fanotify_write(struct file *file, const char __user *buf, size_t count, loff_t *pos)
{
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_response response = { .fd = -1, .response = -1 };
        struct fsnotify_group *group;
        int ret;

        group = file->private_data;

        if (count > sizeof(response))
                count = sizeof(response);

        pr_debug("%s: group=%p count=%zu\n", __func__, group, count);

        if (copy_from_user(&response, buf, count))
                return -EFAULT;

        ret = process_access_response(group, &response);
        if (ret < 0)
                count = ret;

        return count;
#else
        return -EINVAL;
#endif
}

static int fanotify_release(struct inode *ignored, struct file *file)
{
        struct fsnotify_group *group = file->private_data;

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        struct fanotify_response_event *re, *lre;

        mutex_lock(&group->fanotify_data.access_mutex);

        atomic_inc(&group->fanotify_data.bypass_perm);

        list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
                pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
                         re, re->event);

                list_del_init(&re->list);
                re->event->response = FAN_ALLOW;

                kmem_cache_free(fanotify_response_event_cache, re);
        }
        mutex_unlock(&group->fanotify_data.access_mutex);

        wake_up(&group->fanotify_data.access_waitq);
#endif

        if (file->f_flags & FASYNC)
                fsnotify_fasync(-1, file, 0);

        /* matches the fanotify_init->fsnotify_alloc_group */
        fsnotify_destroy_group(group);

        return 0;
}

static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct fsnotify_group *group;
        struct fsnotify_event_holder *holder;
        void __user *p;
        int ret = -ENOTTY;
        size_t send_len = 0;

        group = file->private_data;

        p = (void __user *) arg;

        switch (cmd) {
        case FIONREAD:
                mutex_lock(&group->notification_mutex);
                list_for_each_entry(holder, &group->notification_list, event_list)
                        send_len += FAN_EVENT_METADATA_LEN;
                mutex_unlock(&group->notification_mutex);
                ret = put_user(send_len, (int __user *) p);
                break;
        }

        return ret;
}

static const struct file_operations fanotify_fops = {
        .show_fdinfo    = fanotify_show_fdinfo,
        .poll           = fanotify_poll,
        .read           = fanotify_read,
        .write          = fanotify_write,
        .fasync         = NULL,
        .release        = fanotify_release,
        .unlocked_ioctl = fanotify_ioctl,
        .compat_ioctl   = fanotify_ioctl,
        .llseek         = noop_llseek,
};

static void fanotify_free_mark(struct fsnotify_mark *fsn_mark)
{
        kmem_cache_free(fanotify_mark_cache, fsn_mark);
}

static int fanotify_find_path(int dfd, const char __user *filename,
                              struct path *path, unsigned int flags)
{
        int ret;

        pr_debug("%s: dfd=%d filename=%p flags=%x\n", __func__,
                 dfd, filename, flags);

        if (filename == NULL) {
                struct fd f = fdget(dfd);

                ret = -EBADF;
                if (!f.file)
                        goto out;

                ret = -ENOTDIR;
                if ((flags & FAN_MARK_ONLYDIR) &&
                    !(S_ISDIR(file_inode(f.file)->i_mode))) {
                        fdput(f);
                        goto out;
                }

                *path = f.file->f_path;
                path_get(path);
                fdput(f);
        } else {
                unsigned int lookup_flags = 0;

                if (!(flags & FAN_MARK_DONT_FOLLOW))
                        lookup_flags |= LOOKUP_FOLLOW;
                if (flags & FAN_MARK_ONLYDIR)
                        lookup_flags |= LOOKUP_DIRECTORY;

                ret = user_path_at(dfd, filename, lookup_flags, path);
                if (ret)
                        goto out;
        }

        /* you can only watch an inode if you have read permissions on it */
        ret = inode_permission(path->dentry->d_inode, MAY_READ);
        if (ret)
                path_put(path);
out:
        return ret;
}

static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
                                            __u32 mask,
                                            unsigned int flags,
                                            int *destroy)
{
        __u32 oldmask;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask & ~mask));
        } else {
                oldmask = fsn_mark->ignored_mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, (oldmask & ~mask));
        }
        spin_unlock(&fsn_mark->lock);

        *destroy = !(oldmask & ~mask);

        return mask & oldmask;
}
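
/*
 * *destroy above is set when the removal left no bits in whichever mask
 * (event or ignored) was being modified; callers then reap the whole
 * mark. The return value is the set of bits actually removed, which the
 * callers use to decide whether the object's aggregate mask must be
 * recalculated.
 */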

static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
                                         struct vfsmount *mnt, __u32 mask,
                                         unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark)
                return -ENOENT;

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_destroy_mark(fsn_mark, group);

        fsnotify_put_mark(fsn_mark);
        if (removed & real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);

        return 0;
}

static int fanotify_remove_inode_mark(struct fsnotify_group *group,
                                      struct inode *inode, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark = NULL;
        __u32 removed;
        int destroy_mark;

        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark)
                return -ENOENT;

        removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
                                                 &destroy_mark);
        if (destroy_mark)
                fsnotify_destroy_mark(fsn_mark, group);
        /* matches the fsnotify_find_inode_mark() */
        fsnotify_put_mark(fsn_mark);
        if (removed & inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);

        return 0;
}

static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
                                       __u32 mask,
                                       unsigned int flags)
{
        __u32 oldmask = -1;

        spin_lock(&fsn_mark->lock);
        if (!(flags & FAN_MARK_IGNORED_MASK)) {
                oldmask = fsn_mark->mask;
                fsnotify_set_mark_mask_locked(fsn_mark, (oldmask | mask));
        } else {
                __u32 tmask = fsn_mark->ignored_mask | mask;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
                if (flags & FAN_MARK_IGNORED_SURV_MODIFY)
                        fsn_mark->flags |= FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY;
        }

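        /*
         * Unless the caller asked for events on directories themselves
         * (FAN_MARK_ONDIR), suppress them by adding FAN_ONDIR to the
         * ignored mask.
         */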
        if (!(flags & FAN_MARK_ONDIR)) {
                __u32 tmask = fsn_mark->ignored_mask | FAN_ONDIR;
                fsnotify_set_mark_ignored_mask_locked(fsn_mark, tmask);
        }

        spin_unlock(&fsn_mark->lock);

        return mask & ~oldmask;
}

static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
                                      struct vfsmount *mnt, __u32 mask,
                                      unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;
        int ret = 0;

        fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
        if (!fsn_mark) {
                if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                        return -ENOSPC;

                fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
                if (!fsn_mark)
                        return -ENOMEM;

                fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
                if (ret)
                        goto err;
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

        if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
                fsnotify_recalc_vfsmount_mask(mnt);
err:
        fsnotify_put_mark(fsn_mark);
        return ret;
}

static int fanotify_add_inode_mark(struct fsnotify_group *group,
                                   struct inode *inode, __u32 mask,
                                   unsigned int flags)
{
        struct fsnotify_mark *fsn_mark;
        __u32 added;
        int ret = 0;

        pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);

        /*
         * If some other task has this inode open for write we should not add
         * an ignored mark, unless that ignored mark is supposed to survive
         * modification changes anyway.
         */
        if ((flags & FAN_MARK_IGNORED_MASK) &&
            !(flags & FAN_MARK_IGNORED_SURV_MODIFY) &&
            (atomic_read(&inode->i_writecount) > 0))
                return 0;

        fsn_mark = fsnotify_find_inode_mark(group, inode);
        if (!fsn_mark) {
                if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
                        return -ENOSPC;

                fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
                if (!fsn_mark)
                        return -ENOMEM;

                fsnotify_init_mark(fsn_mark, fanotify_free_mark);
                ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
                if (ret)
                        goto err;
        }
        added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);

        if (added & ~inode->i_fsnotify_mask)
                fsnotify_recalc_inode_mask(inode);
err:
        fsnotify_put_mark(fsn_mark);
        return ret;
}

/* fanotify syscalls */
SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
{
        struct fsnotify_group *group;
        int f_flags, fd;
        struct user_struct *user;

        pr_debug("%s: flags=%d event_f_flags=%d\n",
                 __func__, flags, event_f_flags);

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        if (flags & ~FAN_ALL_INIT_FLAGS)
                return -EINVAL;

        user = get_current_user();
        if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
                free_uid(user);
                return -EMFILE;
        }

        f_flags = O_RDWR | FMODE_NONOTIFY;
        if (flags & FAN_CLOEXEC)
                f_flags |= O_CLOEXEC;
        if (flags & FAN_NONBLOCK)
                f_flags |= O_NONBLOCK;

        /* fsnotify_alloc_group takes a ref. Dropped in fanotify_release */
        group = fsnotify_alloc_group(&fanotify_fsnotify_ops);
        if (IS_ERR(group)) {
                free_uid(user);
                return PTR_ERR(group);
        }

        group->fanotify_data.user = user;
        atomic_inc(&user->fanotify_listeners);

        group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        mutex_init(&group->fanotify_data.access_mutex);
        init_waitqueue_head(&group->fanotify_data.access_waitq);
        INIT_LIST_HEAD(&group->fanotify_data.access_list);
        atomic_set(&group->fanotify_data.bypass_perm, 0);
#endif
        switch (flags & FAN_ALL_CLASS_BITS) {
        case FAN_CLASS_NOTIF:
                group->priority = FS_PRIO_0;
                break;
        case FAN_CLASS_CONTENT:
                group->priority = FS_PRIO_1;
                break;
        case FAN_CLASS_PRE_CONTENT:
                group->priority = FS_PRIO_2;
                break;
        default:
                fd = -EINVAL;
                goto out_destroy_group;
        }

        if (flags & FAN_UNLIMITED_QUEUE) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->max_events = UINT_MAX;
        } else {
                group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
        }

        if (flags & FAN_UNLIMITED_MARKS) {
                fd = -EPERM;
                if (!capable(CAP_SYS_ADMIN))
                        goto out_destroy_group;
                group->fanotify_data.max_marks = UINT_MAX;
        } else {
                group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
        }

        fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
        if (fd < 0)
                goto out_destroy_group;

        return fd;

out_destroy_group:
        fsnotify_destroy_group(group);
        return fd;
}
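
/*
 * Typical use from userspace, as a sketch (via the glibc wrappers in
 * <sys/fanotify.h>; error handling omitted):
 *
 *	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_NOTIF, O_RDONLY);
 *
 *	fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
 *		      FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/home");
 */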

SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
                __u64, mask, int, dfd,
                const char __user *, pathname)
{
        struct inode *inode = NULL;
        struct vfsmount *mnt = NULL;
        struct fsnotify_group *group;
        struct fd f;
        struct path path;
        int ret;

        pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
                 __func__, fanotify_fd, flags, dfd, pathname, mask);

        /* only the lower 32 bits of the mask are used at the moment */
        if (mask & ((__u64)0xffffffff << 32))
                return -EINVAL;

        if (flags & ~FAN_ALL_MARK_FLAGS)
                return -EINVAL;
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:              /* fallthrough */
        case FAN_MARK_REMOVE:
                if (!mask)
                        return -EINVAL;
        case FAN_MARK_FLUSH:
                break;
        default:
                return -EINVAL;
        }

        if (mask & FAN_ONDIR) {
                flags |= FAN_MARK_ONDIR;
                mask &= ~FAN_ONDIR;
        }

#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
        if (mask & ~(FAN_ALL_EVENTS | FAN_ALL_PERM_EVENTS | FAN_EVENT_ON_CHILD))
#else
        if (mask & ~(FAN_ALL_EVENTS | FAN_EVENT_ON_CHILD))
#endif
                return -EINVAL;

        f = fdget(fanotify_fd);
        if (unlikely(!f.file))
                return -EBADF;

        /* verify that this is indeed an fanotify instance */
        ret = -EINVAL;
        if (unlikely(f.file->f_op != &fanotify_fops))
                goto fput_and_out;
        group = f.file->private_data;

        /*
         * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
         * allowed to set permission events.
         */
        ret = -EINVAL;
        if (mask & FAN_ALL_PERM_EVENTS &&
            group->priority == FS_PRIO_0)
                goto fput_and_out;

        ret = fanotify_find_path(dfd, pathname, &path, flags);
        if (ret)
                goto fput_and_out;

        /* inode held in place by reference to path; group by fget on fd */
        if (!(flags & FAN_MARK_MOUNT))
                inode = path.dentry->d_inode;
        else
                mnt = path.mnt;

        /* create/update/remove the requested mark */
        switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
        case FAN_MARK_ADD:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_add_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_REMOVE:
                if (flags & FAN_MARK_MOUNT)
                        ret = fanotify_remove_vfsmount_mark(group, mnt, mask, flags);
                else
                        ret = fanotify_remove_inode_mark(group, inode, mask, flags);
                break;
        case FAN_MARK_FLUSH:
                if (flags & FAN_MARK_MOUNT)
                        fsnotify_clear_vfsmount_marks_by_group(group);
                else
                        fsnotify_clear_inode_marks_by_group(group);
                break;
        default:
                ret = -EINVAL;
        }

        path_put(&path);
fput_and_out:
        fdput(f);
        return ret;
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE6(fanotify_mark,
                       int, fanotify_fd, unsigned int, flags,
                       __u32, mask0, __u32, mask1, int, dfd,
                       const char __user *, pathname)
{
        return sys_fanotify_mark(fanotify_fd, flags,
#ifdef __BIG_ENDIAN
                                 ((__u64)mask1 << 32) | mask0,
#else
                                 ((__u64)mask0 << 32) | mask1,
#endif
                                 dfd, pathname);
}
#endif
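
/*
 * On 32-bit ABIs the 64-bit mask arrives as two 32-bit register halves,
 * and which half holds the high bits depends on the architecture's
 * endianness; the __BIG_ENDIAN switch above reassembles the mask before
 * handing off to the native syscall.
 */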

/*
 * fanotify_user_setup - Our initialization function. Note that we cannot return
 * error because we have compiled-in VFS hooks. So an (unlikely) failure here
 * must result in panic().
 */
static int __init fanotify_user_setup(void)
{
        fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
        fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
                                                   SLAB_PANIC);

        return 0;
}
device_initcall(fanotify_user_setup);