/*
 * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/writeback.h> /* for inode_lock */

#include <asm/atomic.h>

#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
/*
 * Recalculate the mask of events relevant to a given inode.  The caller
 * must already hold inode->i_lock.
 */
static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;
	__u32 new_mask = 0;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
		new_mask |= mark->mask;
	inode->i_fsnotify_mask = new_mask;
}
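/*
 * Worked example (hypothetical marks, not taken from any caller): if one
 * group's mark has mask FS_MODIFY and another group's mark has mask
 * FS_MODIFY | FS_DELETE, the loop above leaves i_fsnotify_mask equal to
 * FS_MODIFY | FS_DELETE, the union of everything any group listens for.
 */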

/*
 * Recalculate inode->i_fsnotify_mask, the union of all FS_* event types
 * that any notifier is interested in receiving for this inode.
 */
void fsnotify_recalc_inode_mask(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	fsnotify_recalc_inode_mask_locked(inode);
	spin_unlock(&inode->i_lock);

	__fsnotify_update_child_dentry_flags(inode);
}
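/*
 * Callers run this after anything that changes the set of marks on an
 * inode or one mark's mask.  The __fsnotify_update_child_dentry_flags()
 * call matters for directories: it re-flags child dentries so events on
 * children know whether this parent is interested.  A sketch of a typical
 * backend sequence (assumed and simplified, not taken from this file):
 *
 *	spin_lock(&mark->lock);
 *	mark->mask |= FS_DELETE;	(start caring about a new event type)
 *	spin_unlock(&mark->lock);
 *	fsnotify_recalc_inode_mask(mark->i.inode);
 */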

/*
 * Detach a mark from its inode.  The caller must hold both mark->lock and
 * the owning group's mark_lock; the inode's mask is recalculated while we
 * still hold inode->i_lock.
 */
void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)
{
	struct inode *inode = mark->i.inode;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&mark->group->mark_lock);

	spin_lock(&inode->i_lock);

	hlist_del_init(&mark->i.i_list);
	mark->i.inode = NULL;

	/*
	 * This mark is now off the inode->i_fsnotify_marks list and we hold
	 * the inode->i_lock, so this is the perfect time to update the
	 * inode->i_fsnotify_mask.
	 */
	fsnotify_recalc_inode_mask_locked(inode);

	spin_unlock(&inode->i_lock);
}
| 82 | |
| 83 | /* |
| 84 | * Given an inode, destroy all of the marks associated with that inode. |
| 85 | */ |
| 86 | void fsnotify_clear_marks_by_inode(struct inode *inode) |
| 87 | { |
| 88 | struct fsnotify_mark *mark, *lmark; |
| 89 | struct hlist_node *pos, *n; |
| 90 | LIST_HEAD(free_list); |
| 91 | |
| 92 | spin_lock(&inode->i_lock); |
| 93 | hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) { |
| 94 | list_add(&mark->i.free_i_list, &free_list); |
| 95 | hlist_del_init(&mark->i.i_list); |
| 96 | fsnotify_get_mark(mark); |
| 97 | } |
| 98 | spin_unlock(&inode->i_lock); |
| 99 | |
| 100 | list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) { |
| 101 | fsnotify_destroy_mark(mark); |
| 102 | fsnotify_put_mark(mark); |
| 103 | } |
| 104 | } |
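/*
 * Note the two-phase shape above: marks are unhooked and collected on a
 * private list under inode->i_lock, then destroyed only after that lock is
 * dropped.  fsnotify_destroy_mark() takes mark->lock and group->mark_lock,
 * which by the lock ordering used throughout this file must be acquired
 * before inode->i_lock, so calling it inside the first loop would deadlock.
 * The fsnotify_get_mark() reference keeps each mark alive across the gap.
 */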

/*
 * Given a group, clear all of the inode marks associated with that group.
 */
void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
{
	fsnotify_clear_marks_by_group_flags(group, FSNOTIFY_MARK_FLAG_INODE);
}

/*
 * Given a group and inode, find the mark associated with that combination.
 * If found, take a reference to that mark and return it; otherwise return
 * NULL.  The caller must hold inode->i_lock.
 */
struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group,
						      struct inode *inode)
{
	struct fsnotify_mark *mark;
	struct hlist_node *pos;

	assert_spin_locked(&inode->i_lock);

	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
		if (mark->group == group) {
			fsnotify_get_mark(mark);
			return mark;
		}
	}
	return NULL;
}
| 134 | |
| 135 | /* |
| 136 | * given a group and inode, find the mark associated with that combination. |
| 137 | * if found take a reference to that mark and return it, else return NULL |
| 138 | */ |
| 139 | struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, |
| 140 | struct inode *inode) |
| 141 | { |
| 142 | struct fsnotify_mark *mark; |
| 143 | |
| 144 | spin_lock(&inode->i_lock); |
| 145 | mark = fsnotify_find_inode_mark_locked(group, inode); |
| 146 | spin_unlock(&inode->i_lock); |
| 147 | |
| 148 | return mark; |
| 149 | } |
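/*
 * Sketch of typical use by a backend (names and error handling assumed,
 * not taken from this file).  Because a reference is returned, every hit
 * must be balanced with fsnotify_put_mark():
 *
 *	mark = fsnotify_find_inode_mark(group, inode);
 *	if (mark) {
 *		(read or update the watch here)
 *		fsnotify_put_mark(mark);
 *	}
 */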
| 150 | |
| 151 | /* |
| 152 | * If we are setting a mark mask on an inode mark we should pin the inode |
| 153 | * in memory. |
| 154 | */ |
| 155 | void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *mark, |
| 156 | __u32 mask) |
| 157 | { |
| 158 | struct inode *inode; |
| 159 | |
| 160 | assert_spin_locked(&mark->lock); |
| 161 | |
| 162 | if (mask && |
| 163 | mark->i.inode && |
| 164 | !(mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) { |
| 165 | mark->flags |= FSNOTIFY_MARK_FLAG_OBJECT_PINNED; |
| 166 | inode = igrab(mark->i.inode); |
| 167 | /* |
| 168 | * we shouldn't be able to get here if the inode wasn't |
| 169 | * already safely held in memory. But bug in case it |
| 170 | * ever is wrong. |
| 171 | */ |
| 172 | BUG_ON(!inode); |
| 173 | } |
| 174 | } |
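/*
 * The igrab() above is what keeps a watched inode from being evicted while
 * a live mark points at it.  FSNOTIFY_MARK_FLAG_OBJECT_PINNED records that
 * exactly one such reference is held, so the pin happens at most once per
 * mark; the matching iput() is expected to happen in the generic mark
 * teardown (fs/notify/mark.c) when the mark is destroyed.
 */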

/*
 * Attach an initialized mark to a given group and inode.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.
 */
int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
			    struct fsnotify_group *group, struct inode *inode,
			    int allow_dups)
{
	struct fsnotify_mark *lmark = NULL;
	int ret = 0;

	/* OR in, don't assign: the caller may already have set flags */
	mark->flags |= FSNOTIFY_MARK_FLAG_INODE;

	assert_spin_locked(&mark->lock);
	assert_spin_locked(&group->mark_lock);

	spin_lock(&inode->i_lock);

	if (!allow_dups)
		lmark = fsnotify_find_inode_mark_locked(group, inode);
	if (!lmark) {
		mark->i.inode = inode;

		hlist_add_head(&mark->i.i_list, &inode->i_fsnotify_marks);

		fsnotify_recalc_inode_mask_locked(inode);
	}

	spin_unlock(&inode->i_lock);

	if (lmark) {
		/* drop the reference taken by the lookup above */
		fsnotify_put_mark(lmark);
		ret = -EEXIST;
	}

	return ret;
}
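/*
 * This function does not take mark->lock or group->mark_lock itself; it
 * only asserts them.  The expected call path (assumed, based on the
 * generic mark code in fs/notify/mark.c) is through fsnotify_add_mark(),
 * which sets up the mark, takes both locks, and then dispatches to the
 * object-specific helper:
 *
 *	spin_lock(&mark->lock);
 *	spin_lock(&group->mark_lock);
 *	ret = fsnotify_add_inode_mark(mark, group, inode, allow_dups);
 *	spin_unlock(&group->mark_lock);
 *	spin_unlock(&mark->lock);
 */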

/**
 * fsnotify_unmount_inodes - an sb is unmounting; handle any watched inodes.
 * @list: list of inodes being unmounted (sb->s_inodes)
 *
 * Called with inode_lock held, protecting the unmounting super block's list
 * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
 * We temporarily drop inode_lock, however, and CAN block.
 */
void fsnotify_unmount_inodes(struct list_head *list)
{
	struct inode *inode, *next_i, *need_iput = NULL;

	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {
		struct inode *need_iput_tmp;

		/*
		 * We cannot __iget() an inode in state I_CLEAR, I_FREEING,
		 * I_WILL_FREE, or I_NEW which is fine because by that point
		 * the inode cannot have any associated watches.
		 */
		if (inode->i_state & (I_CLEAR|I_FREEING|I_WILL_FREE|I_NEW))
			continue;

		/*
		 * If i_count is zero, the inode cannot have any watches and
		 * doing an __iget/iput with MS_ACTIVE clear would actually
		 * evict all inodes with zero i_count from icache which is
		 * unnecessarily violent and may in fact be illegal to do.
		 */
		if (!atomic_read(&inode->i_count))
			continue;

		need_iput_tmp = need_iput;
		need_iput = NULL;

		/* In case fsnotify_inode_delete() drops a reference. */
		if (inode != need_iput_tmp)
			__iget(inode);
		else
			need_iput_tmp = NULL;

		/* In case the dropping of a reference would nuke next_i. */
		if ((&next_i->i_sb_list != list) &&
		    atomic_read(&next_i->i_count) &&
		    !(next_i->i_state & (I_CLEAR | I_FREEING | I_WILL_FREE))) {
			__iget(next_i);
			need_iput = next_i;
		}

		/*
		 * We can safely drop inode_lock here because we hold
		 * references on both inode and next_i.  Also no new inodes
		 * will be added since the umount has begun.  Finally,
		 * iprune_mutex keeps shrink_icache_memory() away.
		 */
		spin_unlock(&inode_lock);

		if (need_iput_tmp)
			iput(need_iput_tmp);

		/* for each watch, send FS_UNMOUNT and then remove it */
		fsnotify(inode, FS_UNMOUNT, inode, FSNOTIFY_EVENT_INODE, NULL, 0);

		fsnotify_inode_delete(inode);

		iput(inode);

		spin_lock(&inode_lock);
	}
}
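/*
 * Illustrative trace of the need_iput juggling above (hypothetical inodes
 * A -> B -> C, all in use): on the first pass we __iget(A) and __iget(B)
 * (need_iput = B), drop inode_lock, notify A, then iput(A).  On the second
 * pass inode == need_iput_tmp == B, so we skip the extra __iget(B) and let
 * the final iput(B) consume the reference taken on the previous pass, while
 * need_iput moves on to C.  The reference held on next_i is what keeps the
 * list cursor valid while inode_lock is dropped.
 */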