/*
 * linux/fs/pnode.c
 *
 * (C) Copyright IBM Corporation 2005.
 * Released under GPL v2.
 * Author : Ram Pai (linuxram@us.ibm.com)
 *
 */
#include <linux/mnt_namespace.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include "internal.h"
#include "pnode.h"

/* return the next shared peer mount of @p */
static inline struct mount *next_peer(struct mount *p)
{
	return list_entry(p->mnt_share.next, struct mount, mnt_share);
}

/* return the first slave mount of @p */
static inline struct mount *first_slave(struct mount *p)
{
	return list_entry(p->mnt_slave_list.next, struct mount, mnt_slave);
}

/* return the next mount on the slave list that @p is on */
static inline struct mount *next_slave(struct mount *p)
{
	return list_entry(p->mnt_slave.next, struct mount, mnt_slave);
}

static struct mount *get_peer_under_root(struct mount *mnt,
					 struct mnt_namespace *ns,
					 const struct path *root)
{
	struct mount *m = mnt;

	do {
		/* Check the namespace first for optimization */
		if (m->mnt_ns == ns && is_path_reachable(m, m->mnt.mnt_root, root))
			return m;

		m = next_peer(m);
	} while (m != mnt);

	return NULL;
}

/*
 * Get ID of closest dominating peer group having a representative
 * under the given root.
 *
 * Caller must hold namespace_sem
 */
int get_dominating_id(struct mount *mnt, const struct path *root)
{
	struct mount *m;

	for (m = mnt->mnt_master; m != NULL; m = m->mnt_master) {
		struct mount *d = get_peer_under_root(m, mnt->mnt_ns, root);
		if (d)
			return d->mnt_group_id;
	}

	return 0;
}
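
/*
 * Example user: show_mountinfo() in fs/proc_namespace.c reports this ID
 * as "propagate_from:<id>" for slave mounts whose dominating group
 * differs from their immediate master's group.
 */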

static int do_make_slave(struct mount *mnt)
{
	struct mount *peer_mnt = mnt, *master = mnt->mnt_master;
	struct mount *slave_mnt;

	/*
	 * slave 'mnt' to a peer mount that has the
	 * same root dentry. If none is available then
	 * slave it to anything that is available.
	 */
	while ((peer_mnt = next_peer(peer_mnt)) != mnt &&
	       peer_mnt->mnt.mnt_root != mnt->mnt.mnt_root) ;

	if (peer_mnt == mnt) {
		peer_mnt = next_peer(mnt);
		if (peer_mnt == mnt)
			peer_mnt = NULL;
	}
	if (mnt->mnt_group_id && IS_MNT_SHARED(mnt) &&
	    list_empty(&mnt->mnt_share))
		mnt_release_group_id(mnt);

	list_del_init(&mnt->mnt_share);
	mnt->mnt_group_id = 0;

	if (peer_mnt)
		master = peer_mnt;

	if (master) {
		list_for_each_entry(slave_mnt, &mnt->mnt_slave_list, mnt_slave)
			slave_mnt->mnt_master = master;
		list_move(&mnt->mnt_slave, &master->mnt_slave_list);
		list_splice(&mnt->mnt_slave_list, master->mnt_slave_list.prev);
		INIT_LIST_HEAD(&mnt->mnt_slave_list);
	} else {
		struct list_head *p = &mnt->mnt_slave_list;
		while (!list_empty(p)) {
			slave_mnt = list_first_entry(p,
					struct mount, mnt_slave);
			list_del_init(&slave_mnt->mnt_slave);
			slave_mnt->mnt_master = NULL;
		}
	}
	mnt->mnt_master = master;
	CLEAR_MNT_SHARED(mnt);
	return 0;
}
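
/*
 * Illustrative example: if 'mnt' is in a peer group { mnt, P1, P2 } and
 * has slaves { S1, S2 }, then after do_make_slave(mnt):
 *
 *	- mnt is a slave of one of its former peers (a peer sharing
 *	  mnt's root dentry is preferred), and
 *	- S1 and S2 have that same peer as their new master.
 */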

/*
 * vfsmount lock must be held for write
 */
void change_mnt_propagation(struct mount *mnt, int type)
{
	if (type == MS_SHARED) {
		set_mnt_shared(mnt);
		return;
	}
	do_make_slave(mnt);
	if (type != MS_SLAVE) {
		list_del_init(&mnt->mnt_slave);
		mnt->mnt_master = NULL;
		if (type == MS_UNBINDABLE)
			mnt->mnt.mnt_flags |= MNT_UNBINDABLE;
		else
			mnt->mnt.mnt_flags &= ~MNT_UNBINDABLE;
	}
}
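
/*
 * Example (illustrative): these types are what do_change_type() in
 * fs/namespace.c passes down for the mount(2) propagation requests, e.g.
 *
 *	mount --make-shared     X  =>  change_mnt_propagation(m, MS_SHARED)
 *	mount --make-slave      X  =>  change_mnt_propagation(m, MS_SLAVE)
 *	mount --make-private    X  =>  change_mnt_propagation(m, MS_PRIVATE)
 *	mount --make-unbindable X  =>  change_mnt_propagation(m, MS_UNBINDABLE)
 *
 * (the --make-r* variants apply the same change recursively via MS_REC.)
 */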

/*
 * get the next mount in the propagation tree.
 * @m: the mount seen last
 * @origin: the original mount from which the tree walk was initiated
 *
 * Note that peer groups form contiguous segments of slave lists.
 * We rely on that in propagate_one() to be able to find out if a
 * vfsmount found while iterating with propagation_next() is
 * a peer of one we'd found earlier.
 */
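/*
 * Illustrative walk: given a peer group { A, B } where A has slaves
 * { C, D } and C has the slave { E }, repeated calls starting from
 * propagation_next(A, A) visit C, E, D, B and then return NULL.
 */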
static struct mount *propagation_next(struct mount *m,
					 struct mount *origin)
{
	/* are there any slaves of this mount? */
	if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
		return first_slave(m);

	while (1) {
		struct mount *master = m->mnt_master;

		if (master == origin->mnt_master) {
			struct mount *next = next_peer(m);
			return (next == origin) ? NULL : next;
		} else if (m->mnt_slave.next != &master->mnt_slave_list)
			return next_slave(m);

		/* back at master */
		m = master;
	}
}

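/*
 * Find the next peer group in the propagation graph after the one
 * containing @m, returning its first mount; propagate_mnt() walks the
 * members of each group itself.  Returns NULL once every group
 * reachable from @origin has been visited.
 */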
static struct mount *next_group(struct mount *m, struct mount *origin)
{
	while (1) {
		while (1) {
			struct mount *next;
			if (!IS_MNT_NEW(m) && !list_empty(&m->mnt_slave_list))
				return first_slave(m);
			next = next_peer(m);
			if (m->mnt_group_id == origin->mnt_group_id) {
				if (next == origin)
					return NULL;
			} else if (m->mnt_slave.next != &next->mnt_slave)
				break;
			m = next;
		}
		/* m is the last peer */
		while (1) {
			struct mount *master = m->mnt_master;
			if (m->mnt_slave.next != &master->mnt_slave_list)
				return next_slave(m);
			m = next_peer(master);
			if (master->mnt_group_id == origin->mnt_group_id)
				break;
			if (master->mnt_slave.next == &m->mnt_slave)
				break;
			m = master;
		}
		if (m == origin)
			return NULL;
	}
}

/* all accesses are serialized by namespace_sem */
static struct user_namespace *user_ns;
static struct mount *last_dest, *last_source, *dest_master;
static struct mountpoint *mp;
static struct hlist_head *list;
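
/*
 * user_ns:     user namespace of the caller's mount namespace, used to
 *              detect propagation across user namespaces
 * last_dest:   the mount we propagated to most recently
 * last_source: the copy of the source tree created for last_dest
 * dest_master: master of the original destination mount
 * mp:          the mountpoint being propagated to
 * list:        collects the newly created mounts, linked via ->mnt_hash
 */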

static int propagate_one(struct mount *m)
{
	struct mount *child;
	int type;
	/* skip ones added by this propagate_mnt() */
	if (IS_MNT_NEW(m))
		return 0;
	/* skip if mountpoint isn't covered by it */
	if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
		return 0;
	if (m->mnt_group_id == last_dest->mnt_group_id) {
		type = CL_MAKE_SHARED;
	} else {
		struct mount *n, *p;
		for (n = m; ; n = p) {
			p = n->mnt_master;
			if (p == dest_master || IS_MNT_MARKED(p)) {
				while (last_dest->mnt_master != p) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				if (n->mnt_group_id != last_dest->mnt_group_id) {
					last_source = last_source->mnt_master;
					last_dest = last_source->mnt_parent;
				}
				break;
			}
		}
		type = CL_SLAVE;
		/* beginning of peer group among the slaves? */
		if (IS_MNT_SHARED(m))
			type |= CL_MAKE_SHARED;
	}

	/* Notice when we are propagating across user namespaces */
	if (m->mnt_ns->user_ns != user_ns)
		type |= CL_UNPRIVILEGED;
	child = copy_tree(last_source, last_source->mnt.mnt_root, type);
	if (IS_ERR(child))
		return PTR_ERR(child);
	child->mnt.mnt_flags &= ~MNT_LOCKED;
	mnt_set_mountpoint(m, mp, child);
	last_dest = m;
	last_source = child;
	if (m->mnt_master != dest_master) {
		read_seqlock_excl(&mount_lock);
		SET_MNT_MARK(m->mnt_master);
		read_sequnlock_excl(&mount_lock);
	}
	hlist_add_head(&child->mnt_hash, list);
	return 0;
}
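
/*
 * The CL_* clone flags above are defined in pnode.h: CL_SLAVE makes the
 * copy a slave of its source, CL_MAKE_SHARED marks the copy shared, and
 * CL_UNPRIVILEGED locks certain mount flags in the copy when crossing
 * user namespaces.
 */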

/*
 * mount 'source_mnt' under the destination 'dest_mnt' at
 * mountpoint 'dest_mp', and propagate that mount to
 * all the peer and slave mounts of 'dest_mnt'.
 * Link all the new mounts into a propagation tree headed at
 * source_mnt. Also link all the new mounts using ->mnt_list
 * headed at source_mnt's ->mnt_list
 *
 * @dest_mnt: destination mount.
 * @dest_mp: destination mountpoint.
 * @source_mnt: source mount.
 * @tree_list : list of heads of trees to be attached.
 */
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
		    struct mount *source_mnt, struct hlist_head *tree_list)
{
	struct mount *m, *n;
	int ret = 0;

	/*
	 * we don't want to bother passing tons of arguments to
	 * propagate_one(); everything is serialized by namespace_sem,
	 * so globals will do just fine.
	 */
	user_ns = current->nsproxy->mnt_ns->user_ns;
	last_dest = dest_mnt;
	last_source = source_mnt;
	mp = dest_mp;
	list = tree_list;
	dest_master = dest_mnt->mnt_master;

	/* all peers of dest_mnt, except dest_mnt itself */
	for (n = next_peer(dest_mnt); n != dest_mnt; n = next_peer(n)) {
		ret = propagate_one(n);
		if (ret)
			goto out;
	}

	/* all slave groups */
	for (m = next_group(dest_mnt, dest_mnt); m;
			m = next_group(m, dest_mnt)) {
		/* everything in that slave group */
		n = m;
		do {
			ret = propagate_one(n);
			if (ret)
				goto out;
			n = next_peer(n);
		} while (n != m);
	}
out:
	read_seqlock_excl(&mount_lock);
	hlist_for_each_entry(n, tree_list, mnt_hash) {
		m = n->mnt_parent;
		if (m->mnt_master != dest_mnt->mnt_master)
			CLEAR_MNT_MARK(m->mnt_master);
	}
	read_sequnlock_excl(&mount_lock);
	return ret;
}
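
/*
 * propagate_mnt() is called from attach_recursive_mnt() in fs/namespace.c
 * when the destination mount is shared; the copies collected in
 * @tree_list are then committed (or cleaned up on error) by the caller.
 */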

/*
 * return true if the refcount is greater than count
 */
static inline int do_refcount_check(struct mount *mnt, int count)
{
	return mnt_get_count(mnt) > count;
}

/*
 * check if the mount 'mnt' can be unmounted successfully.
 * @mnt: the mount to be checked for unmount
 * NOTE: unmounting 'mnt' would naturally propagate to all
 * other mounts its parent propagates to.
 * Check if any of these mounts that **do not have submounts**
 * have more references than 'refcnt'. If so return busy.
 *
 * vfsmount lock must be held for write
 */
int propagate_mount_busy(struct mount *mnt, int refcnt)
{
	struct mount *m, *child;
	struct mount *parent = mnt->mnt_parent;
	int ret = 0;

	if (mnt == parent)
		return do_refcount_check(mnt, refcnt);

	/*
	 * quickly check if the current mount can be unmounted.
	 * If not, we don't have to go checking for all other
	 * mounts
	 */
	if (!list_empty(&mnt->mnt_mounts) || do_refcount_check(mnt, refcnt))
		return 1;

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child && list_empty(&child->mnt_mounts) &&
		    (ret = do_refcount_check(child, 1)))
			break;
	}
	return ret;
}
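
/*
 * Example: do_umount() in fs/namespace.c calls
 * propagate_mount_busy(mnt, 2); any reference beyond the two the
 * unmount path itself expects makes the mount busy.
 */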

/*
 * Clear MNT_LOCKED when it can be shown to be safe.
 *
 * mount_lock lock must be held for write
 */
void propagate_mount_unlock(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m, *child;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		child = __lookup_mnt_last(&m->mnt, mnt->mnt_mountpoint);
		if (child)
			child->mnt.mnt_flags &= ~MNT_LOCKED;
	}
}

/*
 * Mark all mounts that the MNT_LOCKED logic will allow to be unmounted.
 */
static void mark_umount_candidates(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {
		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		if (child && (!IS_MNT_LOCKED(child) || IS_MNT_MARKED(m))) {
			SET_MNT_MARK(child);
		}
	}
}

/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {

		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no children
		 * and the child is marked safe to unmount.
		 */
		if (!child || !IS_MNT_MARKED(child))
			continue;
		CLEAR_MNT_MARK(child);
		if (list_empty(&child->mnt_mounts)) {
			list_del_init(&child->mnt_child);
			child->mnt.mnt_flags |= MNT_UMOUNT;
			list_move_tail(&child->mnt_list, &mnt->mnt_list);
		}
	}
}

/*
 * collect all mounts that receive propagation from the mount in @list,
 * and return these additional mounts in the same list.
 * @list: the list of mounts to be unmounted.
 *
 * vfsmount lock must be held for write
 */
int propagate_umount(struct list_head *list)
{
	struct mount *mnt;

	list_for_each_entry_reverse(mnt, list, mnt_list)
		mark_umount_candidates(mnt);

	list_for_each_entry(mnt, list, mnt_list)
		__propagate_umount(mnt);
	return 0;
}
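
/*
 * propagate_umount() is invoked from umount_tree() in fs/namespace.c,
 * which passes the list of mounts being detached; the propagated
 * victims collected here are unmounted along with them.
 */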