kernfs: implement kernfs_syscall_ops->remount_fs() and ->show_options()
1/*
2 * fs/kernfs/dir.c - kernfs directory implementation
3 *
4 * Copyright (c) 2001-3 Patrick Mochel
5 * Copyright (c) 2007 SUSE Linux Products GmbH
6 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
7 *
8 * This file is released under the GPLv2.
9 */
10
11#include <linux/sched.h>
12#include <linux/fs.h>
13#include <linux/namei.h>
14#include <linux/idr.h>
15#include <linux/slab.h>
16#include <linux/security.h>
17#include <linux/hash.h>
18
19#include "kernfs-internal.h"
20
21DEFINE_MUTEX(kernfs_mutex);
22
23#define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
24
25static bool kernfs_active(struct kernfs_node *kn)
26{
27 lockdep_assert_held(&kernfs_mutex);
28 return atomic_read(&kn->active) >= 0;
29}
30
31static bool kernfs_lockdep(struct kernfs_node *kn)
32{
33#ifdef CONFIG_DEBUG_LOCK_ALLOC
34 return kn->flags & KERNFS_LOCKDEP;
35#else
36 return false;
37#endif
38}
39
40/**
41 * kernfs_name_hash - calculate hash of a kernfs_node name and namespace tag
42 * @name: Null terminated string to hash
43 * @ns: Namespace tag to hash
44 *
45 * Returns 31-bit hash of ns + name (so it fits in an off_t)
46 */
47static unsigned int kernfs_name_hash(const char *name, const void *ns)
48{
49 unsigned long hash = init_name_hash();
50 unsigned int len = strlen(name);
51 while (len--)
52 hash = partial_name_hash(*name++, hash);
53 hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
54 hash &= 0x7fffffffU;
55 /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
56 if (hash < 1)
57 hash += 2;
58 if (hash >= INT_MAX)
59 hash = INT_MAX - 1;
60 return hash;
61}
62
63static int kernfs_name_compare(unsigned int hash, const char *name,
64 const void *ns, const struct kernfs_node *kn)
65{
66 if (hash != kn->hash)
67 return hash - kn->hash;
68 if (ns != kn->ns)
69 return ns - kn->ns;
70 return strcmp(name, kn->name);
71}
72
73static int kernfs_sd_compare(const struct kernfs_node *left,
74 const struct kernfs_node *right)
75{
76 return kernfs_name_compare(left->hash, left->name, left->ns, right);
77}
78
79/**
80 * kernfs_link_sibling - link kernfs_node into sibling rbtree
81 * @kn: kernfs_node of interest
82 *
83 * Link @kn into its sibling rbtree which starts from
84 * @kn->parent->dir.children.
85 *
86 * Locking:
87 * mutex_lock(kernfs_mutex)
88 *
89 * RETURNS:
90 * 0 on success, -EEXIST on failure.
91 */
92static int kernfs_link_sibling(struct kernfs_node *kn)
93{
94 struct rb_node **node = &kn->parent->dir.children.rb_node;
95 struct rb_node *parent = NULL;
96
97 if (kernfs_type(kn) == KERNFS_DIR)
98 kn->parent->dir.subdirs++;
99
100 while (*node) {
101 struct kernfs_node *pos;
102 int result;
103
104 pos = rb_to_kn(*node);
105 parent = *node;
106 result = kernfs_sd_compare(kn, pos);
107 if (result < 0)
108 node = &pos->rb.rb_left;
109 else if (result > 0)
110 node = &pos->rb.rb_right;
111 else
112 return -EEXIST;
113 }
114 /* add new node and rebalance the tree */
115 rb_link_node(&kn->rb, parent, node);
116 rb_insert_color(&kn->rb, &kn->parent->dir.children);
117 return 0;
118}
119
120/**
121 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
122 * @kn: kernfs_node of interest
123 *
124 * Try to unlink @kn from its sibling rbtree which starts from
125 * kn->parent->dir.children. Returns %true if @kn was actually
126 * removed, %false if @kn wasn't on the rbtree.
127 *
128 * Locking:
129 * mutex_lock(kernfs_mutex)
130 */
131static bool kernfs_unlink_sibling(struct kernfs_node *kn)
132{
133 if (RB_EMPTY_NODE(&kn->rb))
134 return false;
135
136 if (kernfs_type(kn) == KERNFS_DIR)
137 kn->parent->dir.subdirs--;
138
139 rb_erase(&kn->rb, &kn->parent->dir.children);
140 RB_CLEAR_NODE(&kn->rb);
141 return true;
142}
143
144/**
145 * kernfs_get_active - get an active reference to kernfs_node
146 * @kn: kernfs_node to get an active reference to
147 *
148 * Get an active reference of @kn. This function is a noop if @kn
149 * is NULL.
150 *
151 * RETURNS:
152 * Pointer to @kn on success, NULL on failure.
153 */
154struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
155{
156 if (unlikely(!kn))
157 return NULL;
158
159 if (!atomic_inc_unless_negative(&kn->active))
160 return NULL;
161
162 if (kernfs_lockdep(kn))
163 rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
164 return kn;
165}
166
167/**
168 * kernfs_put_active - put an active reference to kernfs_node
169 * @kn: kernfs_node to put an active reference to
170 *
171 * Put an active reference to @kn. This function is a noop if @kn
172 * is NULL.
173 */
174void kernfs_put_active(struct kernfs_node *kn)
175{
176 struct kernfs_root *root;
177 int v;
178
179 if (unlikely(!kn))
180 return;
181 root = kernfs_root(kn);
182 if (kernfs_lockdep(kn))
183 rwsem_release(&kn->dep_map, 1, _RET_IP_);
184 v = atomic_dec_return(&kn->active);
185 if (likely(v != KN_DEACTIVATED_BIAS))
186 return;
187
188 wake_up_all(&root->deactivate_waitq);
189}
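The pairing above is how kernfs itself wraps every callback invocation (see kernfs_iop_mkdir() and friends further down). A minimal editorial sketch of the pattern, using these kernfs-internal helpers and a hypothetical example_with_active_ref() name:

static int example_with_active_ref(struct kernfs_node *kn)
{
	if (!kernfs_get_active(kn))	/* @kn is deactivated or being removed */
		return -ENODEV;

	/*
	 * Until the matching put, @kn cannot finish kernfs_drain(), so
	 * kn->priv and the object behind it may be dereferenced safely.
	 */

	kernfs_put_active(kn);	/* may wake a remover waiting in kernfs_drain() */
	return 0;
}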
190
191/**
192 * kernfs_drain - drain kernfs_node
193 * @kn: kernfs_node to drain
194 *
195 * Drain existing usages and nuke all existing mmaps of @kn. Multiple
196 * removers may invoke this function concurrently on @kn and all will
197 * return after draining is complete.
198 */
199static void kernfs_drain(struct kernfs_node *kn)
200 __releases(&kernfs_mutex) __acquires(&kernfs_mutex)
201{
202 struct kernfs_root *root = kernfs_root(kn);
203
204 lockdep_assert_held(&kernfs_mutex);
205 WARN_ON_ONCE(kernfs_active(kn));
206
207 mutex_unlock(&kernfs_mutex);
208
209 if (kernfs_lockdep(kn)) {
210 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
211 if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
212 lock_contended(&kn->dep_map, _RET_IP_);
213 }
214
215 /* but everyone should wait for draining */
216 wait_event(root->deactivate_waitq,
217 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
218
219 if (kernfs_lockdep(kn)) {
220 lock_acquired(&kn->dep_map, _RET_IP_);
221 rwsem_release(&kn->dep_map, 1, _RET_IP_);
222 }
223
224 kernfs_unmap_bin_file(kn);
225
226 mutex_lock(&kernfs_mutex);
227}
228
229/**
230 * kernfs_get - get a reference count on a kernfs_node
231 * @kn: the target kernfs_node
232 */
233void kernfs_get(struct kernfs_node *kn)
234{
235 if (kn) {
236 WARN_ON(!atomic_read(&kn->count));
237 atomic_inc(&kn->count);
238 }
239}
240EXPORT_SYMBOL_GPL(kernfs_get);
241
242/**
243 * kernfs_put - put a reference count on a kernfs_node
244 * @kn: the target kernfs_node
245 *
246 * Put a reference count of @kn and destroy it if the count reaches zero.
247 */
248void kernfs_put(struct kernfs_node *kn)
249{
250 struct kernfs_node *parent;
251 struct kernfs_root *root;
252
253 if (!kn || !atomic_dec_and_test(&kn->count))
254 return;
255 root = kernfs_root(kn);
256 repeat:
257 /*
258 * Moving/renaming is always done while holding reference.
259 * kn->parent won't change beneath us.
260 */
261 parent = kn->parent;
262
263 WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
264 "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
265 parent ? parent->name : "", kn->name, atomic_read(&kn->active));
266
267 if (kernfs_type(kn) == KERNFS_LINK)
268 kernfs_put(kn->symlink.target_kn);
269 if (!(kn->flags & KERNFS_STATIC_NAME))
270 kfree(kn->name);
271 if (kn->iattr) {
272 if (kn->iattr->ia_secdata)
273 security_release_secctx(kn->iattr->ia_secdata,
274 kn->iattr->ia_secdata_len);
275 simple_xattrs_free(&kn->iattr->xattrs);
276 }
277 kfree(kn->iattr);
278 ida_simple_remove(&root->ino_ida, kn->ino);
279 kmem_cache_free(kernfs_node_cache, kn);
280
281 kn = parent;
282 if (kn) {
283 if (atomic_dec_and_test(&kn->count))
284 goto repeat;
285 } else {
286 /* just released the root kn, free @root too */
287 ida_destroy(&root->ino_ida);
288 kfree(root);
289 }
290}
291EXPORT_SYMBOL_GPL(kernfs_put);
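By contrast with the active reference above, kernfs_get()/kernfs_put() only pin the memory of a node, not its usability. An editorial sketch, with a hypothetical example_cache structure, of a caller that stashes a node pointer:

/* hypothetical container that caches a kernfs_node pointer */
struct example_cache {
	struct kernfs_node *kn;
};

static void example_cache_set(struct example_cache *c, struct kernfs_node *kn)
{
	kernfs_get(kn);		/* keep @kn allocated even if it gets removed */
	c->kn = kn;
}

static void example_cache_clear(struct example_cache *c)
{
	kernfs_put(c->kn);	/* final put frees @kn and may cascade to parents */
	c->kn = NULL;
}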
292
293static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
294{
295 struct kernfs_node *kn;
296
297 if (flags & LOOKUP_RCU)
298 return -ECHILD;
299
300 /* Always perform fresh lookup for negatives */
301 if (!dentry->d_inode)
302 goto out_bad_unlocked;
303
304 kn = dentry->d_fsdata;
305 mutex_lock(&kernfs_mutex);
306
307 /* The kernfs node has been deactivated */
308 if (!kernfs_active(kn))
309 goto out_bad;
310
311 /* The kernfs node has been moved */
312 if (dentry->d_parent->d_fsdata != kn->parent)
313 goto out_bad;
314
315 /* The kernfs node has been renamed */
316 if (strcmp(dentry->d_name.name, kn->name) != 0)
317 goto out_bad;
318
319 /* The kernfs node has been moved to a different namespace */
320 if (kn->parent && kernfs_ns_enabled(kn->parent) &&
321 kernfs_info(dentry->d_sb)->ns != kn->ns)
322 goto out_bad;
323
324 mutex_unlock(&kernfs_mutex);
325out_valid:
326 return 1;
327out_bad:
328 mutex_unlock(&kernfs_mutex);
329out_bad_unlocked:
330 /*
331 * @dentry doesn't match the underlying kernfs node, drop the
332 * dentry and force lookup. If we have submounts we must allow the
333 * vfs caches to lie about the state of the filesystem to prevent
334 * leaks and other nasty things, so use check_submounts_and_drop()
335 * instead of d_drop().
336 */
337 if (check_submounts_and_drop(dentry) != 0)
338 goto out_valid;
339
340 return 0;
341}
342
343static void kernfs_dop_release(struct dentry *dentry)
344{
345 kernfs_put(dentry->d_fsdata);
346}
347
348const struct dentry_operations kernfs_dops = {
349 .d_revalidate = kernfs_dop_revalidate,
350 .d_release = kernfs_dop_release,
351};
352
353static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
354 const char *name, umode_t mode,
355 unsigned flags)
356{
357 char *dup_name = NULL;
358 struct kernfs_node *kn;
359 int ret;
360
361 if (!(flags & KERNFS_STATIC_NAME)) {
362 name = dup_name = kstrdup(name, GFP_KERNEL);
363 if (!name)
364 return NULL;
365 }
366
367 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
368 if (!kn)
369 goto err_out1;
370
371 ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
372 if (ret < 0)
373 goto err_out2;
374 kn->ino = ret;
375
376 atomic_set(&kn->count, 1);
377 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
378 RB_CLEAR_NODE(&kn->rb);
379
380 kn->name = name;
381 kn->mode = mode;
382 kn->flags = flags;
383
384 return kn;
385
386 err_out2:
387 kmem_cache_free(kernfs_node_cache, kn);
388 err_out1:
389 kfree(dup_name);
390 return NULL;
391}
392
393struct kernfs_node *kernfs_new_node(struct kernfs_node *parent,
394 const char *name, umode_t mode,
395 unsigned flags)
396{
397 struct kernfs_node *kn;
398
399 kn = __kernfs_new_node(kernfs_root(parent), name, mode, flags);
400 if (kn) {
401 kernfs_get(parent);
402 kn->parent = parent;
403 }
404 return kn;
405}
406
407/**
408 * kernfs_add_one - add kernfs_node to parent without warning
409 * @kn: kernfs_node to be added
410 *
411 * The caller must already have initialized @kn->parent. This
412 * function increments nlink of the parent's inode if @kn is a
413 * directory and links @kn into the children list of the parent.
414 *
415 * RETURNS:
416 * 0 on success, -EEXIST if entry with the given name already
417 * exists.
418 */
419int kernfs_add_one(struct kernfs_node *kn)
420{
421 struct kernfs_node *parent = kn->parent;
422 struct kernfs_iattrs *ps_iattr;
423 bool has_ns;
424 int ret;
425
426 mutex_lock(&kernfs_mutex);
427
428 ret = -EINVAL;
429 has_ns = kernfs_ns_enabled(parent);
430 if (WARN(has_ns != (bool)kn->ns, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
431 has_ns ? "required" : "invalid", parent->name, kn->name))
432 goto out_unlock;
433
434 if (kernfs_type(parent) != KERNFS_DIR)
435 goto out_unlock;
436
437 ret = -ENOENT;
438 if (!kernfs_active(parent))
439 goto out_unlock;
440
441 kn->hash = kernfs_name_hash(kn->name, kn->ns);
442
443 ret = kernfs_link_sibling(kn);
444 if (ret)
445 goto out_unlock;
446
447 /* Update timestamps on the parent */
448 ps_iattr = parent->iattr;
449 if (ps_iattr) {
450 struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
451 ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
452 }
453
454 /* Mark the entry added into directory tree */
455 atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
456 ret = 0;
457out_unlock:
458 mutex_unlock(&kernfs_mutex);
459 return ret;
460}
461
462/**
463 * kernfs_find_ns - find kernfs_node with the given name
464 * @parent: kernfs_node to search under
465 * @name: name to look for
466 * @ns: the namespace tag to use
467 *
468 * Look for kernfs_node with name @name under @parent. Returns pointer to
469 * the found kernfs_node on success, %NULL on failure.
470 */
471static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
472 const unsigned char *name,
473 const void *ns)
474{
475 struct rb_node *node = parent->dir.children.rb_node;
476 bool has_ns = kernfs_ns_enabled(parent);
477 unsigned int hash;
478
479 lockdep_assert_held(&kernfs_mutex);
480
481 if (has_ns != (bool)ns) {
482 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
483 has_ns ? "required" : "invalid", parent->name, name);
484 return NULL;
485 }
486
487 hash = kernfs_name_hash(name, ns);
488 while (node) {
489 struct kernfs_node *kn;
490 int result;
491
492 kn = rb_to_kn(node);
493 result = kernfs_name_compare(hash, name, ns, kn);
494 if (result < 0)
495 node = node->rb_left;
496 else if (result > 0)
497 node = node->rb_right;
498 else
499 return kn;
500 }
501 return NULL;
502}
503
504/**
505 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
506 * @parent: kernfs_node to search under
507 * @name: name to look for
508 * @ns: the namespace tag to use
509 *
510 * Look for kernfs_node with name @name under @parent and get a reference
511 * if found. This function may sleep and returns pointer to the found
512 * kernfs_node on success, %NULL on failure.
513 */
514struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
515 const char *name, const void *ns)
516{
517 struct kernfs_node *kn;
518
519 mutex_lock(&kernfs_mutex);
520 kn = kernfs_find_ns(parent, name, ns);
521 kernfs_get(kn);
522 mutex_unlock(&kernfs_mutex);
523
524 return kn;
525}
526EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
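A short usage sketch (editorial, hypothetical names; the NULL namespace tag assumes @parent has namespaces disabled): the lookup returns the node with an extra reference already taken, so it must be balanced with kernfs_put().

static bool example_has_child(struct kernfs_node *parent, const char *name)
{
	struct kernfs_node *kn;

	kn = kernfs_find_and_get_ns(parent, name, NULL);
	if (!kn)
		return false;

	/* ... inspect @kn here ... */

	kernfs_put(kn);		/* drop the reference taken by the lookup */
	return true;
}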
527
528/**
529 * kernfs_create_root - create a new kernfs hierarchy
530 * @scops: optional syscall operations for the hierarchy
531 * @priv: opaque data associated with the new directory
532 *
533 * Returns the root of the new hierarchy on success, ERR_PTR() value on
534 * failure.
535 */
536struct kernfs_root *kernfs_create_root(struct kernfs_syscall_ops *scops,
537 void *priv)
538{
539 struct kernfs_root *root;
540 struct kernfs_node *kn;
541
542 root = kzalloc(sizeof(*root), GFP_KERNEL);
543 if (!root)
544 return ERR_PTR(-ENOMEM);
545
546 ida_init(&root->ino_ida);
547
548 kn = __kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO,
549 KERNFS_DIR);
550 if (!kn) {
551 ida_destroy(&root->ino_ida);
552 kfree(root);
553 return ERR_PTR(-ENOMEM);
554 }
555
556 atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
557 kn->priv = priv;
558 kn->dir.root = root;
559
560 root->syscall_ops = scops;
561 root->kn = kn;
562 init_waitqueue_head(&root->deactivate_waitq);
563
564 return root;
565}
566
567/**
568 * kernfs_destroy_root - destroy a kernfs hierarchy
569 * @root: root of the hierarchy to destroy
570 *
571 * Destroy the hierarchy anchored at @root by removing all existing
572 * directories and destroying @root.
573 */
574void kernfs_destroy_root(struct kernfs_root *root)
575{
576 kernfs_remove(root->kn); /* will also free @root */
577}
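A hedged sketch of how a kernfs user might bring a hierarchy up and down. The mkdir hook's shape is taken from the scops->mkdir() call site in kernfs_iop_mkdir() below; kernfs_syscall_ops also has rmdir and rename hooks (and, per the commit this listing belongs to, remount_fs and show_options), omitted here. All example_* names are hypothetical.

static int example_mkdir(struct kernfs_node *parent, const char *name,
			 umode_t mode)
{
	/* create the backing object and its kernfs directory here */
	return 0;
}

static struct kernfs_syscall_ops example_scops = {
	.mkdir	= example_mkdir,
};

static struct kernfs_root *example_root;

static int example_init(void)
{
	example_root = kernfs_create_root(&example_scops, NULL);
	if (IS_ERR(example_root))
		return PTR_ERR(example_root);	/* only -ENOMEM at this point */
	return 0;
}

static void example_exit(void)
{
	kernfs_destroy_root(example_root);	/* removes every node under root->kn */
}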
578
579/**
580 * kernfs_create_dir_ns - create a directory
581 * @parent: parent in which to create a new directory
582 * @name: name of the new directory
583 * @mode: mode of the new directory
584 * @priv: opaque data associated with the new directory
585 * @ns: optional namespace tag of the directory
586 *
587 * Returns the created node on success, ERR_PTR() value on failure.
588 */
589struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
590 const char *name, umode_t mode,
591 void *priv, const void *ns)
592{
593 struct kernfs_node *kn;
594 int rc;
595
596 /* allocate */
597 kn = kernfs_new_node(parent, name, mode | S_IFDIR, KERNFS_DIR);
598 if (!kn)
599 return ERR_PTR(-ENOMEM);
600
601 kn->dir.root = parent->dir.root;
602 kn->ns = ns;
603 kn->priv = priv;
604
605 /* link in */
606 rc = kernfs_add_one(kn);
607 if (!rc)
608 return kn;
609
610 kernfs_put(kn);
611 return ERR_PTR(rc);
612}
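Continuing the editorial sketch above (hypothetical names): new directories hang off an existing node, here the root's kn, and the result must be checked with IS_ERR() rather than against NULL.

static int example_add_dir(struct kernfs_root *root, void *priv)
{
	struct kernfs_node *dir;

	dir = kernfs_create_dir_ns(root->kn, "example", 0755, priv, NULL);
	if (IS_ERR(dir))
		return PTR_ERR(dir);	/* e.g. -ENOMEM, -EEXIST or -ENOENT */

	/* files and subdirectories would then be created under @dir */
	return 0;
}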
613
614static struct dentry *kernfs_iop_lookup(struct inode *dir,
615 struct dentry *dentry,
616 unsigned int flags)
617{
618 struct dentry *ret;
619 struct kernfs_node *parent = dentry->d_parent->d_fsdata;
620 struct kernfs_node *kn;
621 struct inode *inode;
622 const void *ns = NULL;
623
624 mutex_lock(&kernfs_mutex);
625
626 if (kernfs_ns_enabled(parent))
627 ns = kernfs_info(dir->i_sb)->ns;
628
629 kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
630
631 /* no such entry */
632 if (!kn) {
633 ret = NULL;
634 goto out_unlock;
635 }
636 kernfs_get(kn);
637 dentry->d_fsdata = kn;
638
639 /* attach dentry and inode */
640 inode = kernfs_get_inode(dir->i_sb, kn);
641 if (!inode) {
642 ret = ERR_PTR(-ENOMEM);
643 goto out_unlock;
644 }
645
646 /* instantiate and hash dentry */
647 ret = d_materialise_unique(dentry, inode);
648 out_unlock:
649 mutex_unlock(&kernfs_mutex);
650 return ret;
651}
652
653static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
654 umode_t mode)
655{
656 struct kernfs_node *parent = dir->i_private;
657 struct kernfs_syscall_ops *scops = kernfs_root(parent)->syscall_ops;
658 int ret;
659
660 if (!scops || !scops->mkdir)
661 return -EPERM;
662
663 if (!kernfs_get_active(parent))
664 return -ENODEV;
665
666 ret = scops->mkdir(parent, dentry->d_name.name, mode);
667
668 kernfs_put_active(parent);
669 return ret;
670}
671
672static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
673{
674 struct kernfs_node *kn = dentry->d_fsdata;
675 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
676 int ret;
677
678 if (!scops || !scops->rmdir)
679 return -EPERM;
680
681 if (!kernfs_get_active(kn))
682 return -ENODEV;
683
684 ret = scops->rmdir(kn);
685
686 kernfs_put_active(kn);
687 return ret;
688}
689
690static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
691 struct inode *new_dir, struct dentry *new_dentry)
692{
693 struct kernfs_node *kn = old_dentry->d_fsdata;
694 struct kernfs_node *new_parent = new_dir->i_private;
695 struct kernfs_syscall_ops *scops = kernfs_root(kn)->syscall_ops;
696 int ret;
697
698 if (!scops || !scops->rename)
699 return -EPERM;
700
701 if (!kernfs_get_active(kn))
702 return -ENODEV;
703
704 if (!kernfs_get_active(new_parent)) {
705 kernfs_put_active(kn);
706 return -ENODEV;
707 }
708
709 ret = scops->rename(kn, new_parent, new_dentry->d_name.name);
710
711 kernfs_put_active(new_parent);
712 kernfs_put_active(kn);
713 return ret;
714}
715
716const struct inode_operations kernfs_dir_iops = {
717 .lookup = kernfs_iop_lookup,
718 .permission = kernfs_iop_permission,
719 .setattr = kernfs_iop_setattr,
720 .getattr = kernfs_iop_getattr,
721 .setxattr = kernfs_iop_setxattr,
722 .removexattr = kernfs_iop_removexattr,
723 .getxattr = kernfs_iop_getxattr,
724 .listxattr = kernfs_iop_listxattr,
725
726 .mkdir = kernfs_iop_mkdir,
727 .rmdir = kernfs_iop_rmdir,
728 .rename = kernfs_iop_rename,
729};
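To connect the two halves, here is an illustrative guess (in the spirit of cgroupfs, not taken from this source) at what a syscall_ops->mkdir implementation does: kernfs_iop_mkdir() above only forwards the request, and the callback materialises the new node by calling back into kernfs_create_dir_ns(). The example_scops_mkdir name is hypothetical.

static int example_scops_mkdir(struct kernfs_node *parent, const char *name,
			       umode_t mode)
{
	struct kernfs_node *kn;

	/*
	 * @parent is pinned by the active reference taken in
	 * kernfs_iop_mkdir(), so it cannot be drained while we run.
	 * Passing a NULL ns assumes @parent has namespaces disabled.
	 */
	kn = kernfs_create_dir_ns(parent, name, mode, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/* attach the backing object via kn->priv, create default files, ... */
	return 0;
}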
730
731static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
732{
733 struct kernfs_node *last;
734
735 while (true) {
736 struct rb_node *rbn;
737
738 last = pos;
739
740 if (kernfs_type(pos) != KERNFS_DIR)
741 break;
742
743 rbn = rb_first(&pos->dir.children);
744 if (!rbn)
745 break;
746
747 pos = rb_to_kn(rbn);
748 }
749
750 return last;
751}
752
753/**
754 * kernfs_next_descendant_post - find the next descendant for post-order walk
755 * @pos: the current position (%NULL to initiate traversal)
756 * @root: kernfs_node whose descendants to walk
757 *
758 * Find the next descendant to visit for post-order traversal of @root's
759 * descendants. @root is included in the iteration and is the last node to
760 * be visited.
761 */
762static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
763 struct kernfs_node *root)
764{
765 struct rb_node *rbn;
766
767 lockdep_assert_held(&kernfs_mutex);
768
769 /* if first iteration, visit leftmost descendant which may be root */
770 if (!pos)
771 return kernfs_leftmost_descendant(root);
772
773 /* if we visited @root, we're done */
774 if (pos == root)
775 return NULL;
776
777 /* if there's an unvisited sibling, visit its leftmost descendant */
778 rbn = rb_next(&pos->rb);
779 if (rbn)
780 return kernfs_leftmost_descendant(rb_to_kn(rbn));
781
782 /* no sibling left, visit parent */
783 return pos->parent;
784}
785
786static void __kernfs_remove(struct kernfs_node *kn)
787{
788 struct kernfs_node *pos;
789
790 lockdep_assert_held(&kernfs_mutex);
791
792 /*
793 * Short-circuit if non-root @kn has already finished removal.
794 * This is for kernfs_remove_self() which plays with active ref
795 * after removal.
796 */
797 if (!kn || (kn->parent && RB_EMPTY_NODE(&kn->rb)))
798 return;
799
800 pr_debug("kernfs %s: removing\n", kn->name);
801
802 /* prevent any new usage under @kn by deactivating all nodes */
803 pos = NULL;
804 while ((pos = kernfs_next_descendant_post(pos, kn)))
805 if (kernfs_active(pos))
806 atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
807
808 /* deactivate and unlink the subtree node-by-node */
809 do {
810 pos = kernfs_leftmost_descendant(kn);
811
812 /*
813 * kernfs_drain() drops kernfs_mutex temporarily and @pos's
814 * base ref could have been put by someone else by the time
815 * the function returns. Make sure it doesn't go away
816 * underneath us.
817 */
818 kernfs_get(pos);
819
820 kernfs_drain(pos);
821
822 /*
823 * kernfs_unlink_sibling() succeeds once per node. Use it
824 * to decide who's responsible for cleanups.
825 */
826 if (!pos->parent || kernfs_unlink_sibling(pos)) {
827 struct kernfs_iattrs *ps_iattr =
828 pos->parent ? pos->parent->iattr : NULL;
829
830 /* update timestamps on the parent */
831 if (ps_iattr) {
832 ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
833 ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
834 }
835
836 kernfs_put(pos);
837 }
838
839 kernfs_put(pos);
840 } while (pos != kn);
841}
842
843/**
844 * kernfs_remove - remove a kernfs_node recursively
845 * @kn: the kernfs_node to remove
846 *
847 * Remove @kn along with all its subdirectories and files.
848 */
849void kernfs_remove(struct kernfs_node *kn)
850{
851 mutex_lock(&kernfs_mutex);
852 __kernfs_remove(kn);
853 mutex_unlock(&kernfs_mutex);
854}
855
856/**
857 * kernfs_break_active_protection - break out of active protection
858 * @kn: the self kernfs_node
859 *
860 * The caller must be running off of a kernfs operation which is invoked
861 * with an active reference - e.g. one of kernfs_ops. Each invocation of
862 * this function must also be matched with an invocation of
863 * kernfs_unbreak_active_protection().
864 *
865 * This function releases the active reference of @kn the caller is
866 * holding. Once this function is called, @kn may be removed at any point
867 * and the caller is solely responsible for ensuring that the objects it
868 * dereferences are accessible.
869 */
870void kernfs_break_active_protection(struct kernfs_node *kn)
871{
872 /*
873 * Take ourself out of the active ref dependency chain. If
874 * we're called without an active ref, lockdep will complain.
875 */
876 kernfs_put_active(kn);
877}
878
879/**
880 * kernfs_unbreak_active_protection - undo kernfs_break_active_protection()
881 * @kn: the self kernfs_node
882 *
883 * If kernfs_break_active_protection() was called, this function must be
884 * invoked before finishing the kernfs operation. Note that while this
885 * function restores the active reference, it doesn't and can't actually
886 * restore the active protection - @kn may already have been removed or may be
887 * in the process of being removed. Once kernfs_break_active_protection() is
888 * invoked, that protection is irreversibly gone for the kernfs operation instance.
889 *
890 * While this function may be called at any point after
891 * kernfs_break_active_protection() is invoked, its most useful location
892 * would be right before the enclosing kernfs operation returns.
893 */
894void kernfs_unbreak_active_protection(struct kernfs_node *kn)
895{
896 /*
897 * @kn->active could be in any state; however, the increment we do
898 * here will be undone as soon as the enclosing kernfs operation
899 * finishes and this temporary bump can't break anything. If @kn
900 * is alive, nothing changes. If @kn is being deactivated, the
901 * soon-to-follow put will either finish deactivation or restore
902 * deactivated state. If @kn is already removed, the temporary
903 * bump is guaranteed to be gone before @kn is released.
904 */
905 atomic_inc(&kn->active);
906 if (kernfs_lockdep(kn))
907 rwsem_acquire(&kn->dep_map, 0, 1, _RET_IP_);
908}
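A sketch of the intended calling pattern, loosely modeled on how a self-removing directory (cgroup-style rmdir) would use these helpers; names are hypothetical and pinning of the backing object is elided. Removing @kn while still holding the active reference taken in kernfs_iop_rmdir() would make kernfs_drain() wait for ourselves, hence the break/unbreak bracket.

static int example_scops_rmdir(struct kernfs_node *kn)
{
	/*
	 * After this call @kn may be removed by anyone at any time; the
	 * node itself stays allocated because the dentry that issued the
	 * rmdir still holds a kernfs_get() reference (see kernfs_iop_lookup()
	 * and kernfs_dop_release()), but the object behind kn->priv must be
	 * pinned separately before the protection is dropped.
	 */
	kernfs_break_active_protection(kn);

	kernfs_remove(kn);	/* recursively removes @kn and its subtree */

	kernfs_unbreak_active_protection(kn);
	return 0;
}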
909
910/**
911 * kernfs_remove_self - remove a kernfs_node from its own method
912 * @kn: the self kernfs_node to remove
913 *
914 * The caller must be running off of a kernfs operation which is invoked
915 * with an active reference - e.g. one of kernfs_ops. This can be used to
916 * implement a file operation which deletes itself.
917 *
918 * For example, the "delete" file for a sysfs device directory can be
919 * implemented by invoking kernfs_remove_self() on the "delete" file
920 * itself. This function breaks the circular dependency of trying to
921 * deactivate self while holding an active ref itself. It isn't necessary
922 * to modify the usual removal path to use kernfs_remove_self(). The
923 * "delete" implementation can simply invoke kernfs_remove_self() on self
924 * before proceeding with the usual removal path. kernfs will ignore later
925 * kernfs_remove() on self.
926 *
927 * kernfs_remove_self() can be called multiple times concurrently on the
928 * same kernfs_node. Only the first one actually performs removal and
929 * returns %true. All others will wait until the kernfs operation which
930 * won self-removal finishes and return %false. Note that the losers wait
931 * for the completion of not only the winning kernfs_remove_self() but also
932 * the whole kernfs_ops which won the arbitration. This can be used to
933 * guarantee, for example, all concurrent writes to a "delete" file to
934 * finish only after the whole operation is complete.
935 */
936bool kernfs_remove_self(struct kernfs_node *kn)
937{
938 bool ret;
939
940 mutex_lock(&kernfs_mutex);
941 kernfs_break_active_protection(kn);
942
943 /*
944 * SUICIDAL is used to arbitrate among competing invocations. Only
945 * the first one will actually perform removal. When the removal
946 * is complete, SUICIDED is set and the active ref is restored
947 * while holding kernfs_mutex. The ones which lost arbitration
948 * wait for SUICIDED && drained, which can happen only after the
949 * enclosing kernfs operation which executed the winning instance
950 * of kernfs_remove_self() has finished.
951 */
952 if (!(kn->flags & KERNFS_SUICIDAL)) {
953 kn->flags |= KERNFS_SUICIDAL;
954 __kernfs_remove(kn);
955 kn->flags |= KERNFS_SUICIDED;
956 ret = true;
957 } else {
958 wait_queue_head_t *waitq = &kernfs_root(kn)->deactivate_waitq;
959 DEFINE_WAIT(wait);
960
961 while (true) {
962 prepare_to_wait(waitq, &wait, TASK_UNINTERRUPTIBLE);
963
964 if ((kn->flags & KERNFS_SUICIDED) &&
965 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
966 break;
967
968 mutex_unlock(&kernfs_mutex);
969 schedule();
970 mutex_lock(&kernfs_mutex);
971 }
972 finish_wait(waitq, &wait);
973 WARN_ON_ONCE(!RB_EMPTY_NODE(&kn->rb));
974 ret = false;
975 }
976
977 /*
978 * This must be done while holding kernfs_mutex; otherwise, waiting
979 * for SUICIDED && deactivated could finish prematurely.
980 */
981 kernfs_unbreak_active_protection(kn);
982
983 mutex_unlock(&kernfs_mutex);
984 return ret;
985}
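An editorial sketch of the "delete" file pattern described above: a handler running as a kernfs operation on the node itself (and therefore holding an active reference) removes its own node and uses the return value to decide who finishes the teardown. The name and error code are hypothetical.

static int example_delete(struct kernfs_node *kn)
{
	/*
	 * Safe to call from inside a kernfs operation on @kn; the helper
	 * breaks and restores our active protection internally.
	 */
	if (!kernfs_remove_self(kn))
		return -ENODEV;	/* another invocation already won the removal */

	/* we won the arbitration: tear down the backing object here */
	return 0;
}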
986
987/**
988 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
989 * @parent: parent of the target
990 * @name: name of the kernfs_node to remove
991 * @ns: namespace tag of the kernfs_node to remove
992 *
993 * Look for the kernfs_node with @name and @ns under @parent and remove it.
994 * Returns 0 on success, -ENOENT if such entry doesn't exist.
995 */
996int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
997 const void *ns)
998{
999 struct kernfs_node *kn;
1000
1001 if (!parent) {
1002 WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
1003 name);
1004 return -ENOENT;
1005 }
1006
1007 mutex_lock(&kernfs_mutex);
1008
1009 kn = kernfs_find_ns(parent, name, ns);
1010 if (kn)
1011 __kernfs_remove(kn);
1012
1013 mutex_unlock(&kernfs_mutex);
1014
1015 if (kn)
1016 return 0;
1017 else
1018 return -ENOENT;
1019}
1020
1021/**
1022 * kernfs_rename_ns - move and rename a kernfs_node
1023 * @kn: target node
1024 * @new_parent: new parent to put @kn under
1025 * @new_name: new name
1026 * @new_ns: new namespace tag
1027 */
1028int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
1029 const char *new_name, const void *new_ns)
1030{
1031 int error;
1032
1033 mutex_lock(&kernfs_mutex);
1034
1035 error = -ENOENT;
1036 if (!kernfs_active(kn) || !kernfs_active(new_parent))
1037 goto out;
1038
1039 error = 0;
1040 if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
1041 (strcmp(kn->name, new_name) == 0))
1042 goto out; /* nothing to rename */
1043
1044 error = -EEXIST;
1045 if (kernfs_find_ns(new_parent, new_name, new_ns))
1046 goto out;
1047
1048 /* rename kernfs_node */
1049 if (strcmp(kn->name, new_name) != 0) {
1050 error = -ENOMEM;
1051 new_name = kstrdup(new_name, GFP_KERNEL);
1052 if (!new_name)
1053 goto out;
1054
1055 if (kn->flags & KERNFS_STATIC_NAME)
1056 kn->flags &= ~KERNFS_STATIC_NAME;
1057 else
1058 kfree(kn->name);
1059
1060 kn->name = new_name;
1061 }
1062
1063 /*
1064 * Move to the appropriate place in the appropriate directory's rbtree.
1065 */
1066 kernfs_unlink_sibling(kn);
1067 kernfs_get(new_parent);
1068 kernfs_put(kn->parent);
1069 kn->ns = new_ns;
1070 kn->hash = kernfs_name_hash(kn->name, kn->ns);
1071 kn->parent = new_parent;
1072 kernfs_link_sibling(kn);
1073
1074 error = 0;
1075 out:
1076 mutex_unlock(&kernfs_mutex);
1077 return error;
1078}
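A short usage sketch (editorial, hypothetical name): reparenting a node while keeping its name and namespace tag. As the code above shows, the call fails with -ENOENT if either node is already being removed and with -EEXIST if the target name is taken.

static int example_move(struct kernfs_node *kn, struct kernfs_node *new_parent)
{
	/* keep the current name and namespace tag, just move @kn */
	return kernfs_rename_ns(kn, new_parent, kn->name, kn->ns);
}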
1079
1080/* Relationship between kn->mode and the DT_xxx types */
1081static inline unsigned char dt_type(struct kernfs_node *kn)
1082{
1083 return (kn->mode >> 12) & 15;
1084}
1085
1086static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
1087{
1088 kernfs_put(filp->private_data);
1089 return 0;
1090}
1091
1092static struct kernfs_node *kernfs_dir_pos(const void *ns,
1093 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
1094{
1095 if (pos) {
1096 int valid = kernfs_active(pos) &&
1097 pos->parent == parent && hash == pos->hash;
1098 kernfs_put(pos);
1099 if (!valid)
1100 pos = NULL;
1101 }
1102 if (!pos && (hash > 1) && (hash < INT_MAX)) {
1103 struct rb_node *node = parent->dir.children.rb_node;
1104 while (node) {
1105 pos = rb_to_kn(node);
1106
1107 if (hash < pos->hash)
1108 node = node->rb_left;
1109 else if (hash > pos->hash)
1110 node = node->rb_right;
1111 else
1112 break;
1113 }
1114 }
1115 /* Skip over entries in the wrong namespace */
1116 while (pos && pos->ns != ns) {
1117 struct rb_node *node = rb_next(&pos->rb);
1118 if (!node)
1119 pos = NULL;
1120 else
1121 pos = rb_to_kn(node);
1122 }
1123 return pos;
1124}
1125
1126static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1127 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1128{
1129 pos = kernfs_dir_pos(ns, parent, ino, pos);
1130 if (pos)
1131 do {
1132 struct rb_node *node = rb_next(&pos->rb);
1133 if (!node)
1134 pos = NULL;
1135 else
1136 pos = rb_to_kn(node);
1137 } while (pos && pos->ns != ns);
1138 return pos;
1139}
1140
1141static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
1142{
1143 struct dentry *dentry = file->f_path.dentry;
1144 struct kernfs_node *parent = dentry->d_fsdata;
1145 struct kernfs_node *pos = file->private_data;
1146 const void *ns = NULL;
1147
1148 if (!dir_emit_dots(file, ctx))
1149 return 0;
1150 mutex_lock(&kernfs_mutex);
1151
1152 if (kernfs_ns_enabled(parent))
1153 ns = kernfs_info(dentry->d_sb)->ns;
1154
1155 for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
1156 pos;
1157 pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
1158 const char *name = pos->name;
1159 unsigned int type = dt_type(pos);
1160 int len = strlen(name);
1161 ino_t ino = pos->ino;
1162
1163 ctx->pos = pos->hash;
1164 file->private_data = pos;
1165 kernfs_get(pos);
1166
1167 mutex_unlock(&kernfs_mutex);
1168 if (!dir_emit(ctx, name, len, ino, type))
1169 return 0;
1170 mutex_lock(&kernfs_mutex);
1171 }
1172 mutex_unlock(&kernfs_mutex);
1173 file->private_data = NULL;
1174 ctx->pos = INT_MAX;
1175 return 0;
1176}
1177
1178static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
1179 int whence)
1180{
1181 struct inode *inode = file_inode(file);
1182 loff_t ret;
1183
1184 mutex_lock(&inode->i_mutex);
1185 ret = generic_file_llseek(file, offset, whence);
1186 mutex_unlock(&inode->i_mutex);
1187
1188 return ret;
1189}
1190
1191const struct file_operations kernfs_dir_fops = {
1192 .read = generic_read_dir,
1193 .iterate = kernfs_fop_readdir,
1194 .release = kernfs_dir_fop_release,
1195 .llseek = kernfs_dir_fop_llseek,
1196};