Revert "kernfs: remove kernfs_addrm_cxt"
1 /*
2 * fs/kernfs/dir.c - kernfs directory implementation
3 *
4 * Copyright (c) 2001-3 Patrick Mochel
5 * Copyright (c) 2007 SUSE Linux Products GmbH
6 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
7 *
8 * This file is released under the GPLv2.
9 */
10
11 #include <linux/sched.h>
12 #include <linux/fs.h>
13 #include <linux/namei.h>
14 #include <linux/idr.h>
15 #include <linux/slab.h>
16 #include <linux/security.h>
17 #include <linux/hash.h>
18
19 #include "kernfs-internal.h"
20
21 DEFINE_MUTEX(kernfs_mutex);
22
23 #define rb_to_kn(X) rb_entry((X), struct kernfs_node, rb)
24
25 static bool kernfs_lockdep(struct kernfs_node *kn)
26 {
27 #ifdef CONFIG_DEBUG_LOCK_ALLOC
28 return kn->flags & KERNFS_LOCKDEP;
29 #else
30 return false;
31 #endif
32 }
33
34 /**
35 * kernfs_name_hash
36 * @name: Null terminated string to hash
37 * @ns: Namespace tag to hash
38 *
39  * Returns a 31-bit hash of ns + name (so it fits in an off_t)
40 */
41 static unsigned int kernfs_name_hash(const char *name, const void *ns)
42 {
43 unsigned long hash = init_name_hash();
44 unsigned int len = strlen(name);
45 while (len--)
46 hash = partial_name_hash(*name++, hash);
47 hash = (end_name_hash(hash) ^ hash_ptr((void *)ns, 31));
48 hash &= 0x7fffffffU;
49 /* Reserve hash numbers 0, 1 and INT_MAX for magic directory entries */
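	/* (0 and 1 are the readdir offsets of "." and ".." and INT_MAX marks EOF) */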
50 if (hash < 1)
51 hash += 2;
52 if (hash >= INT_MAX)
53 hash = INT_MAX - 1;
54 return hash;
55 }
56
57 static int kernfs_name_compare(unsigned int hash, const char *name,
58 const void *ns, const struct kernfs_node *kn)
59 {
60 if (hash != kn->hash)
61 return hash - kn->hash;
62 if (ns != kn->ns)
63 return ns - kn->ns;
64 return strcmp(name, kn->name);
65 }
66
67 static int kernfs_sd_compare(const struct kernfs_node *left,
68 const struct kernfs_node *right)
69 {
70 return kernfs_name_compare(left->hash, left->name, left->ns, right);
71 }
72
73 /**
74 * kernfs_link_sibling - link kernfs_node into sibling rbtree
75 * @kn: kernfs_node of interest
76 *
77 * Link @kn into its sibling rbtree which starts from
78 * @kn->parent->dir.children.
79 *
80 * Locking:
81 * mutex_lock(kernfs_mutex)
82 *
83 * RETURNS:
84  * 0 on success, -EEXIST on failure.
85 */
86 static int kernfs_link_sibling(struct kernfs_node *kn)
87 {
88 struct rb_node **node = &kn->parent->dir.children.rb_node;
89 struct rb_node *parent = NULL;
90
91 if (kernfs_type(kn) == KERNFS_DIR)
92 kn->parent->dir.subdirs++;
93
94 while (*node) {
95 struct kernfs_node *pos;
96 int result;
97
98 pos = rb_to_kn(*node);
99 parent = *node;
100 result = kernfs_sd_compare(kn, pos);
101 if (result < 0)
102 node = &pos->rb.rb_left;
103 else if (result > 0)
104 node = &pos->rb.rb_right;
105 else
106 return -EEXIST;
107 }
108 /* add new node and rebalance the tree */
109 rb_link_node(&kn->rb, parent, node);
110 rb_insert_color(&kn->rb, &kn->parent->dir.children);
111 return 0;
112 }
113
114 /**
115 * kernfs_unlink_sibling - unlink kernfs_node from sibling rbtree
116 * @kn: kernfs_node of interest
117 *
118 * Unlink @kn from its sibling rbtree which starts from
119 * kn->parent->dir.children.
120 *
121 * Locking:
122 * mutex_lock(kernfs_mutex)
123 */
124 static bool kernfs_unlink_sibling(struct kernfs_node *kn)
125 {
126 if (RB_EMPTY_NODE(&kn->rb))
127 return false;
128
129 if (kernfs_type(kn) == KERNFS_DIR)
130 kn->parent->dir.subdirs--;
131
132 rb_erase(&kn->rb, &kn->parent->dir.children);
133 RB_CLEAR_NODE(&kn->rb);
134 return true;
135 }
136
137 /**
138 * kernfs_get_active - get an active reference to kernfs_node
139 * @kn: kernfs_node to get an active reference to
140 *
141  * Get an active reference to @kn. This function is a no-op if @kn
142 * is NULL.
143 *
144 * RETURNS:
145 * Pointer to @kn on success, NULL on failure.
146 */
147 struct kernfs_node *kernfs_get_active(struct kernfs_node *kn)
148 {
149 if (unlikely(!kn))
150 return NULL;
151
152 if (!atomic_inc_unless_negative(&kn->active))
153 return NULL;
154
155 if (kernfs_lockdep(kn))
156 rwsem_acquire_read(&kn->dep_map, 0, 1, _RET_IP_);
157 return kn;
158 }
159
160 /**
161 * kernfs_put_active - put an active reference to kernfs_node
162 * @kn: kernfs_node to put an active reference to
163 *
164  * Put an active reference to @kn. This function is a no-op if @kn
165 * is NULL.
166 */
167 void kernfs_put_active(struct kernfs_node *kn)
168 {
169 	struct kernfs_root *root;
170 	int v;
171 
172 	if (unlikely(!kn))
173 		return;
174 	root = kernfs_root(kn);
175 if (kernfs_lockdep(kn))
176 rwsem_release(&kn->dep_map, 1, _RET_IP_);
177 v = atomic_dec_return(&kn->active);
178 if (likely(v != KN_DEACTIVATED_BIAS))
179 return;
180
181 wake_up_all(&root->deactivate_waitq);
182 }
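
/*
 * Illustrative sketch (not part of this file): a caller that wants to
 * invoke an operation on a node it does not otherwise pin typically
 * brackets the call with an active reference so the node cannot be
 * deactivated underneath it.  "do_something" is a made-up helper used
 * purely for illustration; see kernfs_create_dir_ns() below for an
 * in-tree example of the same pattern around kernfs_add_one().
 *
 *	if (!kernfs_get_active(kn))
 *		return -ENODEV;
 *	ret = do_something(kn->priv);
 *	kernfs_put_active(kn);
 */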
183
184 /**
185 * kernfs_drain - drain kernfs_node
186 * @kn: kernfs_node to drain
187 *
188  * Drain existing usages of @kn. Multiple removers may invoke this function
189 * concurrently on @kn and all will return after draining is complete.
190  * Returns %true if draining was performed and kernfs_mutex was temporarily
191  * released, %false if @kn was already drained and no operation was
192 * necessary.
193 *
194 * The caller is responsible for ensuring @kn stays pinned while this
195 * function is in progress even if it gets removed by someone else.
196 */
197 static bool kernfs_drain(struct kernfs_node *kn)
198 __releases(&kernfs_mutex) __acquires(&kernfs_mutex)
199 {
200 struct kernfs_root *root = kernfs_root(kn);
201
202 lockdep_assert_held(&kernfs_mutex);
203 WARN_ON_ONCE(atomic_read(&kn->active) >= 0);
204
205 /*
206 * We want to go through the active ref lockdep annotation at least
207 * once for all node removals, but the lockdep annotation can't be
208 * nested inside kernfs_mutex and deactivation can't make forward
209 	 * progress if we keep dropping the mutex. Use KERNFS_JUST_DEACTIVATED
210 	 * to force the slow path once for each deactivation if lockdep is
211 * enabled.
212 */
213 if ((!kernfs_lockdep(kn) || !(kn->flags & KERNFS_JUST_DEACTIVATED)) &&
214 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS)
215 return false;
216
217 kn->flags &= ~KERNFS_JUST_DEACTIVATED;
218 mutex_unlock(&kernfs_mutex);
219
220 if (kernfs_lockdep(kn)) {
221 rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
222 if (atomic_read(&kn->active) != KN_DEACTIVATED_BIAS)
223 lock_contended(&kn->dep_map, _RET_IP_);
224 }
225
226 wait_event(root->deactivate_waitq,
227 atomic_read(&kn->active) == KN_DEACTIVATED_BIAS);
228
229 if (kernfs_lockdep(kn)) {
230 lock_acquired(&kn->dep_map, _RET_IP_);
231 rwsem_release(&kn->dep_map, 1, _RET_IP_);
232 }
233
234 mutex_lock(&kernfs_mutex);
235 return true;
236 }
237
238 /**
239 * kernfs_get - get a reference count on a kernfs_node
240 * @kn: the target kernfs_node
241 */
242 void kernfs_get(struct kernfs_node *kn)
243 {
244 if (kn) {
245 WARN_ON(!atomic_read(&kn->count));
246 atomic_inc(&kn->count);
247 }
248 }
249 EXPORT_SYMBOL_GPL(kernfs_get);
250
251 /**
252 * kernfs_put - put a reference count on a kernfs_node
253 * @kn: the target kernfs_node
254 *
255 * Put a reference count of @kn and destroy it if it reached zero.
256 */
257 void kernfs_put(struct kernfs_node *kn)
258 {
259 struct kernfs_node *parent;
260 struct kernfs_root *root;
261
262 if (!kn || !atomic_dec_and_test(&kn->count))
263 return;
264 root = kernfs_root(kn);
265 repeat:
266 /*
267 * Moving/renaming is always done while holding reference.
268 * kn->parent won't change beneath us.
269 */
270 parent = kn->parent;
271
272 WARN_ONCE(atomic_read(&kn->active) != KN_DEACTIVATED_BIAS,
273 "kernfs_put: %s/%s: released with incorrect active_ref %d\n",
274 parent ? parent->name : "", kn->name, atomic_read(&kn->active));
275
276 if (kernfs_type(kn) == KERNFS_LINK)
277 kernfs_put(kn->symlink.target_kn);
278 if (!(kn->flags & KERNFS_STATIC_NAME))
279 kfree(kn->name);
280 if (kn->iattr) {
281 if (kn->iattr->ia_secdata)
282 security_release_secctx(kn->iattr->ia_secdata,
283 kn->iattr->ia_secdata_len);
284 simple_xattrs_free(&kn->iattr->xattrs);
285 }
286 kfree(kn->iattr);
287 ida_simple_remove(&root->ino_ida, kn->ino);
288 kmem_cache_free(kernfs_node_cache, kn);
289
290 kn = parent;
291 if (kn) {
292 if (atomic_dec_and_test(&kn->count))
293 goto repeat;
294 } else {
295 /* just released the root kn, free @root too */
296 ida_destroy(&root->ino_ida);
297 kfree(root);
298 }
299 }
300 EXPORT_SYMBOL_GPL(kernfs_put);
301
302 static int kernfs_dop_revalidate(struct dentry *dentry, unsigned int flags)
303 {
304 struct kernfs_node *kn;
305
306 if (flags & LOOKUP_RCU)
307 return -ECHILD;
308
309 /* Always perform fresh lookup for negatives */
310 if (!dentry->d_inode)
311 goto out_bad_unlocked;
312
313 kn = dentry->d_fsdata;
314 mutex_lock(&kernfs_mutex);
315
316 /* Force fresh lookup if removed */
317 if (kn->parent && RB_EMPTY_NODE(&kn->rb))
318 goto out_bad;
319
320 /* The kernfs node has been moved? */
321 if (dentry->d_parent->d_fsdata != kn->parent)
322 goto out_bad;
323
324 /* The kernfs node has been renamed */
325 if (strcmp(dentry->d_name.name, kn->name) != 0)
326 goto out_bad;
327
328 /* The kernfs node has been moved to a different namespace */
329 if (kn->parent && kernfs_ns_enabled(kn->parent) &&
330 kernfs_info(dentry->d_sb)->ns != kn->ns)
331 goto out_bad;
332
333 mutex_unlock(&kernfs_mutex);
334 out_valid:
335 return 1;
336 out_bad:
337 mutex_unlock(&kernfs_mutex);
338 out_bad_unlocked:
339 /*
340 * @dentry doesn't match the underlying kernfs node, drop the
341 * dentry and force lookup. If we have submounts we must allow the
342 * vfs caches to lie about the state of the filesystem to prevent
343 * leaks and other nasty things, so use check_submounts_and_drop()
344 * instead of d_drop().
345 */
346 if (check_submounts_and_drop(dentry) != 0)
347 goto out_valid;
348
349 return 0;
350 }
351
352 static void kernfs_dop_release(struct dentry *dentry)
353 {
354 kernfs_put(dentry->d_fsdata);
355 }
356
357 const struct dentry_operations kernfs_dops = {
358 .d_revalidate = kernfs_dop_revalidate,
359 .d_release = kernfs_dop_release,
360 };
361
362 struct kernfs_node *kernfs_new_node(struct kernfs_root *root, const char *name,
363 umode_t mode, unsigned flags)
364 {
365 char *dup_name = NULL;
366 struct kernfs_node *kn;
367 int ret;
368
369 if (!(flags & KERNFS_STATIC_NAME)) {
370 name = dup_name = kstrdup(name, GFP_KERNEL);
371 if (!name)
372 return NULL;
373 }
374
375 kn = kmem_cache_zalloc(kernfs_node_cache, GFP_KERNEL);
376 if (!kn)
377 goto err_out1;
378
379 ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
380 if (ret < 0)
381 goto err_out2;
382 kn->ino = ret;
383
384 atomic_set(&kn->count, 1);
385 atomic_set(&kn->active, KN_DEACTIVATED_BIAS);
386 RB_CLEAR_NODE(&kn->rb);
387
388 kn->name = name;
389 kn->mode = mode;
390 kn->flags = flags;
391
392 return kn;
393
394 err_out2:
395 kmem_cache_free(kernfs_node_cache, kn);
396 err_out1:
397 kfree(dup_name);
398 return NULL;
399 }
400
401 /**
402 * kernfs_addrm_start - prepare for kernfs_node add/remove
403 * @acxt: pointer to kernfs_addrm_cxt to be used
404 *
405 * This function is called when the caller is about to add or remove
406  * a kernfs_node. This function acquires kernfs_mutex. @acxt is used
407 * to keep and pass context to other addrm functions.
408 *
409 * LOCKING:
410 * Kernel thread context (may sleep). kernfs_mutex is locked on
411 * return.
412 */
413 void kernfs_addrm_start(struct kernfs_addrm_cxt *acxt)
414 __acquires(kernfs_mutex)
415 {
416 memset(acxt, 0, sizeof(*acxt));
417
418 mutex_lock(&kernfs_mutex);
419 }
420
421 /**
422 * kernfs_add_one - add kernfs_node to parent without warning
423 * @acxt: addrm context to use
424 * @kn: kernfs_node to be added
425 * @parent: the parent kernfs_node to add @kn to
426 *
427  * Get @parent and set @kn->parent to it, increment nlink of the
428  * parent inode if @kn is a directory, and link @kn into the children
429  * list of the parent.
430 *
431 * This function should be called between calls to
432 * kernfs_addrm_start() and kernfs_addrm_finish() and should be passed
433 * the same @acxt as passed to kernfs_addrm_start().
434 *
435 * LOCKING:
436 * Determined by kernfs_addrm_start().
437 *
438 * RETURNS:
439 * 0 on success, -EEXIST if entry with the given name already
440 * exists.
441 */
442 int kernfs_add_one(struct kernfs_addrm_cxt *acxt, struct kernfs_node *kn,
443 struct kernfs_node *parent)
444 {
445 bool has_ns = kernfs_ns_enabled(parent);
446 struct kernfs_iattrs *ps_iattr;
447 int ret;
448
449 WARN_ON_ONCE(atomic_read(&parent->active) < 0);
450
451 if (has_ns != (bool)kn->ns) {
452 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
453 has_ns ? "required" : "invalid", parent->name, kn->name);
454 return -EINVAL;
455 }
456
457 if (kernfs_type(parent) != KERNFS_DIR)
458 return -EINVAL;
459
460 kn->hash = kernfs_name_hash(kn->name, kn->ns);
461 kn->parent = parent;
462 kernfs_get(parent);
463
464 ret = kernfs_link_sibling(kn);
465 if (ret)
466 return ret;
467
468 /* Update timestamps on the parent */
469 ps_iattr = parent->iattr;
470 if (ps_iattr) {
471 struct iattr *ps_iattrs = &ps_iattr->ia_iattr;
472 ps_iattrs->ia_ctime = ps_iattrs->ia_mtime = CURRENT_TIME;
473 }
474
475 /* Mark the entry added into directory tree */
476 atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
477 return 0;
478 }
479
480 /**
481 * kernfs_addrm_finish - finish up kernfs_node add/remove
482 * @acxt: addrm context to finish up
483 *
484 * Finish up kernfs_node add/remove. Resources acquired by
485 * kernfs_addrm_start() are released and removed kernfs_nodes are
486 * cleaned up.
487 *
488 * LOCKING:
489 * kernfs_mutex is released.
490 */
491 void kernfs_addrm_finish(struct kernfs_addrm_cxt *acxt)
492 __releases(kernfs_mutex)
493 {
494 /* release resources acquired by kernfs_addrm_start() */
495 mutex_unlock(&kernfs_mutex);
496
497 /* kill removed kernfs_nodes */
498 while (acxt->removed) {
499 struct kernfs_node *kn = acxt->removed;
500
501 acxt->removed = kn->u.removed_list;
502
503 kernfs_put(kn);
504 }
505 }
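
/*
 * Hedged usage sketch of the addrm API above (it mirrors what
 * kernfs_create_dir_ns() further below does; @parent and @kn come from
 * the caller, and @parent must be pinned with kernfs_get_active()
 * across the sequence):
 *
 *	struct kernfs_addrm_cxt acxt;
 *	int rc;
 *
 *	kernfs_addrm_start(&acxt);
 *	rc = kernfs_add_one(&acxt, kn, parent);
 *	kernfs_addrm_finish(&acxt);
 *	if (rc)
 *		kernfs_put(kn);
 */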
506
507 /**
508 * kernfs_find_ns - find kernfs_node with the given name
509 * @parent: kernfs_node to search under
510 * @name: name to look for
511 * @ns: the namespace tag to use
512 *
513 * Look for kernfs_node with name @name under @parent. Returns pointer to
514 * the found kernfs_node on success, %NULL on failure.
515 */
516 static struct kernfs_node *kernfs_find_ns(struct kernfs_node *parent,
517 const unsigned char *name,
518 const void *ns)
519 {
520 struct rb_node *node = parent->dir.children.rb_node;
521 bool has_ns = kernfs_ns_enabled(parent);
522 unsigned int hash;
523
524 lockdep_assert_held(&kernfs_mutex);
525
526 if (has_ns != (bool)ns) {
527 WARN(1, KERN_WARNING "kernfs: ns %s in '%s' for '%s'\n",
528 has_ns ? "required" : "invalid", parent->name, name);
529 return NULL;
530 }
531
532 hash = kernfs_name_hash(name, ns);
533 while (node) {
534 struct kernfs_node *kn;
535 int result;
536
537 kn = rb_to_kn(node);
538 result = kernfs_name_compare(hash, name, ns, kn);
539 if (result < 0)
540 node = node->rb_left;
541 else if (result > 0)
542 node = node->rb_right;
543 else
544 return kn;
545 }
546 return NULL;
547 }
548
549 /**
550 * kernfs_find_and_get_ns - find and get kernfs_node with the given name
551 * @parent: kernfs_node to search under
552 * @name: name to look for
553 * @ns: the namespace tag to use
554 *
555 * Look for kernfs_node with name @name under @parent and get a reference
556 * if found. This function may sleep and returns pointer to the found
557 * kernfs_node on success, %NULL on failure.
558 */
559 struct kernfs_node *kernfs_find_and_get_ns(struct kernfs_node *parent,
560 const char *name, const void *ns)
561 {
562 struct kernfs_node *kn;
563
564 mutex_lock(&kernfs_mutex);
565 kn = kernfs_find_ns(parent, name, ns);
566 kernfs_get(kn);
567 mutex_unlock(&kernfs_mutex);
568
569 return kn;
570 }
571 EXPORT_SYMBOL_GPL(kernfs_find_and_get_ns);
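
/*
 * Minimal lookup sketch (assuming a hierarchy without namespace tags,
 * hence the NULL @ns; "frob" is only an illustrative name):
 *
 *	struct kernfs_node *kn;
 *
 *	kn = kernfs_find_and_get_ns(parent, "frob", NULL);
 *	if (kn) {
 *		... use kn ...
 *		kernfs_put(kn);
 *	}
 */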
572
573 /**
574 * kernfs_create_root - create a new kernfs hierarchy
575 * @kdops: optional directory syscall operations for the hierarchy
576 * @priv: opaque data associated with the new directory
577 *
578 * Returns the root of the new hierarchy on success, ERR_PTR() value on
579 * failure.
580 */
581 struct kernfs_root *kernfs_create_root(struct kernfs_dir_ops *kdops, void *priv)
582 {
583 struct kernfs_root *root;
584 struct kernfs_node *kn;
585
586 root = kzalloc(sizeof(*root), GFP_KERNEL);
587 if (!root)
588 return ERR_PTR(-ENOMEM);
589
590 ida_init(&root->ino_ida);
591
592 kn = kernfs_new_node(root, "", S_IFDIR | S_IRUGO | S_IXUGO, KERNFS_DIR);
593 if (!kn) {
594 ida_destroy(&root->ino_ida);
595 kfree(root);
596 return ERR_PTR(-ENOMEM);
597 }
598
599 atomic_sub(KN_DEACTIVATED_BIAS, &kn->active);
600 kn->priv = priv;
601 kn->dir.root = root;
602
603 root->dir_ops = kdops;
604 root->kn = kn;
605 init_waitqueue_head(&root->deactivate_waitq);
606
607 return root;
608 }
609
610 /**
611 * kernfs_destroy_root - destroy a kernfs hierarchy
612 * @root: root of the hierarchy to destroy
613 *
614 * Destroy the hierarchy anchored at @root by removing all existing
615 * directories and destroying @root.
616 */
617 void kernfs_destroy_root(struct kernfs_root *root)
618 {
619 kernfs_remove(root->kn); /* will also free @root */
620 }
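
/*
 * Lifecycle sketch, assuming a hierarchy that does not support
 * mkdir/rmdir/rename from userland (hence the NULL kernfs_dir_ops);
 * "my_priv" is a placeholder for caller data:
 *
 *	struct kernfs_root *root;
 *	struct kernfs_node *dir;
 *
 *	root = kernfs_create_root(NULL, my_priv);
 *	if (IS_ERR(root))
 *		return PTR_ERR(root);
 *
 *	dir = kernfs_create_dir_ns(root->kn, "example", 0755, NULL, NULL);
 *	...
 *	kernfs_destroy_root(root);	(this also removes "example")
 */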
621
622 /**
623 * kernfs_create_dir_ns - create a directory
624 * @parent: parent in which to create a new directory
625 * @name: name of the new directory
626 * @mode: mode of the new directory
627 * @priv: opaque data associated with the new directory
628 * @ns: optional namespace tag of the directory
629 *
630 * Returns the created node on success, ERR_PTR() value on failure.
631 */
632 struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
633 const char *name, umode_t mode,
634 void *priv, const void *ns)
635 {
636 struct kernfs_addrm_cxt acxt;
637 struct kernfs_node *kn;
638 int rc;
639
640 /* allocate */
641 kn = kernfs_new_node(kernfs_root(parent), name, mode | S_IFDIR,
642 KERNFS_DIR);
643 if (!kn)
644 return ERR_PTR(-ENOMEM);
645
646 kn->dir.root = parent->dir.root;
647 kn->ns = ns;
648 kn->priv = priv;
649
650 /* link in */
651 rc = -ENOENT;
652 if (kernfs_get_active(parent)) {
653 kernfs_addrm_start(&acxt);
654 rc = kernfs_add_one(&acxt, kn, parent);
655 kernfs_addrm_finish(&acxt);
656 kernfs_put_active(parent);
657 }
658
659 if (!rc)
660 return kn;
661
662 kernfs_put(kn);
663 return ERR_PTR(rc);
664 }
665
666 static struct dentry *kernfs_iop_lookup(struct inode *dir,
667 struct dentry *dentry,
668 unsigned int flags)
669 {
670 struct dentry *ret;
671 struct kernfs_node *parent = dentry->d_parent->d_fsdata;
672 struct kernfs_node *kn;
673 struct inode *inode;
674 const void *ns = NULL;
675
676 mutex_lock(&kernfs_mutex);
677
678 if (kernfs_ns_enabled(parent))
679 ns = kernfs_info(dir->i_sb)->ns;
680
681 kn = kernfs_find_ns(parent, dentry->d_name.name, ns);
682
683 /* no such entry */
684 if (!kn) {
685 ret = NULL;
686 goto out_unlock;
687 }
688 kernfs_get(kn);
689 dentry->d_fsdata = kn;
690
691 /* attach dentry and inode */
692 inode = kernfs_get_inode(dir->i_sb, kn);
693 if (!inode) {
694 ret = ERR_PTR(-ENOMEM);
695 goto out_unlock;
696 }
697
698 /* instantiate and hash dentry */
699 ret = d_materialise_unique(dentry, inode);
700 out_unlock:
701 mutex_unlock(&kernfs_mutex);
702 return ret;
703 }
704
705 static int kernfs_iop_mkdir(struct inode *dir, struct dentry *dentry,
706 umode_t mode)
707 {
708 struct kernfs_node *parent = dir->i_private;
709 struct kernfs_dir_ops *kdops = kernfs_root(parent)->dir_ops;
710
711 if (!kdops || !kdops->mkdir)
712 return -EPERM;
713
714 return kdops->mkdir(parent, dentry->d_name.name, mode);
715 }
716
717 static int kernfs_iop_rmdir(struct inode *dir, struct dentry *dentry)
718 {
719 struct kernfs_node *kn = dentry->d_fsdata;
720 struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
721
722 if (!kdops || !kdops->rmdir)
723 return -EPERM;
724
725 return kdops->rmdir(kn);
726 }
727
728 static int kernfs_iop_rename(struct inode *old_dir, struct dentry *old_dentry,
729 struct inode *new_dir, struct dentry *new_dentry)
730 {
731 struct kernfs_node *kn = old_dentry->d_fsdata;
732 struct kernfs_node *new_parent = new_dir->i_private;
733 struct kernfs_dir_ops *kdops = kernfs_root(kn)->dir_ops;
734
735 if (!kdops || !kdops->rename)
736 return -EPERM;
737
738 return kdops->rename(kn, new_parent, new_dentry->d_name.name);
739 }
740
741 const struct inode_operations kernfs_dir_iops = {
742 .lookup = kernfs_iop_lookup,
743 .permission = kernfs_iop_permission,
744 .setattr = kernfs_iop_setattr,
745 .getattr = kernfs_iop_getattr,
746 .setxattr = kernfs_iop_setxattr,
747 .removexattr = kernfs_iop_removexattr,
748 .getxattr = kernfs_iop_getxattr,
749 .listxattr = kernfs_iop_listxattr,
750
751 .mkdir = kernfs_iop_mkdir,
752 .rmdir = kernfs_iop_rmdir,
753 .rename = kernfs_iop_rename,
754 };
755
756 static struct kernfs_node *kernfs_leftmost_descendant(struct kernfs_node *pos)
757 {
758 struct kernfs_node *last;
759
760 while (true) {
761 struct rb_node *rbn;
762
763 last = pos;
764
765 if (kernfs_type(pos) != KERNFS_DIR)
766 break;
767
768 rbn = rb_first(&pos->dir.children);
769 if (!rbn)
770 break;
771
772 pos = rb_to_kn(rbn);
773 }
774
775 return last;
776 }
777
778 /**
779 * kernfs_next_descendant_post - find the next descendant for post-order walk
780 * @pos: the current position (%NULL to initiate traversal)
781 * @root: kernfs_node whose descendants to walk
782 *
783 * Find the next descendant to visit for post-order traversal of @root's
784  * descendants. @root is included in the iteration and is the last node
785  * to be visited.
786 */
787 static struct kernfs_node *kernfs_next_descendant_post(struct kernfs_node *pos,
788 struct kernfs_node *root)
789 {
790 struct rb_node *rbn;
791
792 lockdep_assert_held(&kernfs_mutex);
793
794 /* if first iteration, visit leftmost descendant which may be root */
795 if (!pos)
796 return kernfs_leftmost_descendant(root);
797
798 /* if we visited @root, we're done */
799 if (pos == root)
800 return NULL;
801
802 /* if there's an unvisited sibling, visit its leftmost descendant */
803 rbn = rb_next(&pos->rb);
804 if (rbn)
805 return kernfs_leftmost_descendant(rb_to_kn(rbn));
806
807 /* no sibling left, visit parent */
808 return pos->parent;
809 }
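
/*
 * For example, given root -> { a -> { a1 }, b }, the walk above visits
 * a1, then a, then b, and finally root itself.
 */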
810
811 static void __kernfs_deactivate(struct kernfs_node *kn)
812 {
813 struct kernfs_node *pos;
814
815 lockdep_assert_held(&kernfs_mutex);
816
817 /* prevent any new usage under @kn by deactivating all nodes */
818 pos = NULL;
819 while ((pos = kernfs_next_descendant_post(pos, kn))) {
820 if (atomic_read(&pos->active) >= 0) {
821 atomic_add(KN_DEACTIVATED_BIAS, &pos->active);
822 pos->flags |= KERNFS_JUST_DEACTIVATED;
823 }
824 }
825
826 /*
827 	 * Drain the subtree. If kernfs_drain() blocked to drain, as
828 	 * indicated by a %true return, it temporarily released kernfs_mutex
829 	 * and the rbtree might have been modified in between, breaking our
830 	 * future walk. Restart the walk after each %true return.
831 */
832 pos = NULL;
833 while ((pos = kernfs_next_descendant_post(pos, kn))) {
834 bool drained;
835
836 kernfs_get(pos);
837 drained = kernfs_drain(pos);
838 kernfs_put(pos);
839 if (drained)
840 pos = NULL;
841 }
842 }
843
844 static void __kernfs_remove(struct kernfs_addrm_cxt *acxt,
845 struct kernfs_node *kn)
846 {
847 struct kernfs_node *pos;
848
849 lockdep_assert_held(&kernfs_mutex);
850
851 if (!kn)
852 return;
853
854 pr_debug("kernfs %s: removing\n", kn->name);
855
856 __kernfs_deactivate(kn);
857
858 /* unlink the subtree node-by-node */
859 do {
860 pos = kernfs_leftmost_descendant(kn);
861
862 /*
863 		 * We're going to release kernfs_mutex to unmap bin files.
864 		 * Make sure @pos doesn't go away in between.
865 */
866 kernfs_get(pos);
867
868 /*
869 		 * This must come before unlinking; otherwise, when
870 * there are multiple removers, some may finish before
871 * unmapping is complete.
872 */
873 if (pos->flags & KERNFS_HAS_MMAP) {
874 mutex_unlock(&kernfs_mutex);
875 kernfs_unmap_file(pos);
876 mutex_lock(&kernfs_mutex);
877 }
878
879 /*
880 * kernfs_unlink_sibling() succeeds once per node. Use it
881 * to decide who's responsible for cleanups.
882 */
883 if (!pos->parent || kernfs_unlink_sibling(pos)) {
884 struct kernfs_iattrs *ps_iattr =
885 pos->parent ? pos->parent->iattr : NULL;
886
887 /* update timestamps on the parent */
888 if (ps_iattr) {
889 ps_iattr->ia_iattr.ia_ctime = CURRENT_TIME;
890 ps_iattr->ia_iattr.ia_mtime = CURRENT_TIME;
891 }
892
893 pos->u.removed_list = acxt->removed;
894 acxt->removed = pos;
895 }
896
897 kernfs_put(pos);
898 } while (pos != kn);
899 }
900
901 /**
902 * kernfs_remove - remove a kernfs_node recursively
903 * @kn: the kernfs_node to remove
904 *
905 * Remove @kn along with all its subdirectories and files.
906 */
907 void kernfs_remove(struct kernfs_node *kn)
908 {
909 struct kernfs_addrm_cxt acxt;
910
911 kernfs_addrm_start(&acxt);
912 __kernfs_remove(&acxt, kn);
913 kernfs_addrm_finish(&acxt);
914 }
915
916 /**
917 * kernfs_remove_by_name_ns - find a kernfs_node by name and remove it
918 * @parent: parent of the target
919 * @name: name of the kernfs_node to remove
920 * @ns: namespace tag of the kernfs_node to remove
921 *
922 * Look for the kernfs_node with @name and @ns under @parent and remove it.
923 * Returns 0 on success, -ENOENT if such entry doesn't exist.
924 */
925 int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
926 const void *ns)
927 {
928 struct kernfs_addrm_cxt acxt;
929 struct kernfs_node *kn;
930
931 if (!parent) {
932 WARN(1, KERN_WARNING "kernfs: can not remove '%s', no directory\n",
933 name);
934 return -ENOENT;
935 }
936
937 kernfs_addrm_start(&acxt);
938
939 kn = kernfs_find_ns(parent, name, ns);
940 if (kn)
941 __kernfs_remove(&acxt, kn);
942
943 kernfs_addrm_finish(&acxt);
944
945 if (kn)
946 return 0;
947 else
948 return -ENOENT;
949 }
950
951 /**
952 * kernfs_rename_ns - move and rename a kernfs_node
953 * @kn: target node
954  * @new_parent: new parent to put @kn under
955 * @new_name: new name
956 * @new_ns: new namespace tag
957 */
958 int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent,
959 const char *new_name, const void *new_ns)
960 {
961 int error;
962
963 error = -ENOENT;
964 if (!kernfs_get_active(new_parent))
965 goto out;
966 if (!kernfs_get_active(kn))
967 goto out_put_new_parent;
968
969 mutex_lock(&kernfs_mutex);
970
971 error = 0;
972 if ((kn->parent == new_parent) && (kn->ns == new_ns) &&
973 (strcmp(kn->name, new_name) == 0))
974 goto out_unlock; /* nothing to rename */
975
976 error = -EEXIST;
977 if (kernfs_find_ns(new_parent, new_name, new_ns))
978 goto out_unlock;
979
980 /* rename kernfs_node */
981 if (strcmp(kn->name, new_name) != 0) {
982 error = -ENOMEM;
983 new_name = kstrdup(new_name, GFP_KERNEL);
984 if (!new_name)
985 goto out_unlock;
986
987 if (kn->flags & KERNFS_STATIC_NAME)
988 kn->flags &= ~KERNFS_STATIC_NAME;
989 else
990 kfree(kn->name);
991
992 kn->name = new_name;
993 }
994
995 /*
996 	 * Move to the appropriate place in the new parent's children rbtree.
997 */
998 kernfs_unlink_sibling(kn);
999 kernfs_get(new_parent);
1000 kernfs_put(kn->parent);
1001 kn->ns = new_ns;
1002 kn->hash = kernfs_name_hash(kn->name, kn->ns);
1003 kn->parent = new_parent;
1004 kernfs_link_sibling(kn);
1005
1006 error = 0;
1007 out_unlock:
1008 mutex_unlock(&kernfs_mutex);
1009 kernfs_put_active(kn);
1010 out_put_new_parent:
1011 kernfs_put_active(new_parent);
1012 out:
1013 return error;
1014 }
1015
1016 /* Relationship between kn->mode and the DT_xxx types */
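/* e.g. S_IFDIR (0040000) >> 12 == DT_DIR (4) and S_IFREG >> 12 == DT_REG (8) */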
1017 static inline unsigned char dt_type(struct kernfs_node *kn)
1018 {
1019 return (kn->mode >> 12) & 15;
1020 }
1021
1022 static int kernfs_dir_fop_release(struct inode *inode, struct file *filp)
1023 {
1024 kernfs_put(filp->private_data);
1025 return 0;
1026 }
1027
1028 static struct kernfs_node *kernfs_dir_pos(const void *ns,
1029 struct kernfs_node *parent, loff_t hash, struct kernfs_node *pos)
1030 {
1031 if (pos) {
1032 int valid = pos->parent == parent && hash == pos->hash;
1033 kernfs_put(pos);
1034 if (!valid)
1035 pos = NULL;
1036 }
1037 if (!pos && (hash > 1) && (hash < INT_MAX)) {
1038 struct rb_node *node = parent->dir.children.rb_node;
1039 while (node) {
1040 pos = rb_to_kn(node);
1041
1042 if (hash < pos->hash)
1043 node = node->rb_left;
1044 else if (hash > pos->hash)
1045 node = node->rb_right;
1046 else
1047 break;
1048 }
1049 }
1050 /* Skip over entries in the wrong namespace */
1051 while (pos && pos->ns != ns) {
1052 struct rb_node *node = rb_next(&pos->rb);
1053 if (!node)
1054 pos = NULL;
1055 else
1056 pos = rb_to_kn(node);
1057 }
1058 return pos;
1059 }
1060
1061 static struct kernfs_node *kernfs_dir_next_pos(const void *ns,
1062 struct kernfs_node *parent, ino_t ino, struct kernfs_node *pos)
1063 {
1064 pos = kernfs_dir_pos(ns, parent, ino, pos);
1065 if (pos)
1066 do {
1067 struct rb_node *node = rb_next(&pos->rb);
1068 if (!node)
1069 pos = NULL;
1070 else
1071 pos = rb_to_kn(node);
1072 } while (pos && pos->ns != ns);
1073 return pos;
1074 }
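
/*
 * Note: the readdir position kept in ctx->pos below is the name hash
 * from kernfs_name_hash(), not an inode number, even though
 * kernfs_dir_next_pos() spells its parameter "ino".  Because the hash
 * is stable, readdir can resume correctly across calls even if entries
 * are added or removed in between.
 */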
1075
1076 static int kernfs_fop_readdir(struct file *file, struct dir_context *ctx)
1077 {
1078 struct dentry *dentry = file->f_path.dentry;
1079 struct kernfs_node *parent = dentry->d_fsdata;
1080 struct kernfs_node *pos = file->private_data;
1081 const void *ns = NULL;
1082
1083 if (!dir_emit_dots(file, ctx))
1084 return 0;
1085 mutex_lock(&kernfs_mutex);
1086
1087 if (kernfs_ns_enabled(parent))
1088 ns = kernfs_info(dentry->d_sb)->ns;
1089
1090 for (pos = kernfs_dir_pos(ns, parent, ctx->pos, pos);
1091 pos;
1092 pos = kernfs_dir_next_pos(ns, parent, ctx->pos, pos)) {
1093 const char *name = pos->name;
1094 unsigned int type = dt_type(pos);
1095 int len = strlen(name);
1096 ino_t ino = pos->ino;
1097
1098 ctx->pos = pos->hash;
1099 file->private_data = pos;
1100 kernfs_get(pos);
1101
1102 mutex_unlock(&kernfs_mutex);
1103 if (!dir_emit(ctx, name, len, ino, type))
1104 return 0;
1105 mutex_lock(&kernfs_mutex);
1106 }
1107 mutex_unlock(&kernfs_mutex);
1108 file->private_data = NULL;
1109 ctx->pos = INT_MAX;
1110 return 0;
1111 }
1112
1113 static loff_t kernfs_dir_fop_llseek(struct file *file, loff_t offset,
1114 int whence)
1115 {
1116 struct inode *inode = file_inode(file);
1117 loff_t ret;
1118
1119 mutex_lock(&inode->i_mutex);
1120 ret = generic_file_llseek(file, offset, whence);
1121 mutex_unlock(&inode->i_mutex);
1122
1123 return ret;
1124 }
1125
1126 const struct file_operations kernfs_dir_fops = {
1127 .read = generic_read_dir,
1128 .iterate = kernfs_fop_readdir,
1129 .release = kernfs_dir_fop_release,
1130 .llseek = kernfs_dir_fop_llseek,
1131 };