/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include "internal.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
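
/*
 * Illustrative sketch (editorial addition, not used anywhere in this
 * file): honouring the "no ancestor relationship" rule above by taking
 * two unrelated dentries' d_lock in address order. The helper name is
 * hypothetical; spin_lock_nested() and DENTRY_D_LOCK_NESTED are the
 * real interfaces used for this elsewhere in this file:
 *
 *	static void lock_two_dentries(struct dentry *d1, struct dentry *d2)
 *	{
 *		if (d2 < d1)
 *			swap(d1, d2);
 *		spin_lock(&d1->d_lock);
 *		spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */
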
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

static inline void spin_lock_bucket(struct hlist_bl_head *b)
{
	bit_spin_lock(0, (unsigned long *)&b->first);
}

static inline void spin_unlock_bucket(struct hlist_bl_head *b)
{
	__bit_spin_unlock(0, (unsigned long *)&b->first);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}
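
/*
 * Illustrative sketch (editorial addition): how an rcu-walk reader
 * pairs with the barrier above. A lookup samples d_seq, uses the
 * dentry speculatively, then revalidates; write_seqcount_barrier()
 * makes the retry fail so the walker falls back to ref-walk:
 *
 *	unsigned seq = read_seqcount_begin(&dentry->d_seq);
 *	...speculatively use dentry->d_name, dentry->d_inode...
 *	if (read_seqcount_retry(&dentry->d_seq, seq))
 *		...dentry changed under us; retry or take d_lock...
 */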

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * dentry_lru_(add|del|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		spin_lock_bucket(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		spin_unlock_bucket(b);

		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
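
/*
 * Illustrative sketch (editorial addition): the typical d_drop() caller
 * is a network filesystem's ->d_revalidate() method deciding a cached
 * name has gone stale on the server. Function and helper names below
 * are hypothetical:
 *
 *	static int example_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 *	{
 *		if (example_entry_is_stale(dentry)) {	// fs-specific check, assumed
 *			d_drop(dentry);			// force a fresh lookup
 *			return 0;
 *		}
 *		return 1;
 *	}
 */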

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/* Otherwise leave it cached and ensure it's on the LRU */
	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
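
/*
 * Illustrative sketch (editorial addition): the usual pairing. A caller
 * that pins a dentry with dget() (declared in <linux/dcache.h>) must
 * balance it with dput() once done:
 *
 *	struct dentry *d = dget(some_dentry);	// take a reference
 *	...use d while the reference is held...
 *	dput(d);				// release; may free the dentry
 */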

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	if (!ret) {
		rcu_read_unlock();
		goto out;
	}
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
out:
	return ret;
}
EXPORT_SYMBOL(dget_parent);
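
/*
 * Illustrative sketch (editorial addition): unlike a bare
 * dentry->d_parent dereference, dget_parent() returns a *referenced*
 * parent that stays valid across concurrent renames:
 *
 *	struct dentry *parent = dget_parent(dentry);
 *	...parent cannot be freed or become stale here...
 *	dput(parent);
 */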

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
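
/*
 * Illustrative sketch (editorial addition): d_find_alias() is typically
 * used when only an inode is at hand but a dentry is needed, e.g. by
 * fsnotify or export-style code. The returned alias is referenced:
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		...use alias->d_name under alias->d_lock...
 *		dput(alias);
 *	}
 */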

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired; no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * if it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb:		superblock to shrink dentry LRU.
 * @count:	number of entries to prune
 * @flags:	flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	/* called from prune_dcache() and shrink_dcache_parent() */
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	int cnt = *count;

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
		    dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--cnt)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);

	*count = cnt;
}

/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache. This is done when we need more memory, or simply when we
 * need to unmount something (at which point we need to unuse all dentries).
 *
 * This function may fail to free any resources if all the dentries are in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb, *p = NULL;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim the same percentage from each superblock.
		 * We calculate the number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted. So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		if (p)
			__put_super(p);
		count -= pruned;
		p = sb;
		/* more work left to do? */
		if (count <= 0)
			break;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
		spin_lock(&dcache_lru_lock);
	}
	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;
	unsigned detached = 0;

	BUG_ON(!IS_ROOT(dentry));

	/* detach this root from the system */
	spin_lock(&dentry->d_lock);
	dentry_lru_del(dentry);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs)) {
			struct dentry *loop;

			/* this is a branch with children - detach all of them
			 * from the system in one go */
			spin_lock(&dentry->d_lock);
			list_for_each_entry(loop, &dentry->d_subdirs,
					    d_u.d_child) {
				spin_lock_nested(&loop->d_lock,
						DENTRY_D_LOCK_NESTED);
				dentry_lru_del(loop);
				__d_drop(loop);
				spin_unlock(&loop->d_lock);
			}
			spin_unlock(&dentry->d_lock);

			/* move to the first child */
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);
		}

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				spin_lock(&parent->d_lock);
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
				spin_unlock(&parent->d_lock);
			}

			detached++;

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	spin_lock(&dentry->d_lock);
	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
	    (old->d_flags & DCACHE_DISCONNECTED) ||
	    (!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
			dentry_lru_del(dentry);
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, &found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/*
 * Scan `nr' dentries and return the number which remain.
 *
 * We need to avoid reentering the filesystem if the caller is performing a
 * GFP_NOFS allocation attempt. One example deadlock is:
 *
 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
 *
 * In this case we return -1 to tell the caller that we bailed.
 */
static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
{
	if (nr) {
		if (!(gfp_mask & __GFP_FS))
			return -1;
		prune_dcache(nr);
	}

	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
}

static struct shrinker dcache_shrinker = {
	.shrink = shrink_dcache_memory,
	.seeks = DEFAULT_SEEKS,
};

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = NULL;
	dentry->d_sb = NULL;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);

	if (parent) {
		spin_lock(&parent->d_lock);
		/*
		 * don't need child lock because it is not subject
		 * to concurrency here
		 */
		__dget_dlock(parent);
		dentry->d_parent = parent;
		dentry->d_sb = parent->d_sb;
		d_set_d_op(dentry, dentry->d_sb->s_d_op);
		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
		spin_unlock(&parent->d_lock);
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = d_alloc(NULL, name);
	if (dentry) {
		dentry->d_sb = sb;
		d_set_d_op(dentry, dentry->d_sb->s_d_op);
		dentry->d_parent = dentry;
		dentry->d_flags |= DCACHE_DISCONNECTED;
	}
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
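
/*
 * Illustrative sketch (editorial addition): a typical in-kernel caller
 * (e.g. a pseudo filesystem populating itself) builds a child entry
 * from a plain C string. The names below are hypothetical:
 *
 *	struct dentry *child = d_alloc_name(parent, "example");
 *	if (!child)
 *		return -ENOMEM;
 *	d_instantiate(child, some_inode);	// bind to an inode, see below
 */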

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_DELETE));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
}
EXPORT_SYMBOL(d_set_d_op);

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
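
/*
 * Illustrative sketch (editorial addition): the classic caller is a
 * filesystem's ->create() method, which allocates an inode and binds
 * it to the (still negative) dentry passed in by the VFS. Function and
 * helper names are hypothetical:
 *
 *	static int example_create(struct inode *dir, struct dentry *dentry,
 *				  int mode, struct nameidata *nd)
 *	{
 *		struct inode *inode = example_new_inode(dir->i_sb, mode);
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);	// dentry is now positive
 *		return 0;
 *	}
 */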

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = d_alloc(NULL, &name);
		if (res) {
			res->d_sb = root_inode->i_sb;
			d_set_d_op(res, res->d_sb->s_d_op);
			res->d_parent = res;
			d_instantiate(res, root_inode);
		}
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

static struct dentry * d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}

/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc(NULL, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}
	tmp->d_parent = tmp; /* make sure dput doesn't croak */

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_sb = inode->i_sb;
	d_set_d_op(tmp, tmp->d_sb->s_d_op);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	spin_lock_bucket(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	spin_unlock_bucket(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
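
/*
 * Illustrative sketch (editorial addition): the intended caller is an
 * export_operations ->fh_to_dentry() implementation, which turns a file
 * handle back into a dentry for knfsd. The inode-lookup helper below is
 * hypothetical:
 *
 *	static struct dentry *example_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		struct inode *inode = example_iget(sb, fid->i32.ino);
 *		return d_obtain_alias(inode);	// handles NULL/IS_ERR inode too
 *	}
 */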

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
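
/*
 * Illustrative sketch (editorial addition): an exportable filesystem's
 * ->lookup() typically ends like this, letting d_splice_alias() either
 * reuse a disconnected directory alias or d_add() the dentry itself.
 * The inode-lookup helper is hypothetical:
 *
 *	static struct dentry *example_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = example_lookup_inode(dir, &dentry->d_name);
 *		return d_splice_alias(inode, dentry);	// NULL inode -> negative dentry
 *	}
 */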

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match, if the case-exact dentry already
 * exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	spin_lock(&inode->i_lock);
	if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
		__d_instantiate(found, inode);
		spin_unlock(&inode->i_lock);
		security_d_instantiate(found, inode);
		return found;
	}

	/*
	 * In case a directory already has a (disconnected) entry grab a
	 * reference to it, move it in place and use it.
	 */
	new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
	__dget(new);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(found, inode);
	d_move(new, found);
	iput(inode);
	dput(found);
	return new;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);

/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seq: returns d_seq value at the point where the dentry was found
 * @inode: returns dentry->d_inode when the inode was found valid.
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, i.e. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored
 * without taking d_lock and checking the d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 */
struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
				unsigned *seq, struct inode **inode)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		struct inode *i;
		const char *tname;
		int tlen;

		if (dentry->d_name.hash != hash)
			continue;

seqretry:
		*seq = read_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;
		tlen = dentry->d_name.len;
		tname = dentry->d_name.name;
		i = dentry->d_inode;
		prefetch(tname);
		if (i)
			prefetch(i);
		/*
		 * This seqcount check is required to ensure name and
		 * len are loaded atomically, so as not to walk off the
		 * edge of memory when walking. If we could load this
		 * atomically some other way, we could drop this check.
		 */
		if (read_seqcount_retry(&dentry->d_seq, *seq))
			goto seqretry;
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			if (parent->d_op->d_compare(parent, *inode,
						dentry, i,
						tlen, tname, name))
				continue;
		} else {
			if (dentry_cmp(tname, tlen, str, len))
				continue;
		}
		/*
		 * No extra seqcount check is required after the name
		 * compare. The caller must perform a seqcount check in
		 * order to do anything useful with the returned dentry
		 * anyway.
		 */
		*inode = i;
		return dentry;
	}
	return NULL;
}

/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
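
/*
 * Illustrative sketch (editorial addition): a caller with a plain C
 * string builds the qstr by hand (or uses d_hash_and_lookup() below,
 * which also honours a filesystem's d_hash operation):
 *
 *	struct qstr q = { .name = "example", .len = 7 };
 *	struct dentry *d;
 *
 *	q.hash = full_name_hash(q.name, q.len);
 *	d = d_lookup(parent, &q);	// referenced, or NULL if not cached
 *	if (d)
 *		dput(d);
 */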
1877
1878 /**
1879 * __d_lookup - search for a dentry (racy)
1880 * @parent: parent dentry
1881 * @name: qstr of name we wish to find
1882 * Returns: dentry, or NULL
1883 *
1884 * __d_lookup is like d_lookup, however it may (rarely) return a
1885 * false-negative result due to unrelated rename activity.
1886 *
1887 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
1888 * however it must be used carefully, eg. with a following d_lookup in
1889 * the case of failure.
1890 *
1891 * __d_lookup callers must be commented.
1892 */
1893 struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1894 {
1895 unsigned int len = name->len;
1896 unsigned int hash = name->hash;
1897 const unsigned char *str = name->name;
1898 struct hlist_bl_head *b = d_hash(parent, hash);
1899 struct hlist_bl_node *node;
1900 struct dentry *found = NULL;
1901 struct dentry *dentry;
1902
1903 /*
1904 * Note: There is significant duplication with __d_lookup_rcu which is
1905 * required to prevent single threaded performance regressions
1906 * especially on architectures where smp_rmb (in seqcounts) are costly.
1907 * Keep the two functions in sync.
1908 */
1909
1910 /*
1911 * The hash list is protected using RCU.
1912 *
1913 * Take d_lock when comparing a candidate dentry, to avoid races
1914 * with d_move().
1915 *
1916 * It is possible that concurrent renames can mess up our list
1917 * walk here and result in missing our dentry, resulting in the
1918 * false-negative result. d_lookup() protects against concurrent
1919 * renames using rename_lock seqlock.
1920 *
1921 * See Documentation/filesystems/path-lookup.txt for more details.
1922 */
1923 rcu_read_lock();
1924
1925 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1926 const char *tname;
1927 int tlen;
1928
1929 if (dentry->d_name.hash != hash)
1930 continue;
1931
1932 spin_lock(&dentry->d_lock);
1933 if (dentry->d_parent != parent)
1934 goto next;
1935 if (d_unhashed(dentry))
1936 goto next;
1937
1938 /*
1939 * It is safe to compare names since d_move() cannot
1940 * change the qstr (protected by d_lock).
1941 */
1942 tlen = dentry->d_name.len;
1943 tname = dentry->d_name.name;
1944 if (parent->d_flags & DCACHE_OP_COMPARE) {
1945 if (parent->d_op->d_compare(parent, parent->d_inode,
1946 dentry, dentry->d_inode,
1947 tlen, tname, name))
1948 goto next;
1949 } else {
1950 if (dentry_cmp(tname, tlen, str, len))
1951 goto next;
1952 }
1953
1954 dentry->d_count++;
1955 found = dentry;
1956 spin_unlock(&dentry->d_lock);
1957 break;
1958 next:
1959 spin_unlock(&dentry->d_lock);
1960 }
1961 rcu_read_unlock();
1962
1963 return found;
1964 }
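
/*
 * Typical fast-path pattern (sketch, cf. the path walking code): try the
 * lockless __d_lookup() first and only fall back to d_lookup(), which
 * takes the rename_lock read seqlock, if the racy lookup misses:
 *
 *	dentry = __d_lookup(parent, name);
 *	if (!dentry)
 *		dentry = d_lookup(parent, name);
 */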
1965
1966 /**
1967 * d_hash_and_lookup - hash the qstr then search for a dentry
1968 * @dir: Directory to search in
1969 * @name: qstr of name we wish to find
1970 *
1971 * On hash failure or on lookup failure NULL is returned.
1972 */
1973 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1974 {
1975 struct dentry *dentry = NULL;
1976
1977 /*
1978 * Check for a fs-specific hash function. Note that we must
1979 * calculate the standard hash first, as the d_op->d_hash()
1980 * routine may choose to leave the hash value unchanged.
1981 */
1982 name->hash = full_name_hash(name->name, name->len);
1983 if (dir->d_flags & DCACHE_OP_HASH) {
1984 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
1985 goto out;
1986 }
1987 dentry = d_lookup(dir, name);
1988 out:
1989 return dentry;
1990 }
1991
1992 /**
1993 * d_validate - verify dentry provided from insecure source (deprecated)
1994 * @dentry: The dentry alleged to be valid child of @dparent
1995 * @dparent: The parent dentry (known to be valid)
1996 *
1997 * An insecure source has sent us a dentry, here we verify it and dget() it.
1998 * This is used by ncpfs in its readdir implementation.
1999 * Zero is returned if the dentry is invalid.
2000 *
2001 * This function is slow for big directories and deprecated; do not use it.
2002 */
2003 int d_validate(struct dentry *dentry, struct dentry *dparent)
2004 {
2005 struct dentry *child;
2006
2007 spin_lock(&dparent->d_lock);
2008 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2009 if (dentry == child) {
2010 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2011 __dget_dlock(dentry);
2012 spin_unlock(&dentry->d_lock);
2013 spin_unlock(&dparent->d_lock);
2014 return 1;
2015 }
2016 }
2017 spin_unlock(&dparent->d_lock);
2018
2019 return 0;
2020 }
2021 EXPORT_SYMBOL(d_validate);
2022
2023 /*
2024 * When a file is deleted, we have two options:
2025 * - turn this dentry into a negative dentry
2026 * - unhash this dentry and free it.
2027 *
2028 * Usually, we want to just turn this into
2029 * a negative dentry, but if anybody else is
2030 * currently using the dentry or the inode
2031 * we can't do that.  Instead, we fall back on removing
2032 * it from the hash queues and waiting for
2033 * it to be deleted later when it has no users.
2034 */
2035
2036 /**
2037 * d_delete - delete a dentry
2038 * @dentry: The dentry to delete
2039 *
2040 * Turn the dentry into a negative dentry if possible, otherwise
2041 * remove it from the hash queues so it can be deleted later
2042 */
2043
2044 void d_delete(struct dentry * dentry)
2045 {
2046 struct inode *inode;
2047 int isdir = 0;
2048 /*
2049 * Are we the only user?
2050 */
2051 again:
2052 spin_lock(&dentry->d_lock);
2053 inode = dentry->d_inode;
2054 isdir = S_ISDIR(inode->i_mode);
2055 if (dentry->d_count == 1) {
2056 if (!spin_trylock(&inode->i_lock)) {	/* inode is non-NULL: dereferenced above */
2057 spin_unlock(&dentry->d_lock);
2058 cpu_relax();
2059 goto again;
2060 }
2061 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2062 dentry_unlink_inode(dentry);
2063 fsnotify_nameremove(dentry, isdir);
2064 return;
2065 }
2066
2067 if (!d_unhashed(dentry))
2068 __d_drop(dentry);
2069
2070 spin_unlock(&dentry->d_lock);
2071
2072 fsnotify_nameremove(dentry, isdir);
2073 }
2074 EXPORT_SYMBOL(d_delete);
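
/*
 * Simplified sketch of the usual call site (loosely following
 * vfs_unlink(); locking, fsnotify and error details elided): once the
 * filesystem has removed the name, d_delete() fixes up the dcache view:
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 */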
2075
2076 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2077 {
2078 BUG_ON(!d_unhashed(entry));
2079 spin_lock_bucket(b);
2080 entry->d_flags |= DCACHE_RCUACCESS;
2081 hlist_bl_add_head_rcu(&entry->d_hash, b);
2082 spin_unlock_bucket(b);
2083 }
2084
2085 static void _d_rehash(struct dentry * entry)
2086 {
2087 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2088 }
2089
2090 /**
2091 * d_rehash - add an entry back to the hash
2092 * @entry: dentry to add to the hash
2093 *
2094 * Adds a dentry to the hash according to its name.
2095 */
2096
2097 void d_rehash(struct dentry * entry)
2098 {
2099 spin_lock(&entry->d_lock);
2100 _d_rehash(entry);
2101 spin_unlock(&entry->d_lock);
2102 }
2103 EXPORT_SYMBOL(d_rehash);
2104
2105 /**
2106 * dentry_update_name_case - update case insensitive dentry with a new name
2107 * @dentry: dentry to be updated
2108 * @name: new name
2109 *
2110 * Update a case-insensitive dentry with the new case of its name.
2111 *
2112 * dentry must have been returned by d_lookup with name @name. Old and new
2113 * name lengths must match (i.e. no d_compare which allows mismatched name
2114 * lengths).
2115 *
2116 * Parent inode i_mutex must be held over d_lookup and into this call (to
2117 * keep renames and concurrent inserts, and readdir(2) away).
2118 */
2119 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2120 {
2121 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2122 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2123
2124 spin_lock(&dentry->d_lock);
2125 write_seqcount_begin(&dentry->d_seq);
2126 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2127 write_seqcount_end(&dentry->d_seq);
2128 spin_unlock(&dentry->d_lock);
2129 }
2130 EXPORT_SYMBOL(dentry_update_name_case);
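
/*
 * Hypothetical sketch for a case-preserving filesystem: d_lookup() may
 * have matched "FOO" against an on-disk name "foo" via ->d_compare();
 * update the cached spelling while the parent's i_mutex is still held:
 *
 *	mutex_lock(&dir->d_inode->i_mutex);
 *	dentry = d_lookup(dir, &disk_name);
 *	if (dentry) {
 *		dentry_update_name_case(dentry, &disk_name);
 *		dput(dentry);
 *	}
 *	mutex_unlock(&dir->d_inode->i_mutex);
 */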
2131
2132 static void switch_names(struct dentry *dentry, struct dentry *target)
2133 {
2134 if (dname_external(target)) {
2135 if (dname_external(dentry)) {
2136 /*
2137 * Both external: swap the pointers
2138 */
2139 swap(target->d_name.name, dentry->d_name.name);
2140 } else {
2141 /*
2142 * dentry:internal, target:external. Steal target's
2143 * storage and make target internal.
2144 */
2145 memcpy(target->d_iname, dentry->d_name.name,
2146 dentry->d_name.len + 1);
2147 dentry->d_name.name = target->d_name.name;
2148 target->d_name.name = target->d_iname;
2149 }
2150 } else {
2151 if (dname_external(dentry)) {
2152 /*
2153 * dentry:external, target:internal. Give dentry's
2154 * storage to target and make dentry internal
2155 */
2156 memcpy(dentry->d_iname, target->d_name.name,
2157 target->d_name.len + 1);
2158 target->d_name.name = dentry->d_name.name;
2159 dentry->d_name.name = dentry->d_iname;
2160 } else {
2161 /*
2162 * Both are internal. Just copy target to dentry
2163 */
2164 memcpy(dentry->d_iname, target->d_name.name,
2165 target->d_name.len + 1);
2166 dentry->d_name.len = target->d_name.len;
2167 return;
2168 }
2169 }
2170 swap(dentry->d_name.len, target->d_name.len);
2171 }
2172
2173 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2174 {
2175 /*
2176 * XXXX: do we really need to take target->d_lock?
2177 */
2178 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2179 spin_lock(&target->d_parent->d_lock);
2180 else {
2181 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2182 spin_lock(&dentry->d_parent->d_lock);
2183 spin_lock_nested(&target->d_parent->d_lock,
2184 DENTRY_D_LOCK_NESTED);
2185 } else {
2186 spin_lock(&target->d_parent->d_lock);
2187 spin_lock_nested(&dentry->d_parent->d_lock,
2188 DENTRY_D_LOCK_NESTED);
2189 }
2190 }
2191 if (target < dentry) {
2192 spin_lock_nested(&target->d_lock, 2);
2193 spin_lock_nested(&dentry->d_lock, 3);
2194 } else {
2195 spin_lock_nested(&dentry->d_lock, 2);
2196 spin_lock_nested(&target->d_lock, 3);
2197 }
2198 }
2199
2200 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2201 struct dentry *target)
2202 {
2203 if (target->d_parent != dentry->d_parent)
2204 spin_unlock(&dentry->d_parent->d_lock);
2205 if (target->d_parent != target)
2206 spin_unlock(&target->d_parent->d_lock);
2207 }
2208
2209 /*
2210 * When switching names, the actual string doesn't strictly have to
2211 * be preserved in the target - because we're dropping the target
2212 * anyway. As such, we can just do a simple memcpy() to copy over
2213 * the new name before we switch.
2214 *
2215 * Note that we have to be a lot more careful about getting the hash
2216 * switched - we have to switch the hash value properly even if it
2217 * then no longer matches the actual (corrupted) string of the target.
2218 * The hash value has to match the hash queue that the dentry is on..
2219 */
2220 /*
2221 * d_move - move a dentry
2222 * @dentry: entry to move
2223 * @target: new dentry
2224 *
2225 * Update the dcache to reflect the move of a file name. Negative
2226 * dcache entries should not be moved in this way.
2227 */
2228 void d_move(struct dentry * dentry, struct dentry * target)
2229 {
2230 if (!dentry->d_inode)
2231 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2232
2233 BUG_ON(d_ancestor(dentry, target));
2234 BUG_ON(d_ancestor(target, dentry));
2235
2236 write_seqlock(&rename_lock);
2237
2238 dentry_lock_for_move(dentry, target);
2239
2240 write_seqcount_begin(&dentry->d_seq);
2241 write_seqcount_begin(&target->d_seq);
2242
2243 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2244
2245 /*
2246 * Move the dentry to the target hash queue. Don't bother checking
2247 * for the same hash queue because of how unlikely it is.
2248 */
2249 __d_drop(dentry);
2250 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2251
2252 /* Unhash the target: dput() will then get rid of it */
2253 __d_drop(target);
2254
2255 list_del(&dentry->d_u.d_child);
2256 list_del(&target->d_u.d_child);
2257
2258 /* Switch the names.. */
2259 switch_names(dentry, target);
2260 swap(dentry->d_name.hash, target->d_name.hash);
2261
2262 /* ... and switch the parents */
2263 if (IS_ROOT(dentry)) {
2264 dentry->d_parent = target->d_parent;
2265 target->d_parent = target;
2266 INIT_LIST_HEAD(&target->d_u.d_child);
2267 } else {
2268 swap(dentry->d_parent, target->d_parent);
2269
2270 /* And add them back to the (new) parent lists */
2271 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2272 }
2273
2274 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2275
2276 write_seqcount_end(&target->d_seq);
2277 write_seqcount_end(&dentry->d_seq);
2278
2279 dentry_unlock_parents_for_move(dentry, target);
2280 spin_unlock(&target->d_lock);
2281 fsnotify_d_move(dentry);
2282 spin_unlock(&dentry->d_lock);
2283 write_sequnlock(&rename_lock);
2284 }
2285 EXPORT_SYMBOL(d_move);
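
/*
 * Sketch of the common caller (loosely following vfs_rename(); locking
 * and error handling elided): once the filesystem's ->rename() succeeds,
 * move the source dentry over the target so the dcache matches the new
 * on-disk name:
 *
 *	error = old_dir->i_op->rename(old_dir, old_dentry,
 *				      new_dir, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 */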
2286
2287 /**
2288 * d_ancestor - search for an ancestor
2289 * @p1: ancestor dentry
2290 * @p2: child dentry
2291 *
2292 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2293 * an ancestor of p2, else NULL.
2294 */
2295 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2296 {
2297 struct dentry *p;
2298
2299 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2300 if (p->d_parent == p1)
2301 return p;
2302 }
2303 return NULL;
2304 }
2305
2306 /*
2307 * This helper attempts to cope with remotely renamed directories
2308 *
2309 * It assumes that the caller is already holding
2310 * dentry->d_parent->d_inode->i_mutex and the inode->i_lock
2311 *
2312 * Note: If ever the locking in lock_rename() changes, then please
2313 * remember to update this too...
2314 */
2315 static struct dentry *__d_unalias(struct inode *inode,
2316 struct dentry *dentry, struct dentry *alias)
2317 {
2318 struct mutex *m1 = NULL, *m2 = NULL;
2319 struct dentry *ret;
2320
2321 /* If alias and dentry share a parent, then no extra locks required */
2322 if (alias->d_parent == dentry->d_parent)
2323 goto out_unalias;
2324
2325 /* Check for loops */
2326 ret = ERR_PTR(-ELOOP);
2327 if (d_ancestor(alias, dentry))
2328 goto out_err;
2329
2330 /* See lock_rename() */
2331 ret = ERR_PTR(-EBUSY);
2332 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2333 goto out_err;
2334 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2335 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2336 goto out_err;
2337 m2 = &alias->d_parent->d_inode->i_mutex;
2338 out_unalias:
2339 d_move(alias, dentry);
2340 ret = alias;
2341 out_err:
2342 spin_unlock(&inode->i_lock);
2343 if (m2)
2344 mutex_unlock(m2);
2345 if (m1)
2346 mutex_unlock(m1);
2347 return ret;
2348 }
2349
2350 /*
2351 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2352 * named dentry in place of the dentry to be replaced.
2353 * Returns with anon->d_lock held!
2354 */
2355 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2356 {
2357 struct dentry *dparent, *aparent;
2358
2359 dentry_lock_for_move(anon, dentry);
2360
2361 write_seqcount_begin(&dentry->d_seq);
2362 write_seqcount_begin(&anon->d_seq);
2363
2364 dparent = dentry->d_parent;
2365 aparent = anon->d_parent;
2366
2367 switch_names(dentry, anon);
2368 swap(dentry->d_name.hash, anon->d_name.hash);
2369
2370 dentry->d_parent = (aparent == anon) ? dentry : aparent;
2371 list_del(&dentry->d_u.d_child);
2372 if (!IS_ROOT(dentry))
2373 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2374 else
2375 INIT_LIST_HEAD(&dentry->d_u.d_child);
2376
2377 anon->d_parent = (dparent == dentry) ? anon : dparent;
2378 list_del(&anon->d_u.d_child);
2379 if (!IS_ROOT(anon))
2380 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
2381 else
2382 INIT_LIST_HEAD(&anon->d_u.d_child);
2383
2384 write_seqcount_end(&dentry->d_seq);
2385 write_seqcount_end(&anon->d_seq);
2386
2387 dentry_unlock_parents_for_move(anon, dentry);
2388 spin_unlock(&dentry->d_lock);
2389
2390 /* anon->d_lock still locked, returns locked */
2391 anon->d_flags &= ~DCACHE_DISCONNECTED;
2392 }
2393
2394 /**
2395 * d_materialise_unique - introduce an inode into the tree
2396 * @dentry: candidate dentry
2397 * @inode: inode to bind to the dentry, to which aliases may be attached
2398 *
2399 * Introduces a dentry into the tree, substituting an extant disconnected
2400 * root directory alias in its place if there is one.
2401 */
2402 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2403 {
2404 struct dentry *actual;
2405
2406 BUG_ON(!d_unhashed(dentry));
2407
2408 if (!inode) {
2409 actual = dentry;
2410 __d_instantiate(dentry, NULL);
2411 d_rehash(actual);
2412 goto out_nolock;
2413 }
2414
2415 spin_lock(&inode->i_lock);
2416
2417 if (S_ISDIR(inode->i_mode)) {
2418 struct dentry *alias;
2419
2420 /* Does an aliased dentry already exist? */
2421 alias = __d_find_alias(inode, 0);
2422 if (alias) {
2423 actual = alias;
2424 /* Is this an anonymous mountpoint that we could splice
2425 * into our tree? */
2426 if (IS_ROOT(alias)) {
2427 __d_materialise_dentry(dentry, alias);
2428 __d_drop(alias);
2429 goto found;
2430 }
2431 /* Nope, but we must(!) avoid directory aliasing */
2432 actual = __d_unalias(inode, dentry, alias);
2433 if (IS_ERR(actual))
2434 dput(alias);
2435 goto out_nolock;
2436 }
2437 }
2438
2439 /* Add a unique reference */
2440 actual = __d_instantiate_unique(dentry, inode);
2441 if (!actual)
2442 actual = dentry;
2443 else
2444 BUG_ON(!d_unhashed(actual));
2445
2446 spin_lock(&actual->d_lock);
2447 found:
2448 _d_rehash(actual);
2449 spin_unlock(&actual->d_lock);
2450 spin_unlock(&inode->i_lock);
2451 out_nolock:
2452 if (actual == dentry) {
2453 security_d_instantiate(dentry, inode);
2454 return NULL;
2455 }
2456
2457 iput(inode);
2458 return actual;
2459 }
2460 EXPORT_SYMBOL_GPL(d_materialise_unique);
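
/*
 * Hypothetical caller sketch, in the style of a network filesystem's
 * ->lookup() (labels assumed in the caller): splice a disconnected alias
 * into place rather than create a second dentry for the directory inode:
 *
 *	res = d_materialise_unique(dentry, inode);
 *	if (IS_ERR(res))
 *		goto out_err;
 *	if (res)
 *		dentry = res;	(an existing alias was spliced in)
 */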
2461
2462 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2463 {
2464 *buflen -= namelen;
2465 if (*buflen < 0)
2466 return -ENAMETOOLONG;
2467 *buffer -= namelen;
2468 memcpy(*buffer, str, namelen);
2469 return 0;
2470 }
2471
2472 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2473 {
2474 return prepend(buffer, buflen, name->name, name->len);
2475 }
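
/*
 * These helpers fill the buffer from the end towards the front.  A small
 * worked example with made-up values: in a 16-byte buffer, prepending
 * "\0", then "bar", "/", "foo" and "/" leaves
 *
 *	[ . . . . . . . / f o o / b a r \0 ]
 *	                ^
 *	                *buffer points here, *buflen == 7
 *
 * which is why callers must use the returned pointer rather than the
 * start of the buffer.
 */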
2476
2477 /**
2478 * prepend_path - Prepend path string to a buffer
2479 * @path: the dentry/vfsmount to report
2480 * @root: root vfsmnt/dentry (may be modified by this function)
2481 * @buffer: pointer to the end of the buffer
2482 * @buflen: pointer to buffer length
2483 *
2484 * Caller holds the rename_lock.
2485 *
2486 * If path is not reachable from the supplied root, then the value of
2487 * root is changed (without modifying refcounts).
2488 */
2489 static int prepend_path(const struct path *path, struct path *root,
2490 char **buffer, int *buflen)
2491 {
2492 struct dentry *dentry = path->dentry;
2493 struct vfsmount *vfsmnt = path->mnt;
2494 bool slash = false;
2495 int error = 0;
2496
2497 br_read_lock(vfsmount_lock);
2498 while (dentry != root->dentry || vfsmnt != root->mnt) {
2499 struct dentry * parent;
2500
2501 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2502 /* Global root? */
2503 if (vfsmnt->mnt_parent == vfsmnt) {
2504 goto global_root;
2505 }
2506 dentry = vfsmnt->mnt_mountpoint;
2507 vfsmnt = vfsmnt->mnt_parent;
2508 continue;
2509 }
2510 parent = dentry->d_parent;
2511 prefetch(parent);
2512 spin_lock(&dentry->d_lock);
2513 error = prepend_name(buffer, buflen, &dentry->d_name);
2514 spin_unlock(&dentry->d_lock);
2515 if (!error)
2516 error = prepend(buffer, buflen, "/", 1);
2517 if (error)
2518 break;
2519
2520 slash = true;
2521 dentry = parent;
2522 }
2523
2524 out:
2525 if (!error && !slash)
2526 error = prepend(buffer, buflen, "/", 1);
2527
2528 br_read_unlock(vfsmount_lock);
2529 return error;
2530
2531 global_root:
2532 /*
2533 * Filesystems needing to implement special "root names"
2534 * should do so with ->d_dname()
2535 */
2536 if (IS_ROOT(dentry) &&
2537 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2538 WARN(1, "Root dentry has weird name <%.*s>\n",
2539 (int) dentry->d_name.len, dentry->d_name.name);
2540 }
2541 root->mnt = vfsmnt;
2542 root->dentry = dentry;
2543 goto out;
2544 }
2545
2546 /**
2547 * __d_path - return the path of a dentry
2548 * @path: the dentry/vfsmount to report
2549 * @root: root vfsmnt/dentry (may be modified by this function)
2550 * @buf: buffer to return value in
2551 * @buflen: buffer length
2552 *
2553 * Convert a dentry into an ASCII path name.
2554 *
2555 * Returns a pointer into the buffer or an error code if the
2556 * path was too long.
2557 *
2558 * "buflen" should be positive.
2559 *
2560 * If path is not reachable from the supplied root, then the value of
2561 * root is changed (without modifying refcounts).
2562 */
2563 char *__d_path(const struct path *path, struct path *root,
2564 char *buf, int buflen)
2565 {
2566 char *res = buf + buflen;
2567 int error;
2568
2569 prepend(&res, &buflen, "\0", 1);
2570 write_seqlock(&rename_lock);
2571 error = prepend_path(path, root, &res, &buflen);
2572 write_sequnlock(&rename_lock);
2573
2574 if (error)
2575 return ERR_PTR(error);
2576 return res;
2577 }
2578
2579 /*
2580 * same as __d_path but appends "(deleted)" for unlinked files.
2581 */
2582 static int path_with_deleted(const struct path *path, struct path *root,
2583 char **buf, int *buflen)
2584 {
2585 prepend(buf, buflen, "\0", 1);
2586 if (d_unlinked(path->dentry)) {
2587 int error = prepend(buf, buflen, " (deleted)", 10);
2588 if (error)
2589 return error;
2590 }
2591
2592 return prepend_path(path, root, buf, buflen);
2593 }
2594
2595 static int prepend_unreachable(char **buffer, int *buflen)
2596 {
2597 return prepend(buffer, buflen, "(unreachable)", 13);
2598 }
2599
2600 /**
2601 * d_path - return the path of a dentry
2602 * @path: path to report
2603 * @buf: buffer to return value in
2604 * @buflen: buffer length
2605 *
2606 * Convert a dentry into an ASCII path name. If the entry has been deleted
2607 * the string " (deleted)" is appended. Note that this is ambiguous.
2608 *
2609 * Returns a pointer into the buffer or an error code if the path was
2610 * too long. Note: Callers should use the returned pointer, not the passed
2611 * in buffer, to use the name! The implementation often starts at an offset
2612 * into the buffer, and may leave 0 bytes at the start.
2613 *
2614 * "buflen" should be positive.
2615 */
2616 char *d_path(const struct path *path, char *buf, int buflen)
2617 {
2618 char *res = buf + buflen;
2619 struct path root;
2620 struct path tmp;
2621 int error;
2622
2623 /*
2624 * We have various synthetic filesystems that never get mounted. On
2625 * these filesystems dentries are never used for lookup purposes, and
2626 * thus don't need to be hashed. They also don't need a name until a
2627 * user wants to identify the object in /proc/pid/fd/. The little hack
2628 * below allows us to generate a name for these objects on demand:
2629 */
2630 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2631 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2632
2633 get_fs_root(current->fs, &root);
2634 write_seqlock(&rename_lock);
2635 tmp = root;
2636 error = path_with_deleted(path, &tmp, &res, &buflen);
2637 if (error)
2638 res = ERR_PTR(error);
2639 write_sequnlock(&rename_lock);
2640 path_put(&root);
2641 return res;
2642 }
2643 EXPORT_SYMBOL(d_path);
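
/*
 * Minimal usage sketch (hypothetical caller): since the name is built at
 * the end of the buffer, always use the returned pointer:
 *
 *	char *page = (char *) __get_free_page(GFP_KERNEL);
 *	char *p;
 *
 *	if (page) {
 *		p = d_path(&file->f_path, page, PAGE_SIZE);
 *		if (!IS_ERR(p))
 *			printk(KERN_DEBUG "path: %s\n", p);
 *		free_page((unsigned long) page);
 *	}
 */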
2644
2645 /**
2646 * d_path_with_unreachable - return the path of a dentry
2647 * @path: path to report
2648 * @buf: buffer to return value in
2649 * @buflen: buffer length
2650 *
2651 * The difference from d_path() is that this prepends "(unreachable)"
2652 * to paths which are unreachable from the current process' root.
2653 */
2654 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2655 {
2656 char *res = buf + buflen;
2657 struct path root;
2658 struct path tmp;
2659 int error;
2660
2661 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2662 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2663
2664 get_fs_root(current->fs, &root);
2665 write_seqlock(&rename_lock);
2666 tmp = root;
2667 error = path_with_deleted(path, &tmp, &res, &buflen);
2668 if (!error && !path_equal(&tmp, &root))
2669 error = prepend_unreachable(&res, &buflen);
2670 write_sequnlock(&rename_lock);
2671 path_put(&root);
2672 if (error)
2673 res = ERR_PTR(error);
2674
2675 return res;
2676 }
2677
2678 /*
2679 * Helper function for dentry_operations.d_dname() members
2680 */
2681 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2682 const char *fmt, ...)
2683 {
2684 va_list args;
2685 char temp[64];
2686 int sz;
2687
2688 va_start(args, fmt);
2689 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2690 va_end(args);
2691
2692 if (sz > sizeof(temp) || sz > buflen)
2693 return ERR_PTR(-ENAMETOOLONG);
2694
2695 buffer += buflen - sz;
2696 return memcpy(buffer, temp, sz);
2697 }
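
/*
 * Example ->d_dname() implementation in the style of pipefs, which names
 * its otherwise anonymous dentries on demand:
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 */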
2698
2699 /*
2700 * Write full pathname from the root of the filesystem into the buffer.
2701 */
2702 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2703 {
2704 char *end = buf + buflen;
2705 char *retval;
2706
2707 prepend(&end, &buflen, "\0", 1);
2708 if (buflen < 1)
2709 goto Elong;
2710 /* Get '/' right */
2711 retval = end-1;
2712 *retval = '/';
2713
2714 while (!IS_ROOT(dentry)) {
2715 struct dentry *parent = dentry->d_parent;
2716 int error;
2717
2718 prefetch(parent);
2719 spin_lock(&dentry->d_lock);
2720 error = prepend_name(&end, &buflen, &dentry->d_name);
2721 spin_unlock(&dentry->d_lock);
2722 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2723 goto Elong;
2724
2725 retval = end;
2726 dentry = parent;
2727 }
2728 return retval;
2729 Elong:
2730 return ERR_PTR(-ENAMETOOLONG);
2731 }
2732
2733 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2734 {
2735 char *retval;
2736
2737 write_seqlock(&rename_lock);
2738 retval = __dentry_path(dentry, buf, buflen);
2739 write_sequnlock(&rename_lock);
2740
2741 return retval;
2742 }
2743 EXPORT_SYMBOL(dentry_path_raw);
2744
2745 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2746 {
2747 char *p = NULL;
2748 char *retval;
2749
2750 write_seqlock(&rename_lock);
2751 if (d_unlinked(dentry)) {
2752 p = buf + buflen;
2753 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2754 goto Elong;
2755 buflen++;
2756 }
2757 retval = __dentry_path(dentry, buf, buflen);
2758 write_sequnlock(&rename_lock);
2759 if (!IS_ERR(retval) && p)
2760 *p = '/';	/* restore '/' overridden with '\0' */
2761 return retval;
2762 Elong:
2763 return ERR_PTR(-ENAMETOOLONG);
2764 }
2765
2766 /*
2767 * NOTE! The user-level library version returns a
2768 * character pointer. The kernel system call just
2769 * returns the length of the buffer filled (which
2770 * includes the ending '\0' character), or a negative
2771 * error value. So libc would do something like
2772 *
2773 * char *getcwd(char * buf, size_t size)
2774 * {
2775 * int retval;
2776 *
2777 * retval = sys_getcwd(buf, size);
2778 * if (retval >= 0)
2779 * return buf;
2780 * errno = -retval;
2781 * return NULL;
2782 * }
2783 */
2784 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2785 {
2786 int error;
2787 struct path pwd, root;
2788 char *page = (char *) __get_free_page(GFP_USER);
2789
2790 if (!page)
2791 return -ENOMEM;
2792
2793 get_fs_root_and_pwd(current->fs, &root, &pwd);
2794
2795 error = -ENOENT;
2796 write_seqlock(&rename_lock);
2797 if (!d_unlinked(pwd.dentry)) {
2798 unsigned long len;
2799 struct path tmp = root;
2800 char *cwd = page + PAGE_SIZE;
2801 int buflen = PAGE_SIZE;
2802
2803 prepend(&cwd, &buflen, "\0", 1);
2804 error = prepend_path(&pwd, &tmp, &cwd, &buflen);
2805 write_sequnlock(&rename_lock);
2806
2807 if (error)
2808 goto out;
2809
2810 /* Unreachable from current root */
2811 if (!path_equal(&tmp, &root)) {
2812 error = prepend_unreachable(&cwd, &buflen);
2813 if (error)
2814 goto out;
2815 }
2816
2817 error = -ERANGE;
2818 len = PAGE_SIZE + page - cwd;
2819 if (len <= size) {
2820 error = len;
2821 if (copy_to_user(buf, cwd, len))
2822 error = -EFAULT;
2823 }
2824 } else {
2825 write_sequnlock(&rename_lock);
2826 }
2827
2828 out:
2829 path_put(&pwd);
2830 path_put(&root);
2831 free_page((unsigned long) page);
2832 return error;
2833 }
2834
2835 /*
2836 * Test whether new_dentry is a subdirectory of old_dentry.
2837 *
2838 * Trivially implemented using the dcache structure
2839 */
2840
2841 /**
2842 * is_subdir - is new dentry a subdirectory of old_dentry
2843 * @new_dentry: new dentry
2844 * @old_dentry: old dentry
2845 *
2846 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
2847 * Returns 0 otherwise.
2848 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
2849 */
2850
2851 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2852 {
2853 int result;
2854 unsigned seq;
2855
2856 if (new_dentry == old_dentry)
2857 return 1;
2858
2859 do {
2860 /* for restarting inner loop in case of seq retry */
2861 seq = read_seqbegin(&rename_lock);
2862 /*
2863 * Need rcu_read_lock() to protect against d_parent changing
2864 * under us due to d_move()
2865 */
2866 rcu_read_lock();
2867 if (d_ancestor(old_dentry, new_dentry))
2868 result = 1;
2869 else
2870 result = 0;
2871 rcu_read_unlock();
2872 } while (read_seqretry(&rename_lock, seq));
2873
2874 return result;
2875 }
2876
2877 int path_is_under(struct path *path1, struct path *path2)
2878 {
2879 struct vfsmount *mnt = path1->mnt;
2880 struct dentry *dentry = path1->dentry;
2881 int res;
2882
2883 br_read_lock(vfsmount_lock);
2884 if (mnt != path2->mnt) {
2885 for (;;) {
2886 if (mnt->mnt_parent == mnt) {
2887 br_read_unlock(vfsmount_lock);
2888 return 0;
2889 }
2890 if (mnt->mnt_parent == path2->mnt)
2891 break;
2892 mnt = mnt->mnt_parent;
2893 }
2894 dentry = mnt->mnt_mountpoint;
2895 }
2896 res = is_subdir(dentry, path2->dentry);
2897 br_read_unlock(vfsmount_lock);
2898 return res;
2899 }
2900 EXPORT_SYMBOL(path_is_under);
2901
2902 void d_genocide(struct dentry *root)
2903 {
2904 struct dentry *this_parent;
2905 struct list_head *next;
2906 unsigned seq;
2907 int locked = 0;
2908
2909 seq = read_seqbegin(&rename_lock);
2910 again:
2911 this_parent = root;
2912 spin_lock(&this_parent->d_lock);
2913 repeat:
2914 next = this_parent->d_subdirs.next;
2915 resume:
2916 while (next != &this_parent->d_subdirs) {
2917 struct list_head *tmp = next;
2918 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2919 next = tmp->next;
2920
2921 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2922 if (d_unhashed(dentry) || !dentry->d_inode) {
2923 spin_unlock(&dentry->d_lock);
2924 continue;
2925 }
2926 if (!list_empty(&dentry->d_subdirs)) {
2927 spin_unlock(&this_parent->d_lock);
2928 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2929 this_parent = dentry;
2930 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2931 goto repeat;
2932 }
2933 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2934 dentry->d_flags |= DCACHE_GENOCIDE;
2935 dentry->d_count--;
2936 }
2937 spin_unlock(&dentry->d_lock);
2938 }
2939 if (this_parent != root) {
2940 struct dentry *child = this_parent;
2941 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2942 this_parent->d_flags |= DCACHE_GENOCIDE;
2943 this_parent->d_count--;
2944 }
2945 this_parent = try_to_ascend(this_parent, locked, seq);
2946 if (!this_parent)
2947 goto rename_retry;
2948 next = child->d_u.d_child.next;
2949 goto resume;
2950 }
2951 spin_unlock(&this_parent->d_lock);
2952 if (!locked && read_seqretry(&rename_lock, seq))
2953 goto rename_retry;
2954 if (locked)
2955 write_sequnlock(&rename_lock);
2956 return;
2957
2958 rename_retry:
2959 locked = 1;
2960 write_seqlock(&rename_lock);
2961 goto again;
2962 }
2963
2964 /**
2965 * find_inode_number - check for dentry with name
2966 * @dir: directory to check
2967 * @name: Name to find.
2968 *
2969 * Check whether a dentry already exists for the given name,
2970 * and return the inode number if it has an inode. Otherwise
2971 * 0 is returned.
2972 *
2973 * This routine is used to post-process directory listings for
2974 * filesystems using synthetic inode numbers, and is necessary
2975 * to keep getcwd() working.
2976 */
2977
2978 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2979 {
2980 struct dentry * dentry;
2981 ino_t ino = 0;
2982
2983 dentry = d_hash_and_lookup(dir, name);
2984 if (dentry) {
2985 if (dentry->d_inode)
2986 ino = dentry->d_inode->i_ino;
2987 dput(dentry);
2988 }
2989 return ino;
2990 }
2991 EXPORT_SYMBOL(find_inode_number);
2992
2993 static __initdata unsigned long dhash_entries;
2994 static int __init set_dhash_entries(char *str)
2995 {
2996 if (!str)
2997 return 0;
2998 dhash_entries = simple_strtoul(str, &str, 0);
2999 return 1;
3000 }
3001 __setup("dhash_entries=", set_dhash_entries);
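
/*
 * The table size can be forced at boot, e.g. (illustrative value):
 *
 *	dhash_entries=1048576
 *
 * otherwise alloc_large_system_hash() picks a size based on available
 * memory.
 */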
3002
3003 static void __init dcache_init_early(void)
3004 {
3005 int loop;
3006
3007 /* If hashes are distributed across NUMA nodes, defer
3008 * hash allocation until vmalloc space is available.
3009 */
3010 if (hashdist)
3011 return;
3012
3013 dentry_hashtable =
3014 alloc_large_system_hash("Dentry cache",
3015 sizeof(struct hlist_bl_head),
3016 dhash_entries,
3017 13,
3018 HASH_EARLY,
3019 &d_hash_shift,
3020 &d_hash_mask,
3021 0);
3022
3023 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3024 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3025 }
3026
3027 static void __init dcache_init(void)
3028 {
3029 int loop;
3030
3031 /*
3032 * A constructor could be added for stable state like the lists,
3033 * but it is probably not worth it because of the cache nature
3034 * of the dcache.
3035 */
3036 dentry_cache = KMEM_CACHE(dentry,
3037 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3038
3039 register_shrinker(&dcache_shrinker);
3040
3041 /* Hash may have been set up in dcache_init_early */
3042 if (!hashdist)
3043 return;
3044
3045 dentry_hashtable =
3046 alloc_large_system_hash("Dentry cache",
3047 sizeof(struct hlist_bl_head),
3048 dhash_entries,
3049 13,
3050 0,
3051 &d_hash_shift,
3052 &d_hash_mask,
3053 0);
3054
3055 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3056 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3057 }
3058
3059 /* SLAB cache for __getname() consumers */
3060 struct kmem_cache *names_cachep __read_mostly;
3061 EXPORT_SYMBOL(names_cachep);
3062
3063 EXPORT_SYMBOL(d_genocide);
3064
3065 void __init vfs_caches_init_early(void)
3066 {
3067 dcache_init_early();
3068 inode_init_early();
3069 }
3070
3071 void __init vfs_caches_init(unsigned long mempages)
3072 {
3073 unsigned long reserve;
3074
3075 /* Base hash sizes on available memory, with a reserve equal to
3076 150% of current kernel size */
3077
3078 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3079 mempages -= reserve;
3080
3081 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3082 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3083
3084 dcache_init();
3085 inode_init();
3086 files_init(mempages);
3087 mnt_init();
3088 bdev_cache_init();
3089 chrdev_init();
3090 }