/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_parent and d_subdirs
 *   - childrens' d_child and d_parent
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;
/**
 * read_seqbegin_or_lock - begin a sequence number check or locking block
 * @lock: sequence lock
 * @seq : sequence number to be checked
 *
 * First try it once optimistically without taking the lock. If that fails,
 * take the lock. The sequence number is also used as a marker for deciding
 * whether to be a reader (even) or writer (odd).
 * N.B. seq must be initialized to an even number to begin with.
 */
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
{
	if (!(*seq & 1))	/* Even */
		*seq = read_seqbegin(lock);
	else			/* Odd */
		read_seqlock_excl(lock);
}

static inline int need_seqretry(seqlock_t *lock, int seq)
{
	return !(seq & 1) && read_seqretry(lock, seq);
}

static inline void done_seqretry(seqlock_t *lock, int seq)
{
	if (seq & 1)
		read_sequnlock_excl(lock);
}
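
/*
 * Illustrative sketch (not part of the original file): the calling pattern
 * the three helpers above are designed for.  A caller starts with an even
 * 'seq' (lockless pass); if that pass raced with a writer, it retries with
 * an odd 'seq', which makes read_seqbegin_or_lock() take the lock.  The
 * walk_once() helper below is hypothetical.
 */
#if 0
static void seqretry_example(void)
{
	int seq = 0;
again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	walk_once();			/* hypothetical read-side work */
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;		/* odd: take the lock on the retry */
		goto again;
	}
	done_seqretry(&rename_lock, seq);
}
#endif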
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask
static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	hash = hash + (hash >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
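
/*
 * Illustrative note (not part of the original file): the bucket choice mixes
 * the parent pointer into the name hash, so the same name under different
 * parents normally lands on different chains.  For example, if d_hash_shift
 * were 12 (4096 buckets, mask 0xfff), the computation above would be:
 *
 *	hash  += (unsigned long)parent / L1_CACHE_BYTES;
 *	hash   = hash + (hash >> 12);
 *	bucket = dentry_hashtable + (hash & 0xfff);
 */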
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}
int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = ~(~0ul << tcount*8);
	return unlikely(!!((a ^ b) & mask));
}

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
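
/*
 * Illustrative note (not part of the original file): for the word-at-a-time
 * version above, a 3-byte tail on a 64-bit machine gives
 * mask = ~(~0ul << 3*8) = 0x0000000000ffffff, so only the low three bytes of
 * (a ^ b) can cause a reported mismatch; bytes beyond the name are ignored.
 */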
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
static void d_free(struct dentry *dentry)
{
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
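
/*
 * Illustrative summary (not part of the original file) of the transitions the
 * four helpers above perform, following the D_FLAG_VERIFY() rules:
 *
 *	d_lru_add:    0                      -> LRU_LIST,             unused++
 *	d_lru_del:    LRU_LIST               -> 0,                     unused--
 *	d_shrink_add: 0                      -> LRU_LIST|SHRINK_LIST,  unused++
 *	d_shrink_del: LRU_LIST|SHRINK_LIST   -> 0,                     unused--
 */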
/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);
}

static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
}
/*
 * dentry_lru_(add|del) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}

/*
 * Remove a dentry with references from the LRU.
 *
 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
 * lose our last reference through the parent walk. In this case, we need to
 * remove ourselves from the shrink list, not the LRU.
 */
static void dentry_lru_del(struct dentry *dentry)
{
	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (dentry->d_flags & DCACHE_SHRINK_LIST)
			return d_shrink_del(dentry);
		d_lru_del(dentry);
	}
}
/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}
/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *
dentry_kill(struct dentry *dentry, int unlock_on_failure)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		if (unlock_on_failure) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
		}
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
		dentry->d_op->d_prune(dentry);

	dentry_lru_del(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */
/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
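
/*
 * Illustrative note (not part of the original file): the repeat/kill_it pair
 * above is the hand-rolled tail recursion the comment before dput() refers
 * to.  Written as real recursion it would be roughly:
 *
 *	parent = dentry_kill(dentry, 1);
 *	if (parent)
 *		dput(parent);	// would consume stack for every ancestor
 */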
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 */
int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 * We also need to leave mountpoints alone,
	 * directory or not.
	 */
	if (dentry->d_lockref.count > 1 && dentry->d_inode) {
		if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
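
/*
 * Illustrative sketch (not part of the original file): the usual way callers
 * pair dget_parent() with dput() to pin a stable parent while working on it.
 */
#if 0
static void parent_ref_example(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);	/* pinned reference */

	/* ... safe to use 'parent' here, even across renames ... */

	dput(parent);					/* drop the reference */
}
#endif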
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon:  flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			/*
			 * inform the fs via d_prune that this dentry
			 * is about to be unhashed and destroyed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired - no problem, just try again.
 */
static struct dentry * try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * if it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return NULL;
	if (parent == dentry)
		return dentry;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		if (lockref_put_or_lock(&dentry->d_lockref))
			return NULL;
		dentry = dentry_kill(dentry, 1);
	}
	return NULL;
}
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */

		/*
		 * Get the dentry lock, and re-verify that the dentry is
		 * still on the shrinking list. If it is, we know that
		 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
		 */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count) {
			spin_unlock(&dentry->d_lock);
			continue;
		}
		rcu_read_unlock();

		/*
		 * If 'try_prune_one_dentry()' returns a dentry, it will
		 * be the same one we passed in, and d_lock will
		 * have been held the whole time, so it will not
		 * have been added to any other lists. We failed
		 * to get the inode lock.
		 *
		 * We just add it back to the shrink list.
		 */
		dentry = try_prune_one_dentry(dentry);

		rcu_read_lock();
		if (dentry) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
		}
	}
	rcu_read_unlock();
}
static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list.  It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan : number of entries to try to free
 * @nid: which node to scan for freeable entities
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
	return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/*
			 * inform the fs that this dentry is about to be
			 * unhashed and destroyed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			dentry_lru_del(dentry);
			__d_shrink(dentry);

			if (dentry->d_lockref.count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_lockref.count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_lockref.count--;
				list_del(&dentry->d_u.d_child);
			}

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				hlist_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}
/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_lockref.count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}
/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
		 (old->d_flags & DCACHE_DENTRY_KILLED) ||
		 need_seqretry(&rename_lock, seq)) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq)) {
		spin_unlock(&this_parent->d_lock);
		goto rename_retry;
	}
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	if (!retry)
		return;
	seq = 1;
	goto again;
}
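
/*
 * Illustrative sketch (not part of the original file): a minimal d_walk()
 * callback in the style of check_mount()/select_collect() below.  The
 * counter is made up for the example.
 */
#if 0
static enum d_walk_ret count_dentries(void *data, struct dentry *dentry)
{
	unsigned long *count = data;

	(*count)++;
	return D_WALK_CONTINUE;
}

/* usage: d_walk(parent, &count, count_dentries, NULL); */
#endif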
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */
static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. check_submounts_and_drop() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};
static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	/*
	 * move only zero ref count dentries to the dispose list.
	 *
	 * Those which are presently on the shrink list, being processed
	 * by shrink_dentry_list(), shouldn't be moved.  Otherwise the
	 * loop in shrink_dcache_parent() might not make any progress
	 * and loop forever.
	 */
	if (dentry->d_lockref.count) {
		dentry_lru_del(dentry);
	} else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
		/*
		 * We can't use d_lru_shrink_move() because we
		 * need to get the global LRU lock and do the
		 * LRU accounting.
		 */
		d_lru_del(dentry);
		d_shrink_add(dentry, &data->dispose);
		data->found++;
		ret = D_WALK_NORETRY;
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (data->found && need_resched())
		ret = D_WALK_QUIT;
out:
	return ret;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;

	if (d_mountpoint(dentry)) {
		data->found = -EBUSY;
		return D_WALK_QUIT;
	}

	return select_collect(_data, dentry);
}

static void check_and_drop(void *_data)
{
	struct select_data *data = _data;

	if (d_mountpoint(data->start))
		data->found = -EBUSY;
	if (!data->found)
		__d_drop(data->start);
}
/**
 * check_submounts_and_drop - prune dcache, check for submounts and drop
 *
 * All done as a single atomic operation relative to has_unlinked_ancestor().
 * Returns 0 if successfully unhashed @parent.  If there were submounts then
 * -EBUSY is returned.
 *
 * @dentry: dentry to prune and drop
 */
int check_submounts_and_drop(struct dentry *dentry)
{
	int ret = 0;

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		goto out;
	}

	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = dentry;
		data.found = 0;

		d_walk(dentry, &data, check_and_collect, check_and_drop);
		ret = data.found;

		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);

		if (ret <= 0)
			break;

		cond_resched();
	}

out:
	return ret;
}
EXPORT_SYMBOL(check_submounts_and_drop);
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);
struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
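
/*
 * Illustrative sketch (not part of the original file): d_alloc_name() is the
 * convenience form used when the caller has a plain C string rather than a
 * pre-hashed qstr, e.g. when a filesystem builds an internal tree:
 */
#if 0
	struct dentry *child = d_alloc_name(parent, "example");

	if (!child)
		return -ENOMEM;
	d_add(child, inode);		/* hash it and attach the inode */
#endif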
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);
struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	__dget(alias);
	return alias;
}
/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = QSTR_INIT("/", 1);
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

 out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
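
/*
 * Illustrative sketch (not part of the original file): the typical
 * export_operations ->fh_to_dentry() shape d_obtain_alias() is meant for.
 * The inode lookup helper name is hypothetical.
 */
#if 0
static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
					struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode = myfs_nfs_get_inode(sb, fid->i32.ino, fid->i32.gen);

	/* d_obtain_alias() copes with a NULL or IS_ERR inode by itself */
	return d_obtain_alias(inode);
}
#endif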
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else {
		d_instantiate(dentry, inode);
		if (d_unhashed(dentry))
			d_rehash(dentry);
	}
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
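
/*
 * Illustrative sketch (not part of the original file): how an exportable
 * filesystem's ->lookup() typically hands its result to d_splice_alias().
 * The inode lookup helper name is hypothetical.
 */
#if 0
static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
				  unsigned int flags)
{
	struct inode *inode = myfs_iget(dir->i_sb, dentry);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* may return an already-existing (possibly disconnected) alias */
	return d_splice_alias(inode, dentry);
}
#endif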
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return found;
}
EXPORT_SYMBOL(d_add_ci);
/*
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};
static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			*seqp = seq;
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_OK:
				return dentry;
			case D_COMP_NOMATCH:
				continue;
			default:
				goto seqretry;
			}
		}

		if (dentry->d_name.hash_len != hashlen)
			continue;
		*seqp = seq;
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
			return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
				goto next;
		} else {
			if (dentry->d_name.len != len)
				goto next;
			if (dentry_cmp(dentry, str, len))
				goto next;
		}

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
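
/*
 * Illustrative sketch (not part of the original file): when the name has not
 * been hashed yet and the filesystem may install its own ->d_hash(), callers
 * go through d_hash_and_lookup() instead of hashing by hand. The helper name
 * below is hypothetical.
 */
#if 0
static struct dentry *example_lookup_child(struct dentry *dir, const char *name)
{
	struct qstr q = QSTR_INIT(name, strlen(name));
	struct dentry *res;

	/* hashes q (honouring dir->d_op->d_hash, if any), then does d_lookup() */
	res = d_hash_and_lookup(dir, &q);
	if (IS_ERR(res))
		return NULL;	/* ->d_hash() rejected the name */
	return res;		/* referenced dentry or NULL; dput() when done */
}
#endif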
/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
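
/*
 * Illustrative sketch (not part of the original file): d_delete() is what the
 * VFS unlink path calls once the filesystem's ->unlink() has succeeded, so
 * the dcache entry either becomes negative or is unhashed as described above.
 * The helper below is a simplified, hypothetical caller, not the real
 * vfs_unlink().
 */
#if 0
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int error;

	/* the real caller holds dir->i_mutex and the victim's i_mutex here */
	error = dir->i_op->unlink(dir, dentry);
	if (!error)
		d_delete(dentry);	/* make negative, or unhash and free later */
	return error;
}
#endif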
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.  Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}

static void dentry_unlock_parents_for_move(struct dentry *dentry,
					struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&target->d_seq);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
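
/*
 * Illustrative sketch (not part of the original file): the rename path is the
 * typical d_move() caller - once the filesystem's ->rename() has succeeded,
 * the dcache is updated to match. This is a simplified, hypothetical caller,
 * not the real vfs_rename(); the real one also holds the locks that
 * __d_move()'s comment demands (i_mutex of both directories, etc.).
 */
#if 0
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error;

	error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry);
	if (!error)
		d_move(old_dentry, new_dentry);	/* old_dentry takes over the new name/parent */
	return error;
}
#endif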
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret = ERR_PTR(-EBUSY);

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	if (likely(!d_mountpoint(alias))) {
		__d_move(alias, dentry);
		ret = alias;
	}
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent;

	dentry_lock_for_move(anon, dentry);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&anon->d_seq);

	dparent = dentry->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	dentry->d_parent = dentry;
	list_del_init(&dentry->d_u.d_child);
	anon->d_parent = dparent;
	list_move(&anon->d_u.d_child, &dparent->d_subdirs);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode, 0);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
				spin_unlock(&inode->i_lock);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_materialise_dentry(dentry, alias);
				write_sequnlock(&rename_lock);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing. This drops inode->i_lock */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual)) {
				if (PTR_ERR(actual) == -ELOOP)
					pr_warn_ratelimited(
						"VFS: Lookup of '%s' in %s %s"
						" would have caused loop\n",
						dentry->d_name.name,
						inode->i_sb->s_type->name,
						inode->i_sb->s_id);
				dput(alias);
			}
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;
	else
		BUG_ON(!d_unhashed(actual));

	spin_lock(&actual->d_lock);
found:
	_d_rehash(actual);
	spin_unlock(&actual->d_lock);
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be mismatch between length and pointer.
 * The length cannot be trusted, we need to copy it byte-by-byte until
 * the length is reached or a null byte is found. It also prepends "/" at
 * the beginning of the name. The sequence number check at the caller will
 * retry it again when a d_move() does happen. So any garbage in the buffer
 * due to mismatched pointer and length will be discarded.
 */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	if (*buflen < dlen + 1)
		return -ENAMETOOLONG;
	*buflen -= dlen + 1;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
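
/*
 * Illustrative sketch (not part of the original file): the "build the string
 * backwards" technique that prepend()/prepend_name() rely on. Components are
 * written from the end of the buffer towards the front, so the finished
 * string starts wherever the cursor ended up, not at buf[0]. The helpers
 * below are hypothetical equivalents of prepend().
 */
#if 0
static int example_prepend(char **cursor, int *remaining, const char *s, int n)
{
	*remaining -= n;
	if (*remaining < 0)
		return -ENAMETOOLONG;
	*cursor -= n;
	memcpy(*cursor, s, n);
	return 0;
}

static const char *example_build_path(char *buf, int buflen)
{
	char *p = buf + buflen;
	int len = buflen;

	/* NUL terminator first, then components from leaf back to root */
	example_prepend(&p, &len, "", 1);
	example_prepend(&p, &len, "leaf", 4);
	example_prepend(&p, &len, "/", 1);
	example_prepend(&p, &len, "dir", 3);
	example_prepend(&p, &len, "/", 1);
	return p;	/* "/dir/leaf", located somewhere inside buf */
}
#endif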
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	struct mount *mnt = real_mount(vfsmnt);
	int error = 0;
	unsigned seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (mnt_has_parent(mnt)) {
				dentry = mnt->mnt_mountpoint;
				mnt = mnt->mnt_parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			/*
			 * Filesystems needing to implement special "root names"
			 * should do so with ->d_dname()
			 */
			if (IS_ROOT(dentry) &&
			   (dentry->d_name.len != 1 ||
			    dentry->d_name.name[0] != '/')) {
				WARN(1, "Root dentry has weird name <%.*s>\n",
				     (int) dentry->d_name.len,
				     dentry->d_name.name);
			}
			error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	br_read_lock(&vfsmount_lock);
	error = prepend_path(path, root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	br_read_lock(&vfsmount_lock);
	error = prepend_path(path, &root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}

static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	br_read_lock(&vfsmount_lock);
	error = path_with_deleted(path, &root, &res, &buflen);
	br_read_unlock(&vfsmount_lock);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
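
/*
 * Illustrative sketch (not part of the original file): how a d_path() caller
 * is expected to use the result. The helper is hypothetical; the important
 * point is that the pathname starts at the returned pointer, not at the
 * start of the buffer that was passed in.
 */
#if 0
static void example_print_file_path(struct file *file)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!buf)
		return;
	p = d_path(&file->f_path, buf, PAGE_SIZE);
	if (!IS_ERR(p))		/* ERR_PTR(-ENAMETOOLONG) if the buffer was too small */
		printk(KERN_DEBUG "path: %s\n", p);
	free_page((unsigned long)buf);
}
#endif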
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
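
/*
 * Illustrative sketch (not part of the original file): a typical ->d_dname()
 * implementation built on dynamic_dname(). pipefs-style, never-mounted
 * filesystems use this pattern so their objects get a name in
 * /proc/<pid>/fd/ on demand; the names below are hypothetical.
 */
#if 0
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	/* produces names like "example:[1234]" from the inode number */
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}

static const struct dentry_operations example_dentry_ops = {
	.d_dname	= example_dname,
};
#endif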
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	rcu_read_lock();
restart:
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);

char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	br_read_lock(&vfsmount_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		br_read_unlock(&vfsmount_lock);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		br_read_unlock(&vfsmount_lock);
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_readlock to protect against the d_parent trashing
		 * due to d_move
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
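
/*
 * Illustrative sketch (not part of the original file): a typical is_subdir()
 * check, refusing to act on a dentry that is not below a given root. The
 * helper name is hypothetical; the caller is expected to hold a reference
 * on the dentry, as the comment above requires.
 */
#if 0
static int example_check_under_root(struct dentry *dentry, struct dentry *root)
{
	if (!is_subdir(dentry, root))
		return -EINVAL;	/* dentry is outside the subtree rooted at root */
	return 0;
}
#endif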
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);

static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);