1 /*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9 /*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
40 #include <linux/list_lru.h>
41 #include "internal.h"
42 #include "mount.h"
43
44 /*
45 * Usage:
46 * dcache->d_inode->i_lock protects:
47 * - i_dentry, d_alias, d_inode of aliases
48 * dcache_hash_bucket lock protects:
49 * - the dcache hash table
50 * s_anon bl list spinlock protects:
51 * - the s_anon list (see __d_drop)
52 * dentry->d_sb->s_dentry_lru_lock protects:
53 * - the dcache lru lists and counters
54 * d_lock protects:
55 * - d_flags
56 * - d_name
57 * - d_lru
58 * - d_count
59 * - d_unhashed()
60 * - d_parent and d_subdirs
61 *   - children's d_child and d_parent
62 * - d_alias, d_inode
63 *
64 * Ordering:
65 * dentry->d_inode->i_lock
66 * dentry->d_lock
67 * dentry->d_sb->s_dentry_lru_lock
68 * dcache_hash_bucket lock
69 * s_anon lock
70 *
71 * If there is an ancestor relationship:
72 * dentry->d_parent->...->d_parent->d_lock
73 * ...
74 * dentry->d_parent->d_lock
75 * dentry->d_lock
76 *
77 * If no ancestor relationship:
78 * if (dentry1 < dentry2)
79 * dentry1->d_lock
80 * dentry2->d_lock
81 */
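/*
 * Illustrative sketch (editorial addition, not part of dcache.c): taking
 * d_lock on two dentries with no ancestor relationship, following the
 * address-ordering rule above.  The helper name is hypothetical; the
 * DENTRY_D_LOCK_NESTED annotation only tells lockdep that two locks of
 * the same class are being taken deliberately.
 */
static inline void lock_two_unrelated_dentries(struct dentry *d1,
                                               struct dentry *d2)
{
        if (d1 < d2) {
                spin_lock(&d1->d_lock);
                spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
        } else {
                spin_lock(&d2->d_lock);
                spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
        }
}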
82 int sysctl_vfs_cache_pressure __read_mostly = 100;
83 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
84
85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86
87 EXPORT_SYMBOL(rename_lock);
88
89 static struct kmem_cache *dentry_cache __read_mostly;
90
91 /*
92 * This is the single most critical data structure when it comes
93 * to the dcache: the hashtable for lookups. Somebody should try
94 * to make this good - I've just made it work.
95 *
96 * This hash-function tries to avoid losing too many bits of hash
97 * information, yet avoid using a prime hash-size or similar.
98 */
99 #define D_HASHBITS d_hash_shift
100 #define D_HASHMASK d_hash_mask
101
102 static unsigned int d_hash_mask __read_mostly;
103 static unsigned int d_hash_shift __read_mostly;
104
105 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106
107 static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
108 unsigned int hash)
109 {
110 hash += (unsigned long) parent / L1_CACHE_BYTES;
111 hash = hash + (hash >> D_HASHBITS);
112 return dentry_hashtable + (hash & D_HASHMASK);
113 }
114
115 /* Statistics gathering. */
116 struct dentry_stat_t dentry_stat = {
117 .age_limit = 45,
118 };
119
120 static DEFINE_PER_CPU(long, nr_dentry);
121 static DEFINE_PER_CPU(long, nr_dentry_unused);
122
123 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124
125 /*
126 * Here we resort to our own counters instead of using generic per-cpu counters
127  * for consistency with what the vfs inode code does. We expect to get better
128  * code and performance by having our own specialized counters.
129 *
130 * Please note that the loop is done over all possible CPUs, not over all online
131 * CPUs. The reason for this is that we don't want to play games with CPUs going
132 * on and off. If one of them goes off, we will just keep their counters.
133 *
134 * glommer: See cffbc8a for details, and if you ever intend to change this,
135 * please update all vfs counters to match.
136 */
137 static long get_nr_dentry(void)
138 {
139 int i;
140 long sum = 0;
141 for_each_possible_cpu(i)
142 sum += per_cpu(nr_dentry, i);
143 return sum < 0 ? 0 : sum;
144 }
145
146 static long get_nr_dentry_unused(void)
147 {
148 int i;
149 long sum = 0;
150 for_each_possible_cpu(i)
151 sum += per_cpu(nr_dentry_unused, i);
152 return sum < 0 ? 0 : sum;
153 }
154
155 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
156 size_t *lenp, loff_t *ppos)
157 {
158 dentry_stat.nr_dentry = get_nr_dentry();
159 dentry_stat.nr_unused = get_nr_dentry_unused();
160 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
161 }
162 #endif
163
164 /*
165 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
166 * The strings are both count bytes long, and count is non-zero.
167 */
168 #ifdef CONFIG_DCACHE_WORD_ACCESS
169
170 #include <asm/word-at-a-time.h>
171 /*
172  * NOTE! 'cs' and 'scount' come from a dentry, so it has an
173 * aligned allocation for this particular component. We don't
174 * strictly need the load_unaligned_zeropad() safety, but it
175 * doesn't hurt either.
176 *
177 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
178 * need the careful unaligned handling.
179 */
180 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
181 {
182 unsigned long a,b,mask;
183
184 for (;;) {
185 a = *(unsigned long *)cs;
186 b = load_unaligned_zeropad(ct);
187 if (tcount < sizeof(unsigned long))
188 break;
189 if (unlikely(a != b))
190 return 1;
191 cs += sizeof(unsigned long);
192 ct += sizeof(unsigned long);
193 tcount -= sizeof(unsigned long);
194 if (!tcount)
195 return 0;
196 }
197 mask = ~(~0ul << tcount*8);
198 return unlikely(!!((a ^ b) & mask));
199 }
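/*
 * Worked example for the tail mask above (editorial addition): on a
 * 64-bit little-endian machine with tcount == 3, tcount*8 == 24, so
 * ~0ul << 24 == 0xffffffffff000000 and mask == 0x0000000000ffffff.
 * Only the low three bytes of (a ^ b) - i.e. the three remaining name
 * characters - can therefore make the comparison fail.
 */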
200
201 #else
202
203 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
204 {
205 do {
206 if (*cs != *ct)
207 return 1;
208 cs++;
209 ct++;
210 tcount--;
211 } while (tcount);
212 return 0;
213 }
214
215 #endif
216
217 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
218 {
219 const unsigned char *cs;
220 /*
221 * Be careful about RCU walk racing with rename:
222 * use ACCESS_ONCE to fetch the name pointer.
223 *
224 * NOTE! Even if a rename will mean that the length
225 * was not loaded atomically, we don't care. The
226 * RCU walk will check the sequence count eventually,
227 * and catch it. And we won't overrun the buffer,
228 * because we're reading the name pointer atomically,
229 * and a dentry name is guaranteed to be properly
230 * terminated with a NUL byte.
231 *
232 * End result: even if 'len' is wrong, we'll exit
233 * early because the data cannot match (there can
234 * be no NUL in the ct/tcount data)
235 */
236 cs = ACCESS_ONCE(dentry->d_name.name);
237 smp_read_barrier_depends();
238 return dentry_string_cmp(cs, ct, tcount);
239 }
240
241 static void __d_free(struct rcu_head *head)
242 {
243 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
244
245 WARN_ON(!hlist_unhashed(&dentry->d_alias));
246 if (dname_external(dentry))
247 kfree(dentry->d_name.name);
248 kmem_cache_free(dentry_cache, dentry);
249 }
250
251 /*
252 * no locks, please.
253 */
254 static void d_free(struct dentry *dentry)
255 {
256 BUG_ON((int)dentry->d_lockref.count > 0);
257 this_cpu_dec(nr_dentry);
258 if (dentry->d_op && dentry->d_op->d_release)
259 dentry->d_op->d_release(dentry);
260
261 /* if dentry was never visible to RCU, immediate free is OK */
262 if (!(dentry->d_flags & DCACHE_RCUACCESS))
263 __d_free(&dentry->d_u.d_rcu);
264 else
265 call_rcu(&dentry->d_u.d_rcu, __d_free);
266 }
267
268 /**
269 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
270 * @dentry: the target dentry
271 * After this call, in-progress rcu-walk path lookup will fail. This
272 * should be called after unhashing, and after changing d_inode (if
273 * the dentry has not already been unhashed).
274 */
275 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
276 {
277 assert_spin_locked(&dentry->d_lock);
278 /* Go through a barrier */
279 write_seqcount_barrier(&dentry->d_seq);
280 }
281
282 /*
283 * Release the dentry's inode, using the filesystem
284 * d_iput() operation if defined. Dentry has no refcount
285 * and is unhashed.
286 */
287 static void dentry_iput(struct dentry * dentry)
288 __releases(dentry->d_lock)
289 __releases(dentry->d_inode->i_lock)
290 {
291 struct inode *inode = dentry->d_inode;
292 if (inode) {
293 dentry->d_inode = NULL;
294 hlist_del_init(&dentry->d_alias);
295 spin_unlock(&dentry->d_lock);
296 spin_unlock(&inode->i_lock);
297 if (!inode->i_nlink)
298 fsnotify_inoderemove(inode);
299 if (dentry->d_op && dentry->d_op->d_iput)
300 dentry->d_op->d_iput(dentry, inode);
301 else
302 iput(inode);
303 } else {
304 spin_unlock(&dentry->d_lock);
305 }
306 }
307
308 /*
309 * Release the dentry's inode, using the filesystem
310 * d_iput() operation if defined. dentry remains in-use.
311 */
312 static void dentry_unlink_inode(struct dentry * dentry)
313 __releases(dentry->d_lock)
314 __releases(dentry->d_inode->i_lock)
315 {
316 struct inode *inode = dentry->d_inode;
317 __d_clear_type(dentry);
318 dentry->d_inode = NULL;
319 hlist_del_init(&dentry->d_alias);
320 dentry_rcuwalk_barrier(dentry);
321 spin_unlock(&dentry->d_lock);
322 spin_unlock(&inode->i_lock);
323 if (!inode->i_nlink)
324 fsnotify_inoderemove(inode);
325 if (dentry->d_op && dentry->d_op->d_iput)
326 dentry->d_op->d_iput(dentry, inode);
327 else
328 iput(inode);
329 }
330
331 /*
332 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
333 * is in use - which includes both the "real" per-superblock
334 * LRU list _and_ the DCACHE_SHRINK_LIST use.
335 *
336 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
337 * on the shrink list (ie not on the superblock LRU list).
338 *
339 * The per-cpu "nr_dentry_unused" counters are updated with
340 * the DCACHE_LRU_LIST bit.
341 *
342 * These helper functions make sure we always follow the
343 * rules. d_lock must be held by the caller.
344 */
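/*
 * Editorial summary of the legal DCACHE_LRU_LIST/DCACHE_SHRINK_LIST
 * combinations implied by the rules above:
 *
 *      (neither bit set)                       not on any list
 *      DCACHE_LRU_LIST                         on the superblock LRU list
 *      DCACHE_LRU_LIST|DCACHE_SHRINK_LIST      on a private shrink list
 *
 * DCACHE_SHRINK_LIST on its own is never a valid state.
 */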
345 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
346 static void d_lru_add(struct dentry *dentry)
347 {
348 D_FLAG_VERIFY(dentry, 0);
349 dentry->d_flags |= DCACHE_LRU_LIST;
350 this_cpu_inc(nr_dentry_unused);
351 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
352 }
353
354 static void d_lru_del(struct dentry *dentry)
355 {
356 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
357 dentry->d_flags &= ~DCACHE_LRU_LIST;
358 this_cpu_dec(nr_dentry_unused);
359 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
360 }
361
362 static void d_shrink_del(struct dentry *dentry)
363 {
364 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
365 list_del_init(&dentry->d_lru);
366 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
367 this_cpu_dec(nr_dentry_unused);
368 }
369
370 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
371 {
372 D_FLAG_VERIFY(dentry, 0);
373 list_add(&dentry->d_lru, list);
374 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
375 this_cpu_inc(nr_dentry_unused);
376 }
377
378 /*
379 * These can only be called under the global LRU lock, ie during the
380 * callback for freeing the LRU list. "isolate" removes it from the
381 * LRU lists entirely, while shrink_move moves it to the indicated
382 * private list.
383 */
384 static void d_lru_isolate(struct dentry *dentry)
385 {
386 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
387 dentry->d_flags &= ~DCACHE_LRU_LIST;
388 this_cpu_dec(nr_dentry_unused);
389 list_del_init(&dentry->d_lru);
390 }
391
392 static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
393 {
394 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
395 dentry->d_flags |= DCACHE_SHRINK_LIST;
396 list_move_tail(&dentry->d_lru, list);
397 }
398
399 /*
400  * dentry_lru_(add|del) must be called with d_lock held.
401 */
402 static void dentry_lru_add(struct dentry *dentry)
403 {
404 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
405 d_lru_add(dentry);
406 }
407
408 /*
409 * Remove a dentry with references from the LRU.
410 *
411 * If we are on the shrink list, then we can get to try_prune_one_dentry() and
412 * lose our last reference through the parent walk. In this case, we need to
413 * remove ourselves from the shrink list, not the LRU.
414 */
415 static void dentry_lru_del(struct dentry *dentry)
416 {
417 if (dentry->d_flags & DCACHE_LRU_LIST) {
418 if (dentry->d_flags & DCACHE_SHRINK_LIST)
419 return d_shrink_del(dentry);
420 d_lru_del(dentry);
421 }
422 }
423
424 /**
425 * d_kill - kill dentry and return parent
426 * @dentry: dentry to kill
427 * @parent: parent dentry
428 *
429 * The dentry must already be unhashed and removed from the LRU.
430 *
431 * If this is the root of the dentry tree, return NULL.
432 *
433 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
434 * d_kill.
435 */
436 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
437 __releases(dentry->d_lock)
438 __releases(parent->d_lock)
439 __releases(dentry->d_inode->i_lock)
440 {
441 list_del(&dentry->d_u.d_child);
442 /*
443 * Inform try_to_ascend() that we are no longer attached to the
444 * dentry tree
445 */
446 dentry->d_flags |= DCACHE_DENTRY_KILLED;
447 if (parent)
448 spin_unlock(&parent->d_lock);
449 dentry_iput(dentry);
450 /*
451 * dentry_iput drops the locks, at which point nobody (except
452 * transient RCU lookups) can reach this dentry.
453 */
454 d_free(dentry);
455 return parent;
456 }
457
458 /**
459 * d_drop - drop a dentry
460 * @dentry: dentry to drop
461 *
462 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
463 * be found through a VFS lookup any more. Note that this is different from
464 * deleting the dentry - d_delete will try to mark the dentry negative if
465 * possible, giving a successful _negative_ lookup, while d_drop will
466 * just make the cache lookup fail.
467 *
468 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
469 * reason (NFS timeouts or autofs deletes).
470 *
471 * __d_drop requires dentry->d_lock.
472 */
473 void __d_drop(struct dentry *dentry)
474 {
475 if (!d_unhashed(dentry)) {
476 struct hlist_bl_head *b;
477 /*
478 * Hashed dentries are normally on the dentry hashtable,
479 * with the exception of those newly allocated by
480 * d_obtain_alias, which are always IS_ROOT:
481 */
482 if (unlikely(IS_ROOT(dentry)))
483 b = &dentry->d_sb->s_anon;
484 else
485 b = d_hash(dentry->d_parent, dentry->d_name.hash);
486
487 hlist_bl_lock(b);
488 __hlist_bl_del(&dentry->d_hash);
489 dentry->d_hash.pprev = NULL;
490 hlist_bl_unlock(b);
491 dentry_rcuwalk_barrier(dentry);
492 }
493 }
494 EXPORT_SYMBOL(__d_drop);
495
496 void d_drop(struct dentry *dentry)
497 {
498 spin_lock(&dentry->d_lock);
499 __d_drop(dentry);
500 spin_unlock(&dentry->d_lock);
501 }
502 EXPORT_SYMBOL(d_drop);
503
504 /*
505 * Finish off a dentry we've decided to kill.
506 * dentry->d_lock must be held, returns with it unlocked.
507  * If unlock_on_failure is non-zero, d_lock is dropped again when a trylock fails.
508 * Returns dentry requiring refcount drop, or NULL if we're done.
509 */
510 static struct dentry *
511 dentry_kill(struct dentry *dentry, int unlock_on_failure)
512 __releases(dentry->d_lock)
513 {
514 struct inode *inode;
515 struct dentry *parent;
516
517 inode = dentry->d_inode;
518 if (inode && !spin_trylock(&inode->i_lock)) {
519 relock:
520 if (unlock_on_failure) {
521 spin_unlock(&dentry->d_lock);
522 cpu_relax();
523 }
524 return dentry; /* try again with same dentry */
525 }
526 if (IS_ROOT(dentry))
527 parent = NULL;
528 else
529 parent = dentry->d_parent;
530 if (parent && !spin_trylock(&parent->d_lock)) {
531 if (inode)
532 spin_unlock(&inode->i_lock);
533 goto relock;
534 }
535
536 /*
537 * The dentry is now unrecoverably dead to the world.
538 */
539 lockref_mark_dead(&dentry->d_lockref);
540
541 /*
542 * inform the fs via d_prune that this dentry is about to be
543 * unhashed and destroyed.
544 */
545 if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
546 dentry->d_op->d_prune(dentry);
547
548 dentry_lru_del(dentry);
549 /* if it was on the hash then remove it */
550 __d_drop(dentry);
551 return d_kill(dentry, parent);
552 }
553
554 /*
555 * This is dput
556 *
557 * This is complicated by the fact that we do not want to put
558 * dentries that are no longer on any hash chain on the unused
559 * list: we'd much rather just get rid of them immediately.
560 *
561 * However, that implies that we have to traverse the dentry
562 * tree upwards to the parents which might _also_ now be
563 * scheduled for deletion (it may have been only waiting for
564 * its last child to go away).
565 *
566 * This tail recursion is done by hand as we don't want to depend
567 * on the compiler to always get this right (gcc generally doesn't).
568 * Real recursion would eat up our stack space.
569 */
570
571 /*
572 * dput - release a dentry
573 * @dentry: dentry to release
574 *
575 * Release a dentry. This will drop the usage count and if appropriate
576 * call the dentry unlink method as well as removing it from the queues and
577 * releasing its resources. If the parent dentries were scheduled for release
578 * they too may now get deleted.
579 */
580 void dput(struct dentry *dentry)
581 {
582 if (unlikely(!dentry))
583 return;
584
585 repeat:
586 if (lockref_put_or_lock(&dentry->d_lockref))
587 return;
588
589 /* Unreachable? Get rid of it */
590 if (unlikely(d_unhashed(dentry)))
591 goto kill_it;
592
593 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
594 if (dentry->d_op->d_delete(dentry))
595 goto kill_it;
596 }
597
598 if (!(dentry->d_flags & DCACHE_REFERENCED))
599 dentry->d_flags |= DCACHE_REFERENCED;
600 dentry_lru_add(dentry);
601
602 dentry->d_lockref.count--;
603 spin_unlock(&dentry->d_lock);
604 return;
605
606 kill_it:
607 dentry = dentry_kill(dentry, 1);
608 if (dentry)
609 goto repeat;
610 }
611 EXPORT_SYMBOL(dput);
612
613 /**
614 * d_invalidate - invalidate a dentry
615 * @dentry: dentry to invalidate
616 *
617 * Try to invalidate the dentry if it turns out to be
618 * possible. If there are other dentries that can be
619 * reached through this one we can't delete it and we
620 * return -EBUSY. On success we return 0.
621 *
622 * no dcache lock.
623 */
624
625 int d_invalidate(struct dentry * dentry)
626 {
627 /*
628 * If it's already been dropped, return OK.
629 */
630 spin_lock(&dentry->d_lock);
631 if (d_unhashed(dentry)) {
632 spin_unlock(&dentry->d_lock);
633 return 0;
634 }
635 /*
636 * Check whether to do a partial shrink_dcache
637 * to get rid of unused child entries.
638 */
639 if (!list_empty(&dentry->d_subdirs)) {
640 spin_unlock(&dentry->d_lock);
641 shrink_dcache_parent(dentry);
642 spin_lock(&dentry->d_lock);
643 }
644
645 /*
646 * Somebody else still using it?
647 *
648 * If it's a directory, we can't drop it
649 * for fear of somebody re-populating it
650 * with children (even though dropping it
651 * would make it unreachable from the root,
652 * we might still populate it if it was a
653 * working directory or similar).
654 * We also need to leave mountpoints alone,
655 * directory or not.
656 */
657 if (dentry->d_lockref.count > 1 && dentry->d_inode) {
658 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
659 spin_unlock(&dentry->d_lock);
660 return -EBUSY;
661 }
662 }
663
664 __d_drop(dentry);
665 spin_unlock(&dentry->d_lock);
666 return 0;
667 }
668 EXPORT_SYMBOL(d_invalidate);
669
670 /* This must be called with d_lock held */
671 static inline void __dget_dlock(struct dentry *dentry)
672 {
673 dentry->d_lockref.count++;
674 }
675
676 static inline void __dget(struct dentry *dentry)
677 {
678 lockref_get(&dentry->d_lockref);
679 }
680
681 struct dentry *dget_parent(struct dentry *dentry)
682 {
683 int gotref;
684 struct dentry *ret;
685
686 /*
687 * Do optimistic parent lookup without any
688 * locking.
689 */
690 rcu_read_lock();
691 ret = ACCESS_ONCE(dentry->d_parent);
692 gotref = lockref_get_not_zero(&ret->d_lockref);
693 rcu_read_unlock();
694 if (likely(gotref)) {
695 if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
696 return ret;
697 dput(ret);
698 }
699
700 repeat:
701 /*
702 * Don't need rcu_dereference because we re-check it was correct under
703 * the lock.
704 */
705 rcu_read_lock();
706 ret = dentry->d_parent;
707 spin_lock(&ret->d_lock);
708 if (unlikely(ret != dentry->d_parent)) {
709 spin_unlock(&ret->d_lock);
710 rcu_read_unlock();
711 goto repeat;
712 }
713 rcu_read_unlock();
714 BUG_ON(!ret->d_lockref.count);
715 ret->d_lockref.count++;
716 spin_unlock(&ret->d_lock);
717 return ret;
718 }
719 EXPORT_SYMBOL(dget_parent);
720
721 /**
722 * d_find_alias - grab a hashed alias of inode
723 * @inode: inode in question
724 * @want_discon: flag, used by d_splice_alias, to request
725 * that only a DISCONNECTED alias be returned.
726 *
727 * If inode has a hashed alias, or is a directory and has any alias,
728 * acquire the reference to alias and return it. Otherwise return NULL.
729 * Notice that if inode is a directory there can be only one alias and
730 * it can be unhashed only if it has no children, or if it is the root
731 * of a filesystem.
732 *
733 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
734 * any other hashed alias over that one unless @want_discon is set,
735 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
736 */
737 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
738 {
739 struct dentry *alias, *discon_alias;
740
741 again:
742 discon_alias = NULL;
743 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
744 spin_lock(&alias->d_lock);
745 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
746 if (IS_ROOT(alias) &&
747 (alias->d_flags & DCACHE_DISCONNECTED)) {
748 discon_alias = alias;
749 } else if (!want_discon) {
750 __dget_dlock(alias);
751 spin_unlock(&alias->d_lock);
752 return alias;
753 }
754 }
755 spin_unlock(&alias->d_lock);
756 }
757 if (discon_alias) {
758 alias = discon_alias;
759 spin_lock(&alias->d_lock);
760 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
761 if (IS_ROOT(alias) &&
762 (alias->d_flags & DCACHE_DISCONNECTED)) {
763 __dget_dlock(alias);
764 spin_unlock(&alias->d_lock);
765 return alias;
766 }
767 }
768 spin_unlock(&alias->d_lock);
769 goto again;
770 }
771 return NULL;
772 }
773
774 struct dentry *d_find_alias(struct inode *inode)
775 {
776 struct dentry *de = NULL;
777
778 if (!hlist_empty(&inode->i_dentry)) {
779 spin_lock(&inode->i_lock);
780 de = __d_find_alias(inode, 0);
781 spin_unlock(&inode->i_lock);
782 }
783 return de;
784 }
785 EXPORT_SYMBOL(d_find_alias);
786
787 /*
788 * Try to kill dentries associated with this inode.
789 * WARNING: you must own a reference to inode.
790 */
791 void d_prune_aliases(struct inode *inode)
792 {
793 struct dentry *dentry;
794 restart:
795 spin_lock(&inode->i_lock);
796 hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
797 spin_lock(&dentry->d_lock);
798 if (!dentry->d_lockref.count) {
799 /*
800 * inform the fs via d_prune that this dentry
801 * is about to be unhashed and destroyed.
802 */
803 if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
804 !d_unhashed(dentry))
805 dentry->d_op->d_prune(dentry);
806
807 __dget_dlock(dentry);
808 __d_drop(dentry);
809 spin_unlock(&dentry->d_lock);
810 spin_unlock(&inode->i_lock);
811 dput(dentry);
812 goto restart;
813 }
814 spin_unlock(&dentry->d_lock);
815 }
816 spin_unlock(&inode->i_lock);
817 }
818 EXPORT_SYMBOL(d_prune_aliases);
819
820 /*
821 * Try to throw away a dentry - free the inode, dput the parent.
822 * Requires dentry->d_lock is held, and dentry->d_count == 0.
823 * Releases dentry->d_lock.
824 *
825  * This may fail if locks cannot be acquired - no problem, just try again.
826 */
827 static struct dentry * try_prune_one_dentry(struct dentry *dentry)
828 __releases(dentry->d_lock)
829 {
830 struct dentry *parent;
831
832 parent = dentry_kill(dentry, 0);
833 /*
834 * If dentry_kill returns NULL, we have nothing more to do.
835 * if it returns the same dentry, trylocks failed. In either
836 * case, just loop again.
837 *
838 * Otherwise, we need to prune ancestors too. This is necessary
839 * to prevent quadratic behavior of shrink_dcache_parent(), but
840 * is also expected to be beneficial in reducing dentry cache
841 * fragmentation.
842 */
843 if (!parent)
844 return NULL;
845 if (parent == dentry)
846 return dentry;
847
848 /* Prune ancestors. */
849 dentry = parent;
850 while (dentry) {
851 if (lockref_put_or_lock(&dentry->d_lockref))
852 return NULL;
853 dentry = dentry_kill(dentry, 1);
854 }
855 return NULL;
856 }
857
858 static void shrink_dentry_list(struct list_head *list)
859 {
860 struct dentry *dentry;
861
862 rcu_read_lock();
863 for (;;) {
864 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
865 if (&dentry->d_lru == list)
866 break; /* empty */
867
868 /*
869 * Get the dentry lock, and re-verify that the dentry is
870  * still the one on the shrinking list. If it is, we know that
871 * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set.
872 */
873 spin_lock(&dentry->d_lock);
874 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
875 spin_unlock(&dentry->d_lock);
876 continue;
877 }
878
879 /*
880 * The dispose list is isolated and dentries are not accounted
881 * to the LRU here, so we can simply remove it from the list
882 * here regardless of whether it is referenced or not.
883 */
884 d_shrink_del(dentry);
885
886 /*
887 * We found an inuse dentry which was not removed from
888 * the LRU because of laziness during lookup. Do not free it.
889 */
890 if (dentry->d_lockref.count) {
891 spin_unlock(&dentry->d_lock);
892 continue;
893 }
894 rcu_read_unlock();
895
896 /*
897  * If try_prune_one_dentry() returns a dentry, it will
898 * be the same one we passed in, and d_lock will
899 * have been held the whole time, so it will not
900 * have been added to any other lists. We failed
901 * to get the inode lock.
902 *
903 * We just add it back to the shrink list.
904 */
905 dentry = try_prune_one_dentry(dentry);
906
907 rcu_read_lock();
908 if (dentry) {
909 d_shrink_add(dentry, list);
910 spin_unlock(&dentry->d_lock);
911 }
912 }
913 rcu_read_unlock();
914 }
915
916 static enum lru_status
917 dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
918 {
919 struct list_head *freeable = arg;
920 struct dentry *dentry = container_of(item, struct dentry, d_lru);
921
922
923 /*
924 * we are inverting the lru lock/dentry->d_lock here,
925 * so use a trylock. If we fail to get the lock, just skip
926 * it
927 */
928 if (!spin_trylock(&dentry->d_lock))
929 return LRU_SKIP;
930
931 /*
932 * Referenced dentries are still in use. If they have active
933 * counts, just remove them from the LRU. Otherwise give them
934 * another pass through the LRU.
935 */
936 if (dentry->d_lockref.count) {
937 d_lru_isolate(dentry);
938 spin_unlock(&dentry->d_lock);
939 return LRU_REMOVED;
940 }
941
942 if (dentry->d_flags & DCACHE_REFERENCED) {
943 dentry->d_flags &= ~DCACHE_REFERENCED;
944 spin_unlock(&dentry->d_lock);
945
946 /*
947 * The list move itself will be made by the common LRU code. At
948 * this point, we've dropped the dentry->d_lock but keep the
949 * lru lock. This is safe to do, since every list movement is
950 * protected by the lru lock even if both locks are held.
951 *
952 * This is guaranteed by the fact that all LRU management
953  * functions are mediated by LRU API calls like
954  * list_lru_add and list_lru_del. List movement in this file
955  * only ever occurs through these functions or through callbacks
956  * like this one, which are called from the LRU API.
957 *
958 * The only exceptions to this are functions like
959 * shrink_dentry_list, and code that first checks for the
960 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
961  * operating only on stack-provided lists after they have been
962  * properly isolated from the main list, so the access is always
963  * local.
964 */
965 return LRU_ROTATE;
966 }
967
968 d_lru_shrink_move(dentry, freeable);
969 spin_unlock(&dentry->d_lock);
970
971 return LRU_REMOVED;
972 }
973
974 /**
975 * prune_dcache_sb - shrink the dcache
976 * @sb: superblock
977  * @nr_to_scan: number of entries to try to free
978 * @nid: which node to scan for freeable entities
979 *
980 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
981  * done when we need more memory and is called from the superblock shrinker
982 * function.
983 *
984 * This function may fail to free any resources if all the dentries are in
985 * use.
986 */
987 long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
988 int nid)
989 {
990 LIST_HEAD(dispose);
991 long freed;
992
993 freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
994 &dispose, &nr_to_scan);
995 shrink_dentry_list(&dispose);
996 return freed;
997 }
998
999 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1000 spinlock_t *lru_lock, void *arg)
1001 {
1002 struct list_head *freeable = arg;
1003 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1004
1005 /*
1006 * we are inverting the lru lock/dentry->d_lock here,
1007 * so use a trylock. If we fail to get the lock, just skip
1008 * it
1009 */
1010 if (!spin_trylock(&dentry->d_lock))
1011 return LRU_SKIP;
1012
1013 d_lru_shrink_move(dentry, freeable);
1014 spin_unlock(&dentry->d_lock);
1015
1016 return LRU_REMOVED;
1017 }
1018
1019
1020 /**
1021 * shrink_dcache_sb - shrink dcache for a superblock
1022 * @sb: superblock
1023 *
1024 * Shrink the dcache for the specified super block. This is used to free
1025 * the dcache before unmounting a file system.
1026 */
1027 void shrink_dcache_sb(struct super_block *sb)
1028 {
1029 long freed;
1030
1031 do {
1032 LIST_HEAD(dispose);
1033
1034 freed = list_lru_walk(&sb->s_dentry_lru,
1035 dentry_lru_isolate_shrink, &dispose, UINT_MAX);
1036
1037 this_cpu_sub(nr_dentry_unused, freed);
1038 shrink_dentry_list(&dispose);
1039 } while (freed > 0);
1040 }
1041 EXPORT_SYMBOL(shrink_dcache_sb);
1042
1043 /*
1044 * This tries to ascend one level of parenthood, but
1045 * we can race with renaming, so we need to re-check
1046 * the parenthood after dropping the lock and check
1047 * that the sequence number still matches.
1048 */
1049 static struct dentry *try_to_ascend(struct dentry *old, unsigned seq)
1050 {
1051 struct dentry *new = old->d_parent;
1052
1053 rcu_read_lock();
1054 spin_unlock(&old->d_lock);
1055 spin_lock(&new->d_lock);
1056
1057 /*
1058 * might go back up the wrong parent if we have had a rename
1059 * or deletion
1060 */
1061 if (new != old->d_parent ||
1062 (old->d_flags & DCACHE_DENTRY_KILLED) ||
1063 need_seqretry(&rename_lock, seq)) {
1064 spin_unlock(&new->d_lock);
1065 new = NULL;
1066 }
1067 rcu_read_unlock();
1068 return new;
1069 }
1070
1071 /**
1072  * enum d_walk_ret - action to take during tree walk
1073  * @D_WALK_CONTINUE:    continue walk
1074 * @D_WALK_QUIT: quit walk
1075 * @D_WALK_NORETRY: quit when retry is needed
1076 * @D_WALK_SKIP: skip this dentry and its children
1077 */
1078 enum d_walk_ret {
1079 D_WALK_CONTINUE,
1080 D_WALK_QUIT,
1081 D_WALK_NORETRY,
1082 D_WALK_SKIP,
1083 };
1084
1085 /**
1086 * d_walk - walk the dentry tree
1087 * @parent: start of walk
1088 * @data: data passed to @enter() and @finish()
1089 * @enter: callback when first entering the dentry
1090 * @finish: callback when successfully finished the walk
1091 *
1092 * The @enter() and @finish() callbacks are called with d_lock held.
1093 */
1094 static void d_walk(struct dentry *parent, void *data,
1095 enum d_walk_ret (*enter)(void *, struct dentry *),
1096 void (*finish)(void *))
1097 {
1098 struct dentry *this_parent;
1099 struct list_head *next;
1100 unsigned seq = 0;
1101 enum d_walk_ret ret;
1102 bool retry = true;
1103
1104 again:
1105 read_seqbegin_or_lock(&rename_lock, &seq);
1106 this_parent = parent;
1107 spin_lock(&this_parent->d_lock);
1108
1109 ret = enter(data, this_parent);
1110 switch (ret) {
1111 case D_WALK_CONTINUE:
1112 break;
1113 case D_WALK_QUIT:
1114 case D_WALK_SKIP:
1115 goto out_unlock;
1116 case D_WALK_NORETRY:
1117 retry = false;
1118 break;
1119 }
1120 repeat:
1121 next = this_parent->d_subdirs.next;
1122 resume:
1123 while (next != &this_parent->d_subdirs) {
1124 struct list_head *tmp = next;
1125 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1126 next = tmp->next;
1127
1128 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1129
1130 ret = enter(data, dentry);
1131 switch (ret) {
1132 case D_WALK_CONTINUE:
1133 break;
1134 case D_WALK_QUIT:
1135 spin_unlock(&dentry->d_lock);
1136 goto out_unlock;
1137 case D_WALK_NORETRY:
1138 retry = false;
1139 break;
1140 case D_WALK_SKIP:
1141 spin_unlock(&dentry->d_lock);
1142 continue;
1143 }
1144
1145 if (!list_empty(&dentry->d_subdirs)) {
1146 spin_unlock(&this_parent->d_lock);
1147 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1148 this_parent = dentry;
1149 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1150 goto repeat;
1151 }
1152 spin_unlock(&dentry->d_lock);
1153 }
1154 /*
1155 * All done at this level ... ascend and resume the search.
1156 */
1157 if (this_parent != parent) {
1158 struct dentry *child = this_parent;
1159 this_parent = try_to_ascend(this_parent, seq);
1160 if (!this_parent)
1161 goto rename_retry;
1162 next = child->d_u.d_child.next;
1163 goto resume;
1164 }
1165 if (need_seqretry(&rename_lock, seq)) {
1166 spin_unlock(&this_parent->d_lock);
1167 goto rename_retry;
1168 }
1169 if (finish)
1170 finish(data);
1171
1172 out_unlock:
1173 spin_unlock(&this_parent->d_lock);
1174 done_seqretry(&rename_lock, seq);
1175 return;
1176
1177 rename_retry:
1178 if (!retry)
1179 return;
1180 seq = 1;
1181 goto again;
1182 }
1183
1184 /*
1185 * Search for at least 1 mount point in the dentry's subdirs.
1186 * We descend to the next level whenever the d_subdirs
1187 * list is non-empty and continue searching.
1188 */
1189
1190 static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
1191 {
1192 int *ret = data;
1193 if (d_mountpoint(dentry)) {
1194 *ret = 1;
1195 return D_WALK_QUIT;
1196 }
1197 return D_WALK_CONTINUE;
1198 }
1199
1200 /**
1201 * have_submounts - check for mounts over a dentry
1202 * @parent: dentry to check.
1203 *
1204 * Return true if the parent or its subdirectories contain
1205 * a mount point
1206 */
1207 int have_submounts(struct dentry *parent)
1208 {
1209 int ret = 0;
1210
1211 d_walk(parent, &ret, check_mount, NULL);
1212
1213 return ret;
1214 }
1215 EXPORT_SYMBOL(have_submounts);
1216
1217 /*
1218 * Called by mount code to set a mountpoint and check if the mountpoint is
1219 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1220 * subtree can become unreachable).
1221 *
1222 * Only one of check_submounts_and_drop() and d_set_mounted() must succeed. For
1223 * this reason take rename_lock and d_lock on dentry and ancestors.
1224 */
1225 int d_set_mounted(struct dentry *dentry)
1226 {
1227 struct dentry *p;
1228 int ret = -ENOENT;
1229 write_seqlock(&rename_lock);
1230 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1231 /* Need exclusion wrt. check_submounts_and_drop() */
1232 spin_lock(&p->d_lock);
1233 if (unlikely(d_unhashed(p))) {
1234 spin_unlock(&p->d_lock);
1235 goto out;
1236 }
1237 spin_unlock(&p->d_lock);
1238 }
1239 spin_lock(&dentry->d_lock);
1240 if (!d_unlinked(dentry)) {
1241 dentry->d_flags |= DCACHE_MOUNTED;
1242 ret = 0;
1243 }
1244 spin_unlock(&dentry->d_lock);
1245 out:
1246 write_sequnlock(&rename_lock);
1247 return ret;
1248 }
1249
1250 /*
1251 * Search the dentry child list of the specified parent,
1252 * and move any unused dentries to the end of the unused
1253 * list for prune_dcache(). We descend to the next level
1254 * whenever the d_subdirs list is non-empty and continue
1255 * searching.
1256 *
1257 * It returns zero iff there are no unused children,
1258 * otherwise it returns the number of children moved to
1259 * the end of the unused list. This may not be the total
1260 * number of unused children, because select_parent can
1261 * drop the lock and return early due to latency
1262 * constraints.
1263 */
1264
1265 struct select_data {
1266 struct dentry *start;
1267 struct list_head dispose;
1268 int found;
1269 };
1270
1271 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1272 {
1273 struct select_data *data = _data;
1274 enum d_walk_ret ret = D_WALK_CONTINUE;
1275
1276 if (data->start == dentry)
1277 goto out;
1278
1279 /*
1280 * move only zero ref count dentries to the dispose list.
1281 *
1282 * Those which are presently on the shrink list, being processed
1283 * by shrink_dentry_list(), shouldn't be moved. Otherwise the
1284 * loop in shrink_dcache_parent() might not make any progress
1285 * and loop forever.
1286 */
1287 if (dentry->d_lockref.count) {
1288 dentry_lru_del(dentry);
1289 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1290 /*
1291 * We can't use d_lru_shrink_move() because we
1292 * need to get the global LRU lock and do the
1293 * LRU accounting.
1294 */
1295 d_lru_del(dentry);
1296 d_shrink_add(dentry, &data->dispose);
1297 data->found++;
1298 ret = D_WALK_NORETRY;
1299 }
1300 /*
1301 * We can return to the caller if we have found some (this
1302 * ensures forward progress). We'll be coming back to find
1303 * the rest.
1304 */
1305 if (data->found && need_resched())
1306 ret = D_WALK_QUIT;
1307 out:
1308 return ret;
1309 }
1310
1311 /**
1312 * shrink_dcache_parent - prune dcache
1313 * @parent: parent of entries to prune
1314 *
1315 * Prune the dcache to remove unused children of the parent dentry.
1316 */
1317 void shrink_dcache_parent(struct dentry *parent)
1318 {
1319 for (;;) {
1320 struct select_data data;
1321
1322 INIT_LIST_HEAD(&data.dispose);
1323 data.start = parent;
1324 data.found = 0;
1325
1326 d_walk(parent, &data, select_collect, NULL);
1327 if (!data.found)
1328 break;
1329
1330 shrink_dentry_list(&data.dispose);
1331 cond_resched();
1332 }
1333 }
1334 EXPORT_SYMBOL(shrink_dcache_parent);
1335
1336 static enum d_walk_ret umount_collect(void *_data, struct dentry *dentry)
1337 {
1338 struct select_data *data = _data;
1339 enum d_walk_ret ret = D_WALK_CONTINUE;
1340
1341 if (dentry->d_lockref.count) {
1342 dentry_lru_del(dentry);
1343 if (likely(!list_empty(&dentry->d_subdirs)))
1344 goto out;
1345 if (dentry == data->start && dentry->d_lockref.count == 1)
1346 goto out;
1347 printk(KERN_ERR
1348 "BUG: Dentry %p{i=%lx,n=%s}"
1349 " still in use (%d)"
1350 " [unmount of %s %s]\n",
1351 dentry,
1352 dentry->d_inode ?
1353 dentry->d_inode->i_ino : 0UL,
1354 dentry->d_name.name,
1355 dentry->d_lockref.count,
1356 dentry->d_sb->s_type->name,
1357 dentry->d_sb->s_id);
1358 BUG();
1359 } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) {
1360 /*
1361 * We can't use d_lru_shrink_move() because we
1362 * need to get the global LRU lock and do the
1363 * LRU accounting.
1364 */
1365 if (dentry->d_flags & DCACHE_LRU_LIST)
1366 d_lru_del(dentry);
1367 d_shrink_add(dentry, &data->dispose);
1368 data->found++;
1369 ret = D_WALK_NORETRY;
1370 }
1371 out:
1372 if (data->found && need_resched())
1373 ret = D_WALK_QUIT;
1374 return ret;
1375 }
1376
1377 /*
1378 * destroy the dentries attached to a superblock on unmounting
1379 */
1380 void shrink_dcache_for_umount(struct super_block *sb)
1381 {
1382 struct dentry *dentry;
1383
1384 if (down_read_trylock(&sb->s_umount))
1385 BUG();
1386
1387 dentry = sb->s_root;
1388 sb->s_root = NULL;
1389 for (;;) {
1390 struct select_data data;
1391
1392 INIT_LIST_HEAD(&data.dispose);
1393 data.start = dentry;
1394 data.found = 0;
1395
1396 d_walk(dentry, &data, umount_collect, NULL);
1397 if (!data.found)
1398 break;
1399
1400 shrink_dentry_list(&data.dispose);
1401 cond_resched();
1402 }
1403 d_drop(dentry);
1404 dput(dentry);
1405
1406 while (!hlist_bl_empty(&sb->s_anon)) {
1407 struct select_data data;
1408 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
1409
1410 INIT_LIST_HEAD(&data.dispose);
1411 data.start = NULL;
1412 data.found = 0;
1413
1414 d_walk(dentry, &data, umount_collect, NULL);
1415 if (data.found)
1416 shrink_dentry_list(&data.dispose);
1417 cond_resched();
1418 }
1419 }
1420
1421 static enum d_walk_ret check_and_collect(void *_data, struct dentry *dentry)
1422 {
1423 struct select_data *data = _data;
1424
1425 if (d_mountpoint(dentry)) {
1426 data->found = -EBUSY;
1427 return D_WALK_QUIT;
1428 }
1429
1430 return select_collect(_data, dentry);
1431 }
1432
1433 static void check_and_drop(void *_data)
1434 {
1435 struct select_data *data = _data;
1436
1437 if (d_mountpoint(data->start))
1438 data->found = -EBUSY;
1439 if (!data->found)
1440 __d_drop(data->start);
1441 }
1442
1443 /**
1444 * check_submounts_and_drop - prune dcache, check for submounts and drop
1445  * @dentry: dentry to prune and drop
1446  *
1447  * All done as a single atomic operation relative to has_unlinked_ancestor().
1448  * Returns 0 if @dentry was successfully unhashed. If there were submounts
1449  * then return -EBUSY.
1450  *
1451 */
1452 int check_submounts_and_drop(struct dentry *dentry)
1453 {
1454 int ret = 0;
1455
1456 /* Negative dentries can be dropped without further checks */
1457 if (!dentry->d_inode) {
1458 d_drop(dentry);
1459 goto out;
1460 }
1461
1462 for (;;) {
1463 struct select_data data;
1464
1465 INIT_LIST_HEAD(&data.dispose);
1466 data.start = dentry;
1467 data.found = 0;
1468
1469 d_walk(dentry, &data, check_and_collect, check_and_drop);
1470 ret = data.found;
1471
1472 if (!list_empty(&data.dispose))
1473 shrink_dentry_list(&data.dispose);
1474
1475 if (ret <= 0)
1476 break;
1477
1478 cond_resched();
1479 }
1480
1481 out:
1482 return ret;
1483 }
1484 EXPORT_SYMBOL(check_submounts_and_drop);
1485
1486 /**
1487 * __d_alloc - allocate a dcache entry
1488 * @sb: filesystem it will belong to
1489 * @name: qstr of the name
1490 *
1491 * Allocates a dentry. It returns %NULL if there is insufficient memory
1492 * available. On a success the dentry is returned. The name passed in is
1493 * copied and the copy passed in may be reused after this call.
1494 */
1495
1496 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1497 {
1498 struct dentry *dentry;
1499 char *dname;
1500
1501 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1502 if (!dentry)
1503 return NULL;
1504
1505 /*
1506 * We guarantee that the inline name is always NUL-terminated.
1507 * This way the memcpy() done by the name switching in rename
1508 * will still always have a NUL at the end, even if we might
1509 * be overwriting an internal NUL character
1510 */
1511 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1512 if (name->len > DNAME_INLINE_LEN-1) {
1513 dname = kmalloc(name->len + 1, GFP_KERNEL);
1514 if (!dname) {
1515 kmem_cache_free(dentry_cache, dentry);
1516 return NULL;
1517 }
1518 } else {
1519 dname = dentry->d_iname;
1520 }
1521
1522 dentry->d_name.len = name->len;
1523 dentry->d_name.hash = name->hash;
1524 memcpy(dname, name->name, name->len);
1525 dname[name->len] = 0;
1526
1527 /* Make sure we always see the terminating NUL character */
1528 smp_wmb();
1529 dentry->d_name.name = dname;
1530
1531 dentry->d_lockref.count = 1;
1532 dentry->d_flags = 0;
1533 spin_lock_init(&dentry->d_lock);
1534 seqcount_init(&dentry->d_seq);
1535 dentry->d_inode = NULL;
1536 dentry->d_parent = dentry;
1537 dentry->d_sb = sb;
1538 dentry->d_op = NULL;
1539 dentry->d_fsdata = NULL;
1540 INIT_HLIST_BL_NODE(&dentry->d_hash);
1541 INIT_LIST_HEAD(&dentry->d_lru);
1542 INIT_LIST_HEAD(&dentry->d_subdirs);
1543 INIT_HLIST_NODE(&dentry->d_alias);
1544 INIT_LIST_HEAD(&dentry->d_u.d_child);
1545 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1546
1547 this_cpu_inc(nr_dentry);
1548
1549 return dentry;
1550 }
1551
1552 /**
1553 * d_alloc - allocate a dcache entry
1554 * @parent: parent of entry to allocate
1555 * @name: qstr of the name
1556 *
1557 * Allocates a dentry. It returns %NULL if there is insufficient memory
1558 * available. On a success the dentry is returned. The name passed in is
1559 * copied and the copy passed in may be reused after this call.
1560 */
1561 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1562 {
1563 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1564 if (!dentry)
1565 return NULL;
1566
1567 spin_lock(&parent->d_lock);
1568 /*
1569 * don't need child lock because it is not subject
1570 * to concurrency here
1571 */
1572 __dget_dlock(parent);
1573 dentry->d_parent = parent;
1574 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1575 spin_unlock(&parent->d_lock);
1576
1577 return dentry;
1578 }
1579 EXPORT_SYMBOL(d_alloc);
1580
1581 /**
1582 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1583 * @sb: the superblock
1584 * @name: qstr of the name
1585 *
1586 * For a filesystem that just pins its dentries in memory and never
1587 * performs lookups at all, return an unhashed IS_ROOT dentry.
1588 */
1589 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1590 {
1591 return __d_alloc(sb, name);
1592 }
1593 EXPORT_SYMBOL(d_alloc_pseudo);
1594
1595 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1596 {
1597 struct qstr q;
1598
1599 q.name = name;
1600 q.len = strlen(name);
1601 q.hash = full_name_hash(q.name, q.len);
1602 return d_alloc(parent, &q);
1603 }
1604 EXPORT_SYMBOL(d_alloc_name);
1605
1606 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1607 {
1608 WARN_ON_ONCE(dentry->d_op);
1609 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1610 DCACHE_OP_COMPARE |
1611 DCACHE_OP_REVALIDATE |
1612 DCACHE_OP_WEAK_REVALIDATE |
1613 DCACHE_OP_DELETE ));
1614 dentry->d_op = op;
1615 if (!op)
1616 return;
1617 if (op->d_hash)
1618 dentry->d_flags |= DCACHE_OP_HASH;
1619 if (op->d_compare)
1620 dentry->d_flags |= DCACHE_OP_COMPARE;
1621 if (op->d_revalidate)
1622 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1623 if (op->d_weak_revalidate)
1624 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1625 if (op->d_delete)
1626 dentry->d_flags |= DCACHE_OP_DELETE;
1627 if (op->d_prune)
1628 dentry->d_flags |= DCACHE_OP_PRUNE;
1629
1630 }
1631 EXPORT_SYMBOL(d_set_d_op);
1632
1633 static unsigned d_flags_for_inode(struct inode *inode)
1634 {
1635 unsigned add_flags = DCACHE_FILE_TYPE;
1636
1637 if (!inode)
1638 return DCACHE_MISS_TYPE;
1639
1640 if (S_ISDIR(inode->i_mode)) {
1641 add_flags = DCACHE_DIRECTORY_TYPE;
1642 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1643 if (unlikely(!inode->i_op->lookup))
1644 add_flags = DCACHE_AUTODIR_TYPE;
1645 else
1646 inode->i_opflags |= IOP_LOOKUP;
1647 }
1648 } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1649 if (unlikely(inode->i_op->follow_link))
1650 add_flags = DCACHE_SYMLINK_TYPE;
1651 else
1652 inode->i_opflags |= IOP_NOFOLLOW;
1653 }
1654
1655 if (unlikely(IS_AUTOMOUNT(inode)))
1656 add_flags |= DCACHE_NEED_AUTOMOUNT;
1657 return add_flags;
1658 }
1659
1660 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1661 {
1662 unsigned add_flags = d_flags_for_inode(inode);
1663
1664 spin_lock(&dentry->d_lock);
1665 dentry->d_flags &= ~DCACHE_ENTRY_TYPE;
1666 dentry->d_flags |= add_flags;
1667 if (inode)
1668 hlist_add_head(&dentry->d_alias, &inode->i_dentry);
1669 dentry->d_inode = inode;
1670 dentry_rcuwalk_barrier(dentry);
1671 spin_unlock(&dentry->d_lock);
1672 fsnotify_d_instantiate(dentry, inode);
1673 }
1674
1675 /**
1676 * d_instantiate - fill in inode information for a dentry
1677 * @entry: dentry to complete
1678 * @inode: inode to attach to this dentry
1679 *
1680 * Fill in inode information in the entry.
1681 *
1682 * This turns negative dentries into productive full members
1683 * of society.
1684 *
1685 * NOTE! This assumes that the inode count has been incremented
1686 * (or otherwise set) by the caller to indicate that it is now
1687 * in use by the dcache.
1688 */
1689
1690 void d_instantiate(struct dentry *entry, struct inode * inode)
1691 {
1692 BUG_ON(!hlist_unhashed(&entry->d_alias));
1693 if (inode)
1694 spin_lock(&inode->i_lock);
1695 __d_instantiate(entry, inode);
1696 if (inode)
1697 spin_unlock(&inode->i_lock);
1698 security_d_instantiate(entry, inode);
1699 }
1700 EXPORT_SYMBOL(d_instantiate);
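/*
 * Illustrative sketch (editorial addition, not part of dcache.c): the
 * usual way a filesystem's ->create() method hands a freshly allocated
 * inode to the dcache.  example_new_inode() is hypothetical.
 */
static int example_create(struct inode *dir, struct dentry *dentry,
                          umode_t mode, bool excl)
{
        struct inode *inode = example_new_inode(dir, mode); /* hypothetical */

        if (IS_ERR(inode))
                return PTR_ERR(inode);
        /* the inode's reference is handed over to the dcache here */
        d_instantiate(dentry, inode);
        return 0;
}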
1701
1702 /**
1703 * d_instantiate_unique - instantiate a non-aliased dentry
1704 * @entry: dentry to instantiate
1705 * @inode: inode to attach to this dentry
1706 *
1707 * Fill in inode information in the entry. On success, it returns NULL.
1708 * If an unhashed alias of "entry" already exists, then we return the
1709 * aliased dentry instead and drop one reference to inode.
1710 *
1711 * Note that in order to avoid conflicts with rename() etc, the caller
1712 * had better be holding the parent directory semaphore.
1713 *
1714 * This also assumes that the inode count has been incremented
1715 * (or otherwise set) by the caller to indicate that it is now
1716 * in use by the dcache.
1717 */
1718 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1719 struct inode *inode)
1720 {
1721 struct dentry *alias;
1722 int len = entry->d_name.len;
1723 const char *name = entry->d_name.name;
1724 unsigned int hash = entry->d_name.hash;
1725
1726 if (!inode) {
1727 __d_instantiate(entry, NULL);
1728 return NULL;
1729 }
1730
1731 hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
1732 /*
1733 * Don't need alias->d_lock here, because aliases with
1734 * d_parent == entry->d_parent are not subject to name or
1735 * parent changes, because the parent inode i_mutex is held.
1736 */
1737 if (alias->d_name.hash != hash)
1738 continue;
1739 if (alias->d_parent != entry->d_parent)
1740 continue;
1741 if (alias->d_name.len != len)
1742 continue;
1743 if (dentry_cmp(alias, name, len))
1744 continue;
1745 __dget(alias);
1746 return alias;
1747 }
1748
1749 __d_instantiate(entry, inode);
1750 return NULL;
1751 }
1752
1753 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1754 {
1755 struct dentry *result;
1756
1757 BUG_ON(!hlist_unhashed(&entry->d_alias));
1758
1759 if (inode)
1760 spin_lock(&inode->i_lock);
1761 result = __d_instantiate_unique(entry, inode);
1762 if (inode)
1763 spin_unlock(&inode->i_lock);
1764
1765 if (!result) {
1766 security_d_instantiate(entry, inode);
1767 return NULL;
1768 }
1769
1770 BUG_ON(!d_unhashed(result));
1771 iput(inode);
1772 return result;
1773 }
1774
1775 EXPORT_SYMBOL(d_instantiate_unique);
1776
1777 /**
1778 * d_instantiate_no_diralias - instantiate a non-aliased dentry
1779 * @entry: dentry to complete
1780 * @inode: inode to attach to this dentry
1781 *
1782 * Fill in inode information in the entry. If a directory alias is found, then
1783 * return an error (and drop inode). Together with d_materialise_unique() this
1784 * guarantees that a directory inode may never have more than one alias.
1785 */
1786 int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
1787 {
1788 BUG_ON(!hlist_unhashed(&entry->d_alias));
1789
1790 spin_lock(&inode->i_lock);
1791 if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
1792 spin_unlock(&inode->i_lock);
1793 iput(inode);
1794 return -EBUSY;
1795 }
1796 __d_instantiate(entry, inode);
1797 spin_unlock(&inode->i_lock);
1798 security_d_instantiate(entry, inode);
1799
1800 return 0;
1801 }
1802 EXPORT_SYMBOL(d_instantiate_no_diralias);
1803
1804 struct dentry *d_make_root(struct inode *root_inode)
1805 {
1806 struct dentry *res = NULL;
1807
1808 if (root_inode) {
1809 static const struct qstr name = QSTR_INIT("/", 1);
1810
1811 res = __d_alloc(root_inode->i_sb, &name);
1812 if (res)
1813 d_instantiate(res, root_inode);
1814 else
1815 iput(root_inode);
1816 }
1817 return res;
1818 }
1819 EXPORT_SYMBOL(d_make_root);
1820
1821 static struct dentry * __d_find_any_alias(struct inode *inode)
1822 {
1823 struct dentry *alias;
1824
1825 if (hlist_empty(&inode->i_dentry))
1826 return NULL;
1827 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
1828 __dget(alias);
1829 return alias;
1830 }
1831
1832 /**
1833 * d_find_any_alias - find any alias for a given inode
1834 * @inode: inode to find an alias for
1835 *
1836 * If any aliases exist for the given inode, take and return a
1837 * reference for one of them. If no aliases exist, return %NULL.
1838 */
1839 struct dentry *d_find_any_alias(struct inode *inode)
1840 {
1841 struct dentry *de;
1842
1843 spin_lock(&inode->i_lock);
1844 de = __d_find_any_alias(inode);
1845 spin_unlock(&inode->i_lock);
1846 return de;
1847 }
1848 EXPORT_SYMBOL(d_find_any_alias);
1849
1850 /**
1851 * d_obtain_alias - find or allocate a dentry for a given inode
1852 * @inode: inode to allocate the dentry for
1853 *
1854 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1855 * similar open by handle operations. The returned dentry may be anonymous,
1856 * or may have a full name (if the inode was already in the cache).
1857 *
1858 * When called on a directory inode, we must ensure that the inode only ever
1859 * has one dentry. If a dentry is found, that is returned instead of
1860 * allocating a new one.
1861 *
1862 * On successful return, the reference to the inode has been transferred
1863 * to the dentry. In case of an error the reference on the inode is released.
1864 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1865  * be passed in, and the error will be propagated to the return value,
1866 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1867 */
1868 struct dentry *d_obtain_alias(struct inode *inode)
1869 {
1870 static const struct qstr anonstring = QSTR_INIT("/", 1);
1871 struct dentry *tmp;
1872 struct dentry *res;
1873 unsigned add_flags;
1874
1875 if (!inode)
1876 return ERR_PTR(-ESTALE);
1877 if (IS_ERR(inode))
1878 return ERR_CAST(inode);
1879
1880 res = d_find_any_alias(inode);
1881 if (res)
1882 goto out_iput;
1883
1884 tmp = __d_alloc(inode->i_sb, &anonstring);
1885 if (!tmp) {
1886 res = ERR_PTR(-ENOMEM);
1887 goto out_iput;
1888 }
1889
1890 spin_lock(&inode->i_lock);
1891 res = __d_find_any_alias(inode);
1892 if (res) {
1893 spin_unlock(&inode->i_lock);
1894 dput(tmp);
1895 goto out_iput;
1896 }
1897
1898 /* attach a disconnected dentry */
1899 add_flags = d_flags_for_inode(inode) | DCACHE_DISCONNECTED;
1900
1901 spin_lock(&tmp->d_lock);
1902 tmp->d_inode = inode;
1903 tmp->d_flags |= add_flags;
1904 hlist_add_head(&tmp->d_alias, &inode->i_dentry);
1905 hlist_bl_lock(&tmp->d_sb->s_anon);
1906 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1907 hlist_bl_unlock(&tmp->d_sb->s_anon);
1908 spin_unlock(&tmp->d_lock);
1909 spin_unlock(&inode->i_lock);
1910 security_d_instantiate(tmp, inode);
1911
1912 return tmp;
1913
1914 out_iput:
1915 if (res && !IS_ERR(res))
1916 security_d_instantiate(res, inode);
1917 iput(inode);
1918 return res;
1919 }
1920 EXPORT_SYMBOL(d_obtain_alias);
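/*
 * Illustrative sketch (editorial addition, not part of dcache.c): a
 * typical export_operations ->fh_to_dentry() ends up here.  The
 * filehandle decoding and example_iget() are hypothetical (struct fid
 * comes from <linux/exportfs.h>); the point is that the inode reference
 * is simply handed to d_obtain_alias(), which also copes with NULL and
 * ERR_PTR() inodes as described above.
 */
static struct dentry *example_fh_to_dentry(struct super_block *sb,
                                           struct fid *fid,
                                           int fh_len, int fh_type)
{
        struct inode *inode = example_iget(sb, fid->i32.ino); /* hypothetical */

        return d_obtain_alias(inode);
}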
1921
1922 /**
1923 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1924 * @inode: the inode which may have a disconnected dentry
1925 * @dentry: a negative dentry which we want to point to the inode.
1926 *
1927 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1928 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1929 * and return it, else simply d_add the inode to the dentry and return NULL.
1930 *
1931 * This is needed in the lookup routine of any filesystem that is exportable
1932 * (via knfsd) so that we can build dcache paths to directories effectively.
1933 *
1934 * If a dentry was found and moved, then it is returned. Otherwise NULL
1935 * is returned. This matches the expected return value of ->lookup.
1936 *
1937 * Cluster filesystems may call this function with a negative, hashed dentry.
1938 * In that case, we know that the inode will be a regular file, and also this
1939 * will only occur during atomic_open. So we need to check for the dentry
1940 * being already hashed only in the final case.
1941 */
1942 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1943 {
1944 struct dentry *new = NULL;
1945
1946 if (IS_ERR(inode))
1947 return ERR_CAST(inode);
1948
1949 if (inode && S_ISDIR(inode->i_mode)) {
1950 spin_lock(&inode->i_lock);
1951 new = __d_find_alias(inode, 1);
1952 if (new) {
1953 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1954 spin_unlock(&inode->i_lock);
1955 security_d_instantiate(new, inode);
1956 d_move(new, dentry);
1957 iput(inode);
1958 } else {
1959 /* already taking inode->i_lock, so d_add() by hand */
1960 __d_instantiate(dentry, inode);
1961 spin_unlock(&inode->i_lock);
1962 security_d_instantiate(dentry, inode);
1963 d_rehash(dentry);
1964 }
1965 } else {
1966 d_instantiate(dentry, inode);
1967 if (d_unhashed(dentry))
1968 d_rehash(dentry);
1969 }
1970 return new;
1971 }
1972 EXPORT_SYMBOL(d_splice_alias);
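
/*
 * Example (editorial sketch, not part of the original source): the usual
 * pattern in a ->lookup() method of an exportable filesystem is to pass
 * whatever inode was found (possibly NULL, or an ERR_PTR on failure)
 * straight to d_splice_alias() and return the result.  "foo_inode_by_name"
 * and "foo_iget" are hypothetical helpers.
 *
 *	static struct dentry *foo_lookup(struct inode *dir,
 *					 struct dentry *dentry,
 *					 unsigned int flags)
 *	{
 *		struct inode *inode = NULL;
 *		ino_t ino = foo_inode_by_name(dir, &dentry->d_name);
 *
 *		if (ino)
 *			inode = foo_iget(dir->i_sb, ino);
 *		return d_splice_alias(inode, dentry);
 *	}
 */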
1973
1974 /**
1975 * d_add_ci - lookup or allocate new dentry with case-exact name
1976 * @inode: the inode case-insensitive lookup has found
1977 * @dentry: the negative dentry that was passed to the parent's lookup func
1978 * @name: the case-exact name to be associated with the returned dentry
1979 *
1980 * This is to avoid filling the dcache with case-insensitive names to the
1981 * same inode, only the actual correct case is stored in the dcache for
1982 * case-insensitive filesystems.
1983 *
1984  * For a case-insensitive lookup match, if the case-exact dentry
1985  * already exists in the dcache, use it and return it.
1986 *
1987 * If no entry exists with the exact case name, allocate new dentry with
1988 * the exact case, and return the spliced entry.
1989 */
1990 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1991 struct qstr *name)
1992 {
1993 struct dentry *found;
1994 struct dentry *new;
1995
1996 /*
1997 * First check if a dentry matching the name already exists,
1998 * if not go ahead and create it now.
1999 */
2000 found = d_hash_and_lookup(dentry->d_parent, name);
2001 if (unlikely(IS_ERR(found)))
2002 goto err_out;
2003 if (!found) {
2004 new = d_alloc(dentry->d_parent, name);
2005 if (!new) {
2006 found = ERR_PTR(-ENOMEM);
2007 goto err_out;
2008 }
2009
2010 found = d_splice_alias(inode, new);
2011 if (found) {
2012 dput(new);
2013 return found;
2014 }
2015 return new;
2016 }
2017
2018 /*
2019  * If a matching dentry exists and it's not negative, use it.
2020 *
2021 * Decrement the reference count to balance the iget() done
2022 * earlier on.
2023 */
2024 if (found->d_inode) {
2025 if (unlikely(found->d_inode != inode)) {
2026 /* This can't happen because bad inodes are unhashed. */
2027 BUG_ON(!is_bad_inode(inode));
2028 BUG_ON(!is_bad_inode(found->d_inode));
2029 }
2030 iput(inode);
2031 return found;
2032 }
2033
2034 /*
2035 * Negative dentry: instantiate it unless the inode is a directory and
2036 * already has a dentry.
2037 */
2038 new = d_splice_alias(inode, found);
2039 if (new) {
2040 dput(found);
2041 found = new;
2042 }
2043 return found;
2044
2045 err_out:
2046 iput(inode);
2047 return found;
2048 }
2049 EXPORT_SYMBOL(d_add_ci);
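
/*
 * Example (editorial sketch, not part of the original source): inside a
 * case-insensitive filesystem's ->lookup(), once the directory scan has
 * produced the on-disk (case-exact) spelling and its inode, that exact
 * name is handed to d_add_ci() rather than d_add().  "disk_name" and
 * "disk_len" are assumed to come from the hypothetical on-disk search.
 *
 *	struct qstr exact = QSTR_INIT(disk_name, disk_len);
 *
 *	return d_add_ci(dentry, inode, &exact);
 */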
2050
2051 /*
2052 * Do the slow-case of the dentry name compare.
2053 *
2054 * Unlike the dentry_cmp() function, we need to atomically
2055 * load the name and length information, so that the
2056 * filesystem can rely on them, and can use the 'name' and
2057 * 'len' information without worrying about walking off the
2058 * end of memory etc.
2059 *
2060 * Thus the read_seqcount_retry() and the "duplicate" info
2061 * in arguments (the low-level filesystem should not look
2062 * at the dentry inode or name contents directly, since
2063 * rename can change them while we're in RCU mode).
2064 */
2065 enum slow_d_compare {
2066 D_COMP_OK,
2067 D_COMP_NOMATCH,
2068 D_COMP_SEQRETRY,
2069 };
2070
2071 static noinline enum slow_d_compare slow_dentry_cmp(
2072 const struct dentry *parent,
2073 struct dentry *dentry,
2074 unsigned int seq,
2075 const struct qstr *name)
2076 {
2077 int tlen = dentry->d_name.len;
2078 const char *tname = dentry->d_name.name;
2079
2080 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2081 cpu_relax();
2082 return D_COMP_SEQRETRY;
2083 }
2084 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2085 return D_COMP_NOMATCH;
2086 return D_COMP_OK;
2087 }
2088
2089 /**
2090 * __d_lookup_rcu - search for a dentry (racy, store-free)
2091 * @parent: parent dentry
2092 * @name: qstr of name we wish to find
2093 * @seqp: returns d_seq value at the point where the dentry was found
2094 * Returns: dentry, or NULL
2095 *
2096 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2097 * resolution (store-free path walking) design described in
2098 * Documentation/filesystems/path-lookup.txt.
2099 *
2100 * This is not to be used outside core vfs.
2101 *
2102 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2103 * held, and rcu_read_lock held. The returned dentry must not be stored into
2104 * without taking d_lock and checking d_seq sequence count against @seq
2105 * returned here.
2106 *
2107 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2108 * function.
2109 *
2110 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2111 * the returned dentry, so long as its parent's seqlock is checked after the
2112 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2113 * is formed, giving integrity down the path walk.
2114 *
2115 * NOTE! The caller *has* to check the resulting dentry against the sequence
2116 * number we've returned before using any of the resulting dentry state!
2117 */
2118 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2119 const struct qstr *name,
2120 unsigned *seqp)
2121 {
2122 u64 hashlen = name->hash_len;
2123 const unsigned char *str = name->name;
2124 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2125 struct hlist_bl_node *node;
2126 struct dentry *dentry;
2127
2128 /*
2129  * Note: There is significant duplication with __d_lookup which is
2130 * required to prevent single threaded performance regressions
2131 * especially on architectures where smp_rmb (in seqcounts) are costly.
2132 * Keep the two functions in sync.
2133 */
2134
2135 /*
2136 * The hash list is protected using RCU.
2137 *
2138 * Carefully use d_seq when comparing a candidate dentry, to avoid
2139 * races with d_move().
2140 *
2141 * It is possible that concurrent renames can mess up our list
2142  * walk here and cause us to miss our dentry, giving a
2143 * false-negative result. d_lookup() protects against concurrent
2144 * renames using rename_lock seqlock.
2145 *
2146 * See Documentation/filesystems/path-lookup.txt for more details.
2147 */
2148 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2149 unsigned seq;
2150
2151 seqretry:
2152 /*
2153 * The dentry sequence count protects us from concurrent
2154 * renames, and thus protects parent and name fields.
2155 *
2156 * The caller must perform a seqcount check in order
2157 * to do anything useful with the returned dentry.
2158 *
2159 * NOTE! We do a "raw" seqcount_begin here. That means that
2160 * we don't wait for the sequence count to stabilize if it
2161 * is in the middle of a sequence change. If we do the slow
2162 * dentry compare, we will do seqretries until it is stable,
2163 * and if we end up with a successful lookup, we actually
2164 * want to exit RCU lookup anyway.
2165 */
2166 seq = raw_seqcount_begin(&dentry->d_seq);
2167 if (dentry->d_parent != parent)
2168 continue;
2169 if (d_unhashed(dentry))
2170 continue;
2171
2172 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2173 if (dentry->d_name.hash != hashlen_hash(hashlen))
2174 continue;
2175 *seqp = seq;
2176 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2177 case D_COMP_OK:
2178 return dentry;
2179 case D_COMP_NOMATCH:
2180 continue;
2181 default:
2182 goto seqretry;
2183 }
2184 }
2185
2186 if (dentry->d_name.hash_len != hashlen)
2187 continue;
2188 *seqp = seq;
2189 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2190 return dentry;
2191 }
2192 return NULL;
2193 }
2194
2195 /**
2196 * d_lookup - search for a dentry
2197 * @parent: parent dentry
2198 * @name: qstr of name we wish to find
2199 * Returns: dentry, or NULL
2200 *
2201 * d_lookup searches the children of the parent dentry for the name in
2202 * question. If the dentry is found its reference count is incremented and the
2203 * dentry is returned. The caller must use dput to free the entry when it has
2204 * finished using it. %NULL is returned if the dentry does not exist.
2205 */
2206 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2207 {
2208 struct dentry *dentry;
2209 unsigned seq;
2210
2211 do {
2212 seq = read_seqbegin(&rename_lock);
2213 dentry = __d_lookup(parent, name);
2214 if (dentry)
2215 break;
2216 } while (read_seqretry(&rename_lock, seq));
2217 return dentry;
2218 }
2219 EXPORT_SYMBOL(d_lookup);
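
/*
 * Example (editorial sketch, not part of the original source): d_lookup()
 * expects @name->hash to be filled in already, and the caller owns the
 * returned reference.  d_hash_and_lookup() below wraps the hashing step
 * when starting from a raw name.
 *
 *	struct qstr q = QSTR_INIT("child", 5);
 *	struct dentry *child;
 *
 *	q.hash = full_name_hash(q.name, q.len);
 *	child = d_lookup(parent, &q);
 *	if (child) {
 *		... inspect child->d_inode ...
 *		dput(child);
 *	}
 */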
2220
2221 /**
2222 * __d_lookup - search for a dentry (racy)
2223 * @parent: parent dentry
2224 * @name: qstr of name we wish to find
2225 * Returns: dentry, or NULL
2226 *
2227 * __d_lookup is like d_lookup, however it may (rarely) return a
2228 * false-negative result due to unrelated rename activity.
2229 *
2230 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2231 * however it must be used carefully, eg. with a following d_lookup in
2232 * the case of failure.
2233 *
2234 * __d_lookup callers must be commented.
2235 */
2236 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2237 {
2238 unsigned int len = name->len;
2239 unsigned int hash = name->hash;
2240 const unsigned char *str = name->name;
2241 struct hlist_bl_head *b = d_hash(parent, hash);
2242 struct hlist_bl_node *node;
2243 struct dentry *found = NULL;
2244 struct dentry *dentry;
2245
2246 /*
2247 * Note: There is significant duplication with __d_lookup_rcu which is
2248 * required to prevent single threaded performance regressions
2249 * especially on architectures where smp_rmb (in seqcounts) are costly.
2250 * Keep the two functions in sync.
2251 */
2252
2253 /*
2254 * The hash list is protected using RCU.
2255 *
2256 * Take d_lock when comparing a candidate dentry, to avoid races
2257 * with d_move().
2258 *
2259 * It is possible that concurrent renames can mess up our list
2260  * walk here and cause us to miss our dentry, giving a
2261 * false-negative result. d_lookup() protects against concurrent
2262 * renames using rename_lock seqlock.
2263 *
2264 * See Documentation/filesystems/path-lookup.txt for more details.
2265 */
2266 rcu_read_lock();
2267
2268 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2269
2270 if (dentry->d_name.hash != hash)
2271 continue;
2272
2273 spin_lock(&dentry->d_lock);
2274 if (dentry->d_parent != parent)
2275 goto next;
2276 if (d_unhashed(dentry))
2277 goto next;
2278
2279 /*
2280 * It is safe to compare names since d_move() cannot
2281 * change the qstr (protected by d_lock).
2282 */
2283 if (parent->d_flags & DCACHE_OP_COMPARE) {
2284 int tlen = dentry->d_name.len;
2285 const char *tname = dentry->d_name.name;
2286 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2287 goto next;
2288 } else {
2289 if (dentry->d_name.len != len)
2290 goto next;
2291 if (dentry_cmp(dentry, str, len))
2292 goto next;
2293 }
2294
2295 dentry->d_lockref.count++;
2296 found = dentry;
2297 spin_unlock(&dentry->d_lock);
2298 break;
2299 next:
2300 spin_unlock(&dentry->d_lock);
2301 }
2302 rcu_read_unlock();
2303
2304 return found;
2305 }
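
/*
 * Example (editorial sketch, not part of the original source): the
 * "careful" caller pattern mentioned above - treat a miss from
 * __d_lookup() as possibly spurious and fall back to d_lookup(), which
 * retries under the rename_lock seqlock.
 *
 *	dentry = __d_lookup(parent, name);
 *	if (!dentry)
 *		dentry = d_lookup(parent, name);
 */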
2306
2307 /**
2308 * d_hash_and_lookup - hash the qstr then search for a dentry
2309 * @dir: Directory to search in
2310 * @name: qstr of name we wish to find
2311 *
2312 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2313 */
2314 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2315 {
2316 /*
2317 * Check for a fs-specific hash function. Note that we must
2318 * calculate the standard hash first, as the d_op->d_hash()
2319 * routine may choose to leave the hash value unchanged.
2320 */
2321 name->hash = full_name_hash(name->name, name->len);
2322 if (dir->d_flags & DCACHE_OP_HASH) {
2323 int err = dir->d_op->d_hash(dir, name);
2324 if (unlikely(err < 0))
2325 return ERR_PTR(err);
2326 }
2327 return d_lookup(dir, name);
2328 }
2329 EXPORT_SYMBOL(d_hash_and_lookup);
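
/*
 * Example (editorial sketch, not part of the original source): starting
 * from a plain NUL-terminated string, d_hash_and_lookup() fills in the
 * hash (honouring a ->d_hash() method if the directory has one) before
 * doing the lookup.  "name" is an assumed C string in scope.
 *
 *	struct qstr this = QSTR_INIT(name, strlen(name));
 *	struct dentry *child;
 *
 *	child = d_hash_and_lookup(dir, &this);
 *	if (!IS_ERR_OR_NULL(child)) {
 *		... use child ...
 *		dput(child);
 *	}
 */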
2330
2331 /**
2332 * d_validate - verify dentry provided from insecure source (deprecated)
2333 * @dentry: The dentry alleged to be valid child of @dparent
2334 * @dparent: The parent dentry (known to be valid)
2335 *
2336  * An insecure source has sent us a dentry; here we verify it and dget() it.
2337  * This is used by ncpfs in its readdir implementation.
2338  * Zero is returned if the dentry is invalid.
2339 *
2340  * This function is slow for big directories and is deprecated; do not use it.
2341 */
2342 int d_validate(struct dentry *dentry, struct dentry *dparent)
2343 {
2344 struct dentry *child;
2345
2346 spin_lock(&dparent->d_lock);
2347 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
2348 if (dentry == child) {
2349 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2350 __dget_dlock(dentry);
2351 spin_unlock(&dentry->d_lock);
2352 spin_unlock(&dparent->d_lock);
2353 return 1;
2354 }
2355 }
2356 spin_unlock(&dparent->d_lock);
2357
2358 return 0;
2359 }
2360 EXPORT_SYMBOL(d_validate);
2361
2362 /*
2363 * When a file is deleted, we have two options:
2364 * - turn this dentry into a negative dentry
2365 * - unhash this dentry and free it.
2366 *
2367 * Usually, we want to just turn this into
2368 * a negative dentry, but if anybody else is
2369 * currently using the dentry or the inode
2370 * we can't do that and we fall back on removing
2371 * it from the hash queues and waiting for
2372 * it to be deleted later when it has no users
2373 */
2374
2375 /**
2376 * d_delete - delete a dentry
2377 * @dentry: The dentry to delete
2378 *
2379 * Turn the dentry into a negative dentry if possible, otherwise
2380 * remove it from the hash queues so it can be deleted later
2381 */
2382
2383 void d_delete(struct dentry * dentry)
2384 {
2385 struct inode *inode;
2386 int isdir = 0;
2387 /*
2388 * Are we the only user?
2389 */
2390 again:
2391 spin_lock(&dentry->d_lock);
2392 inode = dentry->d_inode;
2393 isdir = S_ISDIR(inode->i_mode);
2394 if (dentry->d_lockref.count == 1) {
2395 if (!spin_trylock(&inode->i_lock)) {
2396 spin_unlock(&dentry->d_lock);
2397 cpu_relax();
2398 goto again;
2399 }
2400 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2401 dentry_unlink_inode(dentry);
2402 fsnotify_nameremove(dentry, isdir);
2403 return;
2404 }
2405
2406 if (!d_unhashed(dentry))
2407 __d_drop(dentry);
2408
2409 spin_unlock(&dentry->d_lock);
2410
2411 fsnotify_nameremove(dentry, isdir);
2412 }
2413 EXPORT_SYMBOL(d_delete);
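
/*
 * Example (editorial sketch, condensed from how vfs_unlink() in
 * fs/namei.c uses this): once the filesystem's ->unlink() has succeeded,
 * d_delete() turns the cached name negative, or merely unhashes it if
 * the dentry is still in use elsewhere.
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 */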
2414
2415 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2416 {
2417 BUG_ON(!d_unhashed(entry));
2418 hlist_bl_lock(b);
2419 entry->d_flags |= DCACHE_RCUACCESS;
2420 hlist_bl_add_head_rcu(&entry->d_hash, b);
2421 hlist_bl_unlock(b);
2422 }
2423
2424 static void _d_rehash(struct dentry * entry)
2425 {
2426 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2427 }
2428
2429 /**
2430 * d_rehash - add an entry back to the hash
2431 * @entry: dentry to add to the hash
2432 *
2433 * Adds a dentry to the hash according to its name.
2434 */
2435
2436 void d_rehash(struct dentry * entry)
2437 {
2438 spin_lock(&entry->d_lock);
2439 _d_rehash(entry);
2440 spin_unlock(&entry->d_lock);
2441 }
2442 EXPORT_SYMBOL(d_rehash);
2443
2444 /**
2445 * dentry_update_name_case - update case insensitive dentry with a new name
2446 * @dentry: dentry to be updated
2447 * @name: new name
2448 *
2449 * Update a case insensitive dentry with new case of name.
2450 *
2451 * dentry must have been returned by d_lookup with name @name. Old and new
2452 * name lengths must match (ie. no d_compare which allows mismatched name
2453 * lengths).
2454 *
2455 * Parent inode i_mutex must be held over d_lookup and into this call (to
2456 * keep renames and concurrent inserts, and readdir(2) away).
2457 */
2458 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2459 {
2460 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2461 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2462
2463 spin_lock(&dentry->d_lock);
2464 write_seqcount_begin(&dentry->d_seq);
2465 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2466 write_seqcount_end(&dentry->d_seq);
2467 spin_unlock(&dentry->d_lock);
2468 }
2469 EXPORT_SYMBOL(dentry_update_name_case);
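
/*
 * Example (editorial sketch, not part of the original source): a
 * case-insensitive filesystem that has just learned the on-disk spelling
 * can rewrite a cached alias under the parent's i_mutex, as required
 * above.  "parent" and "disk_name" (a qstr holding the exact spelling,
 * same length as the cached name) are assumptions.
 *
 *	mutex_lock(&parent->d_inode->i_mutex);
 *	alias = d_hash_and_lookup(parent, &disk_name);
 *	if (!IS_ERR_OR_NULL(alias)) {
 *		dentry_update_name_case(alias, &disk_name);
 *		dput(alias);
 *	}
 *	mutex_unlock(&parent->d_inode->i_mutex);
 */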
2470
2471 static void switch_names(struct dentry *dentry, struct dentry *target)
2472 {
2473 if (dname_external(target)) {
2474 if (dname_external(dentry)) {
2475 /*
2476 * Both external: swap the pointers
2477 */
2478 swap(target->d_name.name, dentry->d_name.name);
2479 } else {
2480 /*
2481 * dentry:internal, target:external. Steal target's
2482 * storage and make target internal.
2483 */
2484 memcpy(target->d_iname, dentry->d_name.name,
2485 dentry->d_name.len + 1);
2486 dentry->d_name.name = target->d_name.name;
2487 target->d_name.name = target->d_iname;
2488 }
2489 } else {
2490 if (dname_external(dentry)) {
2491 /*
2492 * dentry:external, target:internal. Give dentry's
2493 * storage to target and make dentry internal
2494 */
2495 memcpy(dentry->d_iname, target->d_name.name,
2496 target->d_name.len + 1);
2497 target->d_name.name = dentry->d_name.name;
2498 dentry->d_name.name = dentry->d_iname;
2499 } else {
2500 /*
2501 * Both are internal. Just copy target to dentry
2502 */
2503 memcpy(dentry->d_iname, target->d_name.name,
2504 target->d_name.len + 1);
2505 dentry->d_name.len = target->d_name.len;
2506 return;
2507 }
2508 }
2509 swap(dentry->d_name.len, target->d_name.len);
2510 }
2511
2512 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2513 {
2514 /*
2515 * XXXX: do we really need to take target->d_lock?
2516 */
2517 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2518 spin_lock(&target->d_parent->d_lock);
2519 else {
2520 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2521 spin_lock(&dentry->d_parent->d_lock);
2522 spin_lock_nested(&target->d_parent->d_lock,
2523 DENTRY_D_LOCK_NESTED);
2524 } else {
2525 spin_lock(&target->d_parent->d_lock);
2526 spin_lock_nested(&dentry->d_parent->d_lock,
2527 DENTRY_D_LOCK_NESTED);
2528 }
2529 }
2530 if (target < dentry) {
2531 spin_lock_nested(&target->d_lock, 2);
2532 spin_lock_nested(&dentry->d_lock, 3);
2533 } else {
2534 spin_lock_nested(&dentry->d_lock, 2);
2535 spin_lock_nested(&target->d_lock, 3);
2536 }
2537 }
2538
2539 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2540 struct dentry *target)
2541 {
2542 if (target->d_parent != dentry->d_parent)
2543 spin_unlock(&dentry->d_parent->d_lock);
2544 if (target->d_parent != target)
2545 spin_unlock(&target->d_parent->d_lock);
2546 }
2547
2548 /*
2549 * When switching names, the actual string doesn't strictly have to
2550 * be preserved in the target - because we're dropping the target
2551 * anyway. As such, we can just do a simple memcpy() to copy over
2552 * the new name before we switch.
2553 *
2554 * Note that we have to be a lot more careful about getting the hash
2555 * switched - we have to switch the hash value properly even if it
2556 * then no longer matches the actual (corrupted) string of the target.
2557 * The hash value has to match the hash queue that the dentry is on..
2558 */
2559 /*
2560 * __d_move - move a dentry
2561 * @dentry: entry to move
2562 * @target: new dentry
2563 *
2564 * Update the dcache to reflect the move of a file name. Negative
2565 * dcache entries should not be moved in this way. Caller must hold
2566 * rename_lock, the i_mutex of the source and target directories,
2567 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2568 */
2569 static void __d_move(struct dentry * dentry, struct dentry * target)
2570 {
2571 if (!dentry->d_inode)
2572 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2573
2574 BUG_ON(d_ancestor(dentry, target));
2575 BUG_ON(d_ancestor(target, dentry));
2576
2577 dentry_lock_for_move(dentry, target);
2578
2579 write_seqcount_begin(&dentry->d_seq);
2580 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2581
2582 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2583
2584 /*
2585 * Move the dentry to the target hash queue. Don't bother checking
2586 * for the same hash queue because of how unlikely it is.
2587 */
2588 __d_drop(dentry);
2589 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2590
2591 /* Unhash the target: dput() will then get rid of it */
2592 __d_drop(target);
2593
2594 list_del(&dentry->d_u.d_child);
2595 list_del(&target->d_u.d_child);
2596
2597 /* Switch the names.. */
2598 switch_names(dentry, target);
2599 swap(dentry->d_name.hash, target->d_name.hash);
2600
2601 /* ... and switch the parents */
2602 if (IS_ROOT(dentry)) {
2603 dentry->d_parent = target->d_parent;
2604 target->d_parent = target;
2605 INIT_LIST_HEAD(&target->d_u.d_child);
2606 } else {
2607 swap(dentry->d_parent, target->d_parent);
2608
2609 /* And add them back to the (new) parent lists */
2610 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2611 }
2612
2613 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2614
2615 write_seqcount_end(&target->d_seq);
2616 write_seqcount_end(&dentry->d_seq);
2617
2618 dentry_unlock_parents_for_move(dentry, target);
2619 spin_unlock(&target->d_lock);
2620 fsnotify_d_move(dentry);
2621 spin_unlock(&dentry->d_lock);
2622 }
2623
2624 /*
2625 * d_move - move a dentry
2626 * @dentry: entry to move
2627 * @target: new dentry
2628 *
2629 * Update the dcache to reflect the move of a file name. Negative
2630 * dcache entries should not be moved in this way. See the locking
2631 * requirements for __d_move.
2632 */
2633 void d_move(struct dentry *dentry, struct dentry *target)
2634 {
2635 write_seqlock(&rename_lock);
2636 __d_move(dentry, target);
2637 write_sequnlock(&rename_lock);
2638 }
2639 EXPORT_SYMBOL(d_move);
2640
2641 /**
2642 * d_ancestor - search for an ancestor
2643 * @p1: ancestor dentry
2644 * @p2: child dentry
2645 *
2646 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2647 * an ancestor of p2, else NULL.
2648 */
2649 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2650 {
2651 struct dentry *p;
2652
2653 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2654 if (p->d_parent == p1)
2655 return p;
2656 }
2657 return NULL;
2658 }
2659
2660 /*
2661 * This helper attempts to cope with remotely renamed directories
2662 *
2663 * It assumes that the caller is already holding
2664 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2665 *
2666 * Note: If ever the locking in lock_rename() changes, then please
2667 * remember to update this too...
2668 */
2669 static struct dentry *__d_unalias(struct inode *inode,
2670 struct dentry *dentry, struct dentry *alias)
2671 {
2672 struct mutex *m1 = NULL, *m2 = NULL;
2673 struct dentry *ret = ERR_PTR(-EBUSY);
2674
2675 /* If alias and dentry share a parent, then no extra locks required */
2676 if (alias->d_parent == dentry->d_parent)
2677 goto out_unalias;
2678
2679 /* See lock_rename() */
2680 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2681 goto out_err;
2682 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2683 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2684 goto out_err;
2685 m2 = &alias->d_parent->d_inode->i_mutex;
2686 out_unalias:
2687 if (likely(!d_mountpoint(alias))) {
2688 __d_move(alias, dentry);
2689 ret = alias;
2690 }
2691 out_err:
2692 spin_unlock(&inode->i_lock);
2693 if (m2)
2694 mutex_unlock(m2);
2695 if (m1)
2696 mutex_unlock(m1);
2697 return ret;
2698 }
2699
2700 /*
2701 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2702 * named dentry in place of the dentry to be replaced.
2703  * Returns with anon->d_lock held!
2704 */
2705 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2706 {
2707 struct dentry *dparent;
2708
2709 dentry_lock_for_move(anon, dentry);
2710
2711 write_seqcount_begin(&dentry->d_seq);
2712 write_seqcount_begin_nested(&anon->d_seq, DENTRY_D_LOCK_NESTED);
2713
2714 dparent = dentry->d_parent;
2715
2716 switch_names(dentry, anon);
2717 swap(dentry->d_name.hash, anon->d_name.hash);
2718
2719 dentry->d_parent = dentry;
2720 list_del_init(&dentry->d_u.d_child);
2721 anon->d_parent = dparent;
2722 list_move(&anon->d_u.d_child, &dparent->d_subdirs);
2723
2724 write_seqcount_end(&dentry->d_seq);
2725 write_seqcount_end(&anon->d_seq);
2726
2727 dentry_unlock_parents_for_move(anon, dentry);
2728 spin_unlock(&dentry->d_lock);
2729
2730 /* anon->d_lock still locked, returns locked */
2731 }
2732
2733 /**
2734 * d_materialise_unique - introduce an inode into the tree
2735 * @dentry: candidate dentry
2736 * @inode: inode to bind to the dentry, to which aliases may be attached
2737 *
2738  * Introduces a dentry into the tree, substituting an extant disconnected
2739 * root directory alias in its place if there is one. Caller must hold the
2740 * i_mutex of the parent directory.
2741 */
2742 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2743 {
2744 struct dentry *actual;
2745
2746 BUG_ON(!d_unhashed(dentry));
2747
2748 if (!inode) {
2749 actual = dentry;
2750 __d_instantiate(dentry, NULL);
2751 d_rehash(actual);
2752 goto out_nolock;
2753 }
2754
2755 spin_lock(&inode->i_lock);
2756
2757 if (S_ISDIR(inode->i_mode)) {
2758 struct dentry *alias;
2759
2760 /* Does an aliased dentry already exist? */
2761 alias = __d_find_alias(inode, 0);
2762 if (alias) {
2763 actual = alias;
2764 write_seqlock(&rename_lock);
2765
2766 if (d_ancestor(alias, dentry)) {
2767 /* Check for loops */
2768 actual = ERR_PTR(-ELOOP);
2769 spin_unlock(&inode->i_lock);
2770 } else if (IS_ROOT(alias)) {
2771 /* Is this an anonymous mountpoint that we
2772 * could splice into our tree? */
2773 __d_materialise_dentry(dentry, alias);
2774 write_sequnlock(&rename_lock);
2775 __d_drop(alias);
2776 goto found;
2777 } else {
2778 /* Nope, but we must(!) avoid directory
2779 * aliasing. This drops inode->i_lock */
2780 actual = __d_unalias(inode, dentry, alias);
2781 }
2782 write_sequnlock(&rename_lock);
2783 if (IS_ERR(actual)) {
2784 if (PTR_ERR(actual) == -ELOOP)
2785 pr_warn_ratelimited(
2786 "VFS: Lookup of '%s' in %s %s"
2787 " would have caused loop\n",
2788 dentry->d_name.name,
2789 inode->i_sb->s_type->name,
2790 inode->i_sb->s_id);
2791 dput(alias);
2792 }
2793 goto out_nolock;
2794 }
2795 }
2796
2797 /* Add a unique reference */
2798 actual = __d_instantiate_unique(dentry, inode);
2799 if (!actual)
2800 actual = dentry;
2801 else
2802 BUG_ON(!d_unhashed(actual));
2803
2804 spin_lock(&actual->d_lock);
2805 found:
2806 _d_rehash(actual);
2807 spin_unlock(&actual->d_lock);
2808 spin_unlock(&inode->i_lock);
2809 out_nolock:
2810 if (actual == dentry) {
2811 security_d_instantiate(dentry, inode);
2812 return NULL;
2813 }
2814
2815 iput(inode);
2816 return actual;
2817 }
2818 EXPORT_SYMBOL_GPL(d_materialise_unique);
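
/*
 * Example (editorial sketch, not part of the original source): in an
 * exportable filesystem's ->lookup() (the NFS client follows this
 * pattern), the result of d_materialise_unique() is returned directly -
 * NULL means @dentry itself was used, a non-NULL value is a preexisting
 * alias that should be returned instead.  "foo_iget" and "ino" are
 * hypothetical.
 *
 *	inode = foo_iget(dir->i_sb, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 *	return d_materialise_unique(dentry, inode);
 */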
2819
2820 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2821 {
2822 *buflen -= namelen;
2823 if (*buflen < 0)
2824 return -ENAMETOOLONG;
2825 *buffer -= namelen;
2826 memcpy(*buffer, str, namelen);
2827 return 0;
2828 }
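
/*
 * Worked example (editorial, not part of the original source): prepend()
 * fills the buffer from the end.  With an 8-byte buffer, after the three
 * calls below p points at buf, the buffer holds "foo/bar" plus the
 * terminating NUL, and len is 0; one more byte would make prepend()
 * return -ENAMETOOLONG.
 *
 *	char buf[8];
 *	char *p = buf + sizeof(buf);
 *	int len = sizeof(buf);
 *
 *	prepend(&p, &len, "\0", 1);
 *	prepend(&p, &len, "bar", 3);
 *	prepend(&p, &len, "foo/", 4);
 */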
2829
2830 /**
2831 * prepend_name - prepend a pathname in front of current buffer pointer
2832 * @buffer: buffer pointer
2833 * @buflen: allocated length of the buffer
2834 * @name: name string and length qstr structure
2835 *
2836 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2837 * make sure that either the old or the new name pointer and length are
2838  * fetched. However, there may be a mismatch between length and pointer.
2839  * The length cannot be trusted, so we copy the name byte by byte until
2840  * the length is reached or a null byte is found. It also prepends "/" at
2841  * the beginning of the name. The sequence number check at the caller will
2842  * retry it when a d_move() does happen. So any garbage in the buffer
2843 * due to mismatched pointer and length will be discarded.
2844 */
2845 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2846 {
2847 const char *dname = ACCESS_ONCE(name->name);
2848 u32 dlen = ACCESS_ONCE(name->len);
2849 char *p;
2850
2851 if (*buflen < dlen + 1)
2852 return -ENAMETOOLONG;
2853 *buflen -= dlen + 1;
2854 p = *buffer -= dlen + 1;
2855 *p++ = '/';
2856 while (dlen--) {
2857 char c = *dname++;
2858 if (!c)
2859 break;
2860 *p++ = c;
2861 }
2862 return 0;
2863 }
2864
2865 /**
2866 * prepend_path - Prepend path string to a buffer
2867 * @path: the dentry/vfsmount to report
2868 * @root: root vfsmnt/dentry
2869 * @buffer: pointer to the end of the buffer
2870 * @buflen: pointer to buffer length
2871 *
2872 * The function will first try to write out the pathname without taking any
2873 * lock other than the RCU read lock to make sure that dentries won't go away.
2874 * It only checks the sequence number of the global rename_lock as any change
2875 * in the dentry's d_seq will be preceded by changes in the rename_lock
2876  * sequence number. If the sequence number has changed, it will restart
2877 * the whole pathname back-tracing sequence again by taking the rename_lock.
2878 * In this case, there is no need to take the RCU read lock as the recursive
2879 * parent pointer references will keep the dentry chain alive as long as no
2880 * rename operation is performed.
2881 */
2882 static int prepend_path(const struct path *path,
2883 const struct path *root,
2884 char **buffer, int *buflen)
2885 {
2886 struct dentry *dentry;
2887 struct vfsmount *vfsmnt;
2888 struct mount *mnt;
2889 int error = 0;
2890 unsigned seq, m_seq = 0;
2891 char *bptr;
2892 int blen;
2893
2894 rcu_read_lock();
2895 restart_mnt:
2896 read_seqbegin_or_lock(&mount_lock, &m_seq);
2897 seq = 0;
2898 rcu_read_lock();
2899 restart:
2900 bptr = *buffer;
2901 blen = *buflen;
2902 error = 0;
2903 dentry = path->dentry;
2904 vfsmnt = path->mnt;
2905 mnt = real_mount(vfsmnt);
2906 read_seqbegin_or_lock(&rename_lock, &seq);
2907 while (dentry != root->dentry || vfsmnt != root->mnt) {
2908 struct dentry * parent;
2909
2910 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2911 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2912 /* Global root? */
2913 if (mnt != parent) {
2914 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2915 mnt = parent;
2916 vfsmnt = &mnt->mnt;
2917 continue;
2918 }
2919 /*
2920 * Filesystems needing to implement special "root names"
2921 * should do so with ->d_dname()
2922 */
2923 if (IS_ROOT(dentry) &&
2924 (dentry->d_name.len != 1 ||
2925 dentry->d_name.name[0] != '/')) {
2926 WARN(1, "Root dentry has weird name <%.*s>\n",
2927 (int) dentry->d_name.len,
2928 dentry->d_name.name);
2929 }
2930 if (!error)
2931 error = is_mounted(vfsmnt) ? 1 : 2;
2932 break;
2933 }
2934 parent = dentry->d_parent;
2935 prefetch(parent);
2936 error = prepend_name(&bptr, &blen, &dentry->d_name);
2937 if (error)
2938 break;
2939
2940 dentry = parent;
2941 }
2942 if (!(seq & 1))
2943 rcu_read_unlock();
2944 if (need_seqretry(&rename_lock, seq)) {
2945 seq = 1;
2946 goto restart;
2947 }
2948 done_seqretry(&rename_lock, seq);
2949
2950 if (!(m_seq & 1))
2951 rcu_read_unlock();
2952 if (need_seqretry(&mount_lock, m_seq)) {
2953 m_seq = 1;
2954 goto restart_mnt;
2955 }
2956 done_seqretry(&mount_lock, m_seq);
2957
2958 if (error >= 0 && bptr == *buffer) {
2959 if (--blen < 0)
2960 error = -ENAMETOOLONG;
2961 else
2962 *--bptr = '/';
2963 }
2964 *buffer = bptr;
2965 *buflen = blen;
2966 return error;
2967 }
2968
2969 /**
2970 * __d_path - return the path of a dentry
2971 * @path: the dentry/vfsmount to report
2972 * @root: root vfsmnt/dentry
2973 * @buf: buffer to return value in
2974 * @buflen: buffer length
2975 *
2976 * Convert a dentry into an ASCII path name.
2977 *
2978 * Returns a pointer into the buffer or an error code if the
2979 * path was too long.
2980 *
2981 * "buflen" should be positive.
2982 *
2983 * If the path is not reachable from the supplied root, return %NULL.
2984 */
2985 char *__d_path(const struct path *path,
2986 const struct path *root,
2987 char *buf, int buflen)
2988 {
2989 char *res = buf + buflen;
2990 int error;
2991
2992 prepend(&res, &buflen, "\0", 1);
2993 error = prepend_path(path, root, &res, &buflen);
2994
2995 if (error < 0)
2996 return ERR_PTR(error);
2997 if (error > 0)
2998 return NULL;
2999 return res;
3000 }
3001
3002 char *d_absolute_path(const struct path *path,
3003 char *buf, int buflen)
3004 {
3005 struct path root = {};
3006 char *res = buf + buflen;
3007 int error;
3008
3009 prepend(&res, &buflen, "\0", 1);
3010 error = prepend_path(path, &root, &res, &buflen);
3011
3012 if (error > 1)
3013 error = -EINVAL;
3014 if (error < 0)
3015 return ERR_PTR(error);
3016 return res;
3017 }
3018
3019 /*
3020 * same as __d_path but appends "(deleted)" for unlinked files.
3021 */
3022 static int path_with_deleted(const struct path *path,
3023 const struct path *root,
3024 char **buf, int *buflen)
3025 {
3026 prepend(buf, buflen, "\0", 1);
3027 if (d_unlinked(path->dentry)) {
3028 int error = prepend(buf, buflen, " (deleted)", 10);
3029 if (error)
3030 return error;
3031 }
3032
3033 return prepend_path(path, root, buf, buflen);
3034 }
3035
3036 static int prepend_unreachable(char **buffer, int *buflen)
3037 {
3038 return prepend(buffer, buflen, "(unreachable)", 13);
3039 }
3040
3041 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3042 {
3043 unsigned seq;
3044
3045 do {
3046 seq = read_seqcount_begin(&fs->seq);
3047 *root = fs->root;
3048 } while (read_seqcount_retry(&fs->seq, seq));
3049 }
3050
3051 /**
3052 * d_path - return the path of a dentry
3053 * @path: path to report
3054 * @buf: buffer to return value in
3055 * @buflen: buffer length
3056 *
3057 * Convert a dentry into an ASCII path name. If the entry has been deleted
3058 * the string " (deleted)" is appended. Note that this is ambiguous.
3059 *
3060 * Returns a pointer into the buffer or an error code if the path was
3061 * too long. Note: Callers should use the returned pointer, not the passed
3062 * in buffer, to use the name! The implementation often starts at an offset
3063 * into the buffer, and may leave 0 bytes at the start.
3064 *
3065 * "buflen" should be positive.
3066 */
3067 char *d_path(const struct path *path, char *buf, int buflen)
3068 {
3069 char *res = buf + buflen;
3070 struct path root;
3071 int error;
3072
3073 /*
3074 * We have various synthetic filesystems that never get mounted. On
3075 * these filesystems dentries are never used for lookup purposes, and
3076 * thus don't need to be hashed. They also don't need a name until a
3077 * user wants to identify the object in /proc/pid/fd/. The little hack
3078 * below allows us to generate a name for these objects on demand:
3079 */
3080 if (path->dentry->d_op && path->dentry->d_op->d_dname)
3081 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3082
3083 rcu_read_lock();
3084 get_fs_root_rcu(current->fs, &root);
3085 error = path_with_deleted(path, &root, &res, &buflen);
3086 rcu_read_unlock();
3087
3088 if (error < 0)
3089 res = ERR_PTR(error);
3090 return res;
3091 }
3092 EXPORT_SYMBOL(d_path);
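
/*
 * Example (editorial sketch, not part of the original source): the
 * returned pointer, not the start of the buffer, is what must be used.
 * "file" is an assumed struct file in scope; a kmalloc'd PATH_MAX buffer
 * would work just as well as a whole page.
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *
 *	if (page) {
 *		char *p = d_path(&file->f_path, page, PAGE_SIZE);
 *
 *		if (!IS_ERR(p))
 *			pr_info("open file: %s\n", p);
 *		free_page((unsigned long)page);
 *	}
 */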
3093
3094 /*
3095 * Helper function for dentry_operations.d_dname() members
3096 */
3097 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3098 const char *fmt, ...)
3099 {
3100 va_list args;
3101 char temp[64];
3102 int sz;
3103
3104 va_start(args, fmt);
3105 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3106 va_end(args);
3107
3108 if (sz > sizeof(temp) || sz > buflen)
3109 return ERR_PTR(-ENAMETOOLONG);
3110
3111 buffer += buflen - sz;
3112 return memcpy(buffer, temp, sz);
3113 }
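
/*
 * Example (editorial sketch, modelled on pipefs; the "foo" names are
 * hypothetical): a filesystem whose dentries are never looked up by name
 * can synthesize one on demand through ->d_dname().
 *
 *	static char *foo_dname(struct dentry *dentry, char *buffer, int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "foo:[%lu]",
 *				     dentry->d_inode->i_ino);
 *	}
 *
 *	static const struct dentry_operations foo_dentry_ops = {
 *		.d_dname	= foo_dname,
 *	};
 */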
3114
3115 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3116 {
3117 char *end = buffer + buflen;
3118 /* these dentries are never renamed, so d_lock is not needed */
3119 if (prepend(&end, &buflen, " (deleted)", 11) ||
3120 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3121 prepend(&end, &buflen, "/", 1))
3122 end = ERR_PTR(-ENAMETOOLONG);
3123 return end;
3124 }
3125
3126 /*
3127 * Write full pathname from the root of the filesystem into the buffer.
3128 */
3129 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
3130 {
3131 char *end, *retval;
3132 int len, seq = 0;
3133 int error = 0;
3134
3135 rcu_read_lock();
3136 restart:
3137 end = buf + buflen;
3138 len = buflen;
3139 prepend(&end, &len, "\0", 1);
3140 if (buflen < 1)
3141 goto Elong;
3142 /* Get '/' right */
3143 retval = end-1;
3144 *retval = '/';
3145 read_seqbegin_or_lock(&rename_lock, &seq);
3146 while (!IS_ROOT(dentry)) {
3147 struct dentry *parent = dentry->d_parent;
3148 int error;
3149
3150 prefetch(parent);
3151 error = prepend_name(&end, &len, &dentry->d_name);
3152 if (error)
3153 break;
3154
3155 retval = end;
3156 dentry = parent;
3157 }
3158 if (!(seq & 1))
3159 rcu_read_unlock();
3160 if (need_seqretry(&rename_lock, seq)) {
3161 seq = 1;
3162 goto restart;
3163 }
3164 done_seqretry(&rename_lock, seq);
3165 if (error)
3166 goto Elong;
3167 return retval;
3168 Elong:
3169 return ERR_PTR(-ENAMETOOLONG);
3170 }
3171
3172 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3173 {
3174 return __dentry_path(dentry, buf, buflen);
3175 }
3176 EXPORT_SYMBOL(dentry_path_raw);
3177
3178 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3179 {
3180 char *p = NULL;
3181 char *retval;
3182
3183 if (d_unlinked(dentry)) {
3184 p = buf + buflen;
3185 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3186 goto Elong;
3187 buflen++;
3188 }
3189 retval = __dentry_path(dentry, buf, buflen);
3190 if (!IS_ERR(retval) && p)
3191 *p = '/'; /* restore '/' overridden with '\0' */
3192 return retval;
3193 Elong:
3194 return ERR_PTR(-ENAMETOOLONG);
3195 }
3196
3197 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3198 struct path *pwd)
3199 {
3200 unsigned seq;
3201
3202 do {
3203 seq = read_seqcount_begin(&fs->seq);
3204 *root = fs->root;
3205 *pwd = fs->pwd;
3206 } while (read_seqcount_retry(&fs->seq, seq));
3207 }
3208
3209 /*
3210 * NOTE! The user-level library version returns a
3211 * character pointer. The kernel system call just
3212 * returns the length of the buffer filled (which
3213 * includes the ending '\0' character), or a negative
3214 * error value. So libc would do something like
3215 *
3216 * char *getcwd(char * buf, size_t size)
3217 * {
3218 * int retval;
3219 *
3220 * retval = sys_getcwd(buf, size);
3221 * if (retval >= 0)
3222 * return buf;
3223 * errno = -retval;
3224 * return NULL;
3225 * }
3226 */
3227 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3228 {
3229 int error;
3230 struct path pwd, root;
3231 char *page = __getname();
3232
3233 if (!page)
3234 return -ENOMEM;
3235
3236 rcu_read_lock();
3237 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3238
3239 error = -ENOENT;
3240 if (!d_unlinked(pwd.dentry)) {
3241 unsigned long len;
3242 char *cwd = page + PATH_MAX;
3243 int buflen = PATH_MAX;
3244
3245 prepend(&cwd, &buflen, "\0", 1);
3246 error = prepend_path(&pwd, &root, &cwd, &buflen);
3247 rcu_read_unlock();
3248
3249 if (error < 0)
3250 goto out;
3251
3252 /* Unreachable from current root */
3253 if (error > 0) {
3254 error = prepend_unreachable(&cwd, &buflen);
3255 if (error)
3256 goto out;
3257 }
3258
3259 error = -ERANGE;
3260 len = PATH_MAX + page - cwd;
3261 if (len <= size) {
3262 error = len;
3263 if (copy_to_user(buf, cwd, len))
3264 error = -EFAULT;
3265 }
3266 } else {
3267 rcu_read_unlock();
3268 }
3269
3270 out:
3271 __putname(page);
3272 return error;
3273 }
3274
3275 /*
3276 * Test whether new_dentry is a subdirectory of old_dentry.
3277 *
3278 * Trivially implemented using the dcache structure
3279 */
3280
3281 /**
3282 * is_subdir - is new dentry a subdirectory of old_dentry
3283 * @new_dentry: new dentry
3284 * @old_dentry: old dentry
3285 *
3286  * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3287 * Returns 0 otherwise.
3288 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3289 */
3290
3291 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3292 {
3293 int result;
3294 unsigned seq;
3295
3296 if (new_dentry == old_dentry)
3297 return 1;
3298
3299 do {
3300 /* for restarting inner loop in case of seq retry */
3301 seq = read_seqbegin(&rename_lock);
3302 /*
3303 * Need rcu_readlock to protect against the d_parent trashing
3304 * due to d_move
3305 */
3306 rcu_read_lock();
3307 if (d_ancestor(old_dentry, new_dentry))
3308 result = 1;
3309 else
3310 result = 0;
3311 rcu_read_unlock();
3312 } while (read_seqretry(&rename_lock, seq));
3313
3314 return result;
3315 }
3316
3317 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3318 {
3319 struct dentry *root = data;
3320 if (dentry != root) {
3321 if (d_unhashed(dentry) || !dentry->d_inode)
3322 return D_WALK_SKIP;
3323
3324 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3325 dentry->d_flags |= DCACHE_GENOCIDE;
3326 dentry->d_lockref.count--;
3327 }
3328 }
3329 return D_WALK_CONTINUE;
3330 }
3331
3332 void d_genocide(struct dentry *parent)
3333 {
3334 d_walk(parent, parent, d_genocide_kill, NULL);
3335 }
3336
3337 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3338 {
3339 inode_dec_link_count(inode);
3340 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3341 !hlist_unhashed(&dentry->d_alias) ||
3342 !d_unlinked(dentry));
3343 spin_lock(&dentry->d_parent->d_lock);
3344 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3345 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3346 (unsigned long long)inode->i_ino);
3347 spin_unlock(&dentry->d_lock);
3348 spin_unlock(&dentry->d_parent->d_lock);
3349 d_instantiate(dentry, inode);
3350 }
3351 EXPORT_SYMBOL(d_tmpfile);
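
/*
 * Example (editorial sketch, not part of the original source): a
 * filesystem's ->tmpfile() allocates a fresh inode (with i_nlink == 1)
 * and lets d_tmpfile() mark it as an open-but-unlinked file, dropping
 * the link count and giving the dentry its "#<ino>" name.
 * "foo_new_inode" is a hypothetical helper.
 *
 *	static int foo_tmpfile(struct inode *dir, struct dentry *dentry,
 *			       umode_t mode)
 *	{
 *		struct inode *inode = foo_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */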
3352
3353 static __initdata unsigned long dhash_entries;
3354 static int __init set_dhash_entries(char *str)
3355 {
3356 if (!str)
3357 return 0;
3358 dhash_entries = simple_strtoul(str, &str, 0);
3359 return 1;
3360 }
3361 __setup("dhash_entries=", set_dhash_entries);
3362
3363 static void __init dcache_init_early(void)
3364 {
3365 unsigned int loop;
3366
3367 /* If hashes are distributed across NUMA nodes, defer
3368 * hash allocation until vmalloc space is available.
3369 */
3370 if (hashdist)
3371 return;
3372
3373 dentry_hashtable =
3374 alloc_large_system_hash("Dentry cache",
3375 sizeof(struct hlist_bl_head),
3376 dhash_entries,
3377 13,
3378 HASH_EARLY,
3379 &d_hash_shift,
3380 &d_hash_mask,
3381 0,
3382 0);
3383
3384 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3385 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3386 }
3387
3388 static void __init dcache_init(void)
3389 {
3390 unsigned int loop;
3391
3392 /*
3393 * A constructor could be added for stable state like the lists,
3394 * but it is probably not worth it because of the cache nature
3395 * of the dcache.
3396 */
3397 dentry_cache = KMEM_CACHE(dentry,
3398 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3399
3400 /* Hash may have been set up in dcache_init_early */
3401 if (!hashdist)
3402 return;
3403
3404 dentry_hashtable =
3405 alloc_large_system_hash("Dentry cache",
3406 sizeof(struct hlist_bl_head),
3407 dhash_entries,
3408 13,
3409 0,
3410 &d_hash_shift,
3411 &d_hash_mask,
3412 0,
3413 0);
3414
3415 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3416 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3417 }
3418
3419 /* SLAB cache for __getname() consumers */
3420 struct kmem_cache *names_cachep __read_mostly;
3421 EXPORT_SYMBOL(names_cachep);
3422
3423 EXPORT_SYMBOL(d_genocide);
3424
3425 void __init vfs_caches_init_early(void)
3426 {
3427 dcache_init_early();
3428 inode_init_early();
3429 }
3430
3431 void __init vfs_caches_init(unsigned long mempages)
3432 {
3433 unsigned long reserve;
3434
3435 /* Base hash sizes on available memory, with a reserve equal to
3436 150% of current kernel size */
3437
3438 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3439 mempages -= reserve;
3440
3441 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3442 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3443
3444 dcache_init();
3445 inode_init();
3446 files_init(mempages);
3447 mnt_init();
3448 bdev_cache_init();
3449 chrdev_init();
3450 }