/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 *
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include "internal.h"
#include "mount.h"
/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
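/*
 * Illustrative sketch (not taken verbatim from this file): when two
 * dentries with no ancestor relationship must both be locked, the rule
 * above orders them by pointer value, e.g.:
 *
 *	if (dentry1 < dentry2) {
 *		spin_lock(&dentry1->d_lock);
 *		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
 *	} else {
 *		spin_lock(&dentry2->d_lock);
 *		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */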
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
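/*
 * Note: the dentry_stat structure filled in above is what userspace
 * reads as /proc/sys/fs/dentry-state (the fs.dentry-state sysctl).
 */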
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
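/*
 * Example of the tail handling above: with tcount == 2 on a little-endian
 * machine, bytemask_from_count() yields 0xffff, so only the two remaining
 * name bytes of 'a' and 'b' take part in the final comparison and any
 * bytes read past the end of the strings are masked off.
 */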
#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	kmem_cache_free(dentry_cache, dentry);
}
static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	WARN_ON(!hlist_unhashed(&dentry->d_alias));
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}
static void dentry_free(struct dentry *dentry)
{
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		hlist_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	__d_clear_type(dentry);
	dentry->d_inode = NULL;
	hlist_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))

static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_del_init(&dentry->d_lru);
}

static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_move_tail(&dentry->d_lru, list);
}

/*
 * dentry_lru_(add|del) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}
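/*
 * Summary of the flag states the helpers above maintain:
 *
 *	neither flag set			- not on any list
 *	DCACHE_LRU_LIST only			- on the per-superblock LRU
 *	DCACHE_LRU_LIST | DCACHE_SHRINK_LIST	- on a private shrink list
 *
 * nr_dentry_unused counts dentries in either of the last two states.
 */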
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if ((dentry->d_flags & DCACHE_OP_PRUNE) && !d_unhashed(dentry))
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON((int)dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */
}
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely((int)dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	if (lockref_put_or_lock(&dentry->d_lockref))
		return;

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
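/*
 * Usage note (illustrative): each reference obtained with dget(),
 * dget_parent() or a successful lookup must eventually be dropped with
 * exactly one dput(), e.g.:
 *
 *	struct dentry *d = dget_parent(dentry);
 *	...use d...
 *	dput(d);
 */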
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
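/*
 * The fast path above relies on lockref_get_not_zero(): it attempts a
 * cmpxchg-based increment of d_lockref.count without taking d_lock, and
 * only falls back to the locked slow path when that fails (lock held,
 * dead dentry, or contention).
 */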
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			/*
			 * inform the fs via d_prune that this dentry
			 * is about to be unhashed and destroyed.
			 */
			if ((dentry->d_flags & DCACHE_OP_PRUNE) &&
			    !d_unhashed(dentry))
				dentry->d_op->d_prune(dentry);

			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if ((int)dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}
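/*
 * Example of the ancestor pruning above: for a chain a/b/c where only
 * 'c' sits on the dispose list, killing 'c' may leave 'b' with no users.
 * Reaping 'b' (and then 'a') right here means shrink_dcache_parent()
 * does not have to rescan the whole tree once per level, which is where
 * the quadratic behaviour would otherwise come from.
 */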
static enum lru_status
dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through callbacks
		 * like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list. It is thus, always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @nr_to_scan : number of entries to try to free
 * @nid: which node to scan for freeable entities
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
		     int nid)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_walk_node(&sb->s_dentry_lru, nid, dentry_lru_isolate,
				   &dispose, &nr_to_scan);
	shrink_dentry_list(&dispose);
	return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
						spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		rcu_read_lock();
		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/*
		 * might go back up the wrong parent if we have had a rename
		 * or deletion
		 */
		if (this_parent != child->d_parent ||
			 (child->d_flags & DCACHE_DENTRY_KILLED) ||
			 need_seqretry(&rename_lock, seq)) {
			spin_unlock(&this_parent->d_lock);
			rcu_read_unlock();
			goto rename_retry;
		}
		rcu_read_unlock();
		next = child->d_u.d_child.next;
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq)) {
		spin_unlock(&this_parent->d_lock);
		goto rename_retry;
	}
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	if (!retry)
		return;
	seq = 1;
	goto again;
}
/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */
static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};

static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are reasons not to delete it
 * return -EBUSY. On success return 0.
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
int d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return 0;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		if (!data.mountpoint && !data.select.found)
			return 0;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);
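/*
 * Typical caller (illustrative note, not part of the original file): the
 * VFS lookup path reaches for d_invalidate() when a filesystem's
 * ->d_revalidate() reports that a cached dentry has gone stale, so the
 * name and any submounts under it are detached before the lookup retries.
 */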
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}
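/*
 * Name storage note: names up to DNAME_INLINE_LEN-1 bytes live in the
 * dentry's embedded d_iname array; longer names get a separately
 * allocated, reference-counted struct external_name, which is what
 * dname_external() tests for.
 */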
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
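/*
 * Illustrative use (assumption, not from this file): a simple pinned
 * filesystem creating a named child and making it visible:
 *
 *	dentry = d_alloc_name(parent, "status");
 *	if (dentry)
 *		d_add(dentry, inode);
 */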
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
}
EXPORT_SYMBOL(d_set_d_op);
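/*
 * Caching the presence of each operation in d_flags lets hot paths such
 * as __d_lookup() and __d_lookup_rcu() test DCACHE_OP_COMPARE and
 * friends with a single flag check instead of dereferencing d_op on
 * every candidate dentry.
 */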
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_FILE_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
	} else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->follow_link))
			add_flags = DCACHE_SYMLINK_TYPE;
		else
			inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);

	spin_lock(&dentry->d_lock);
	__d_set_type(dentry, add_flags);
	if (inode)
		hlist_add_head(&dentry->d_alias, &inode->i_dentry);
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}
EXPORT_SYMBOL(d_instantiate_unique);
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_alias));

	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = QSTR_INIT("/", 1);

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	static const struct qstr anonstring = QSTR_INIT("/", 1);
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= add_flags;
	hlist_add_head(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (unlikely(IS_ERR(found)))
		goto err_out;
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			found = ERR_PTR(-ENOMEM);
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return found;
}
EXPORT_SYMBOL(d_add_ci);
/*
 * Do the slow-case of the dentry name compare.
 *
 * Unlike the dentry_cmp() function, we need to atomically
 * load the name and length information, so that the
 * filesystem can rely on them, and can use the 'name' and
 * 'len' information without worrying about walking off the
 * end of memory etc.
 *
 * Thus the read_seqcount_retry() and the "duplicate" info
 * in arguments (the low-level filesystem should not look
 * at the dentry inode or name contents directly, since
 * rename can change them while we're in RCU mode).
 */
enum slow_d_compare {
	D_COMP_OK,
	D_COMP_NOMATCH,
	D_COMP_SEQRETRY,
};

static noinline enum slow_d_compare slow_dentry_cmp(
		const struct dentry *parent,
		struct dentry *dentry,
		unsigned int seq,
		const struct qstr *name)
{
	int tlen = dentry->d_name.len;
	const char *tname = dentry->d_name.name;

	if (read_seqcount_retry(&dentry->d_seq, seq)) {
		cpu_relax();
		return D_COMP_SEQRETRY;
	}
	if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
		return D_COMP_NOMATCH;
	return D_COMP_OK;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			*seqp = seq;
			switch (slow_dentry_cmp(parent, dentry, seq, name)) {
			case D_COMP_OK:
				return dentry;
			case D_COMP_NOMATCH:
				continue;
			default:
				goto seqretry;
			}
		}

		if (dentry->d_name.hash_len != hashlen)
			continue;
		*seqp = seq;
		if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
			return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int len = name->len;
	unsigned int hash = name->hash;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		/*
		 * It is safe to compare names since d_move() cannot
		 * change the qstr (protected by d_lock).
		 */
		if (parent->d_flags & DCACHE_OP_COMPARE) {
			int tlen = dentry->d_name.len;
			const char *tname = dentry->d_name.name;
			if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
				goto next;
		} else {
			if (dentry->d_name.len != len)
				goto next;
			if (dentry_cmp(dentry, str, len))
				goto next;
		}

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned if the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch, unless we are going to rehash
 * it.  Note that if we *do* unhash the target, we are not allowed
 * to rehash it without giving it a new name/hash key - whether
 * we swap or overwrite the names here, the resulting name won't match
 * the reality in the filesystem; it's only there for d_path() purposes.
 * Note that all of this is happening under rename_lock, so any hash
 * lookup seeing it in the middle of manipulations will be discarded
 * anyway. So we do not care what happens to the hash key in that case.
 */

/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/*
	 * Unhash the target (d_delete() is not usable here).  If exchanging
	 * the two dentries, then rehash onto the other's hash queue.
	 */
	__d_drop(target);
	if (exchange)
		__d_rehash(target,
			   d_hash(dentry->d_parent, dentry->d_name.hash));

	/* Switch the names.. */
	if (exchange)
		swap_names(dentry, target);
	else
		copy_name(dentry, target);

	/* ... and switch them in the tree */
	if (IS_ROOT(dentry)) {
		/* splicing a tree */
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		list_del_init(&target->d_u.d_child);
		list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	} else {
		/* swapping two dentries */
		swap(dentry->d_parent, target->d_parent);
		list_move(&target->d_u.d_child, &target->d_parent->d_subdirs);
		list_move(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
		if (exchange)
			fsnotify_d_move(target);
		fsnotify_d_move(dentry);
	}

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_for_move(dentry, target);
}
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret = ERR_PTR(-EBUSY);

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	__d_move(alias, dentry, false);
	ret = alias;
out_err:
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_any_alias(inode);
		if (new) {
			if (!IS_ROOT(new)) {
				spin_unlock(&inode->i_lock);
				dput(new);
				return ERR_PTR(-EIO);
			}
			if (d_ancestor(new, dentry)) {
				spin_unlock(&inode->i_lock);
				dput(new);
				return ERR_PTR(-EIO);
			}
			write_seqlock(&rename_lock);
			__d_move(new, dentry, false);
			write_sequnlock(&rename_lock);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else {
		d_instantiate(dentry, inode);
		if (d_unhashed(dentry))
			d_rehash(dentry);
	}
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
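
/*
 * Illustrative sketch only: an exportable filesystem's ->lookup typically
 * finishes with d_splice_alias(), so that a disconnected alias created from
 * an NFS file handle is reconnected here instead of a second dentry being
 * created.  "foofs_lookup" and "foofs_iget" below are hypothetical names:
 *
 *	static struct dentry *foofs_lookup(struct inode *dir,
 *					   struct dentry *dentry,
 *					   unsigned int flags)
 *	{
 *		struct inode *inode = foofs_iget(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 *
 * d_splice_alias() copes with inode being NULL (a negative lookup) or an
 * ERR_PTR(), matching the return conventions expected from ->lookup.
 */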
/**
 * d_materialise_unique - introduce an inode into the tree
 * @dentry: candidate dentry
 * @inode: inode to bind to the dentry, to which aliases may be attached
 *
 * Introduces a dentry into the tree, substituting an extant disconnected
 * root directory alias in its place if there is one. Caller must hold the
 * i_mutex of the parent directory.
 */
struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
{
	struct dentry *actual;

	BUG_ON(!d_unhashed(dentry));

	if (!inode) {
		actual = dentry;
		__d_instantiate(dentry, NULL);
		d_rehash(actual);
		goto out_nolock;
	}

	spin_lock(&inode->i_lock);

	if (S_ISDIR(inode->i_mode)) {
		struct dentry *alias;

		/* Does an aliased dentry already exist? */
		alias = __d_find_alias(inode);
		if (alias) {
			actual = alias;
			write_seqlock(&rename_lock);

			if (d_ancestor(alias, dentry)) {
				/* Check for loops */
				actual = ERR_PTR(-ELOOP);
				spin_unlock(&inode->i_lock);
			} else if (IS_ROOT(alias)) {
				/* Is this an anonymous mountpoint that we
				 * could splice into our tree? */
				__d_move(alias, dentry, false);
				write_sequnlock(&rename_lock);
				goto found;
			} else {
				/* Nope, but we must(!) avoid directory
				 * aliasing. This drops inode->i_lock */
				actual = __d_unalias(inode, dentry, alias);
			}
			write_sequnlock(&rename_lock);
			if (IS_ERR(actual)) {
				if (PTR_ERR(actual) == -ELOOP)
					pr_warn_ratelimited(
						"VFS: Lookup of '%s' in %s %s"
						" would have caused loop\n",
						dentry->d_name.name,
						inode->i_sb->s_type->name,
						inode->i_sb->s_id);
				dput(alias);
			}
			goto out_nolock;
		}
	}

	/* Add a unique reference */
	actual = __d_instantiate_unique(dentry, inode);
	if (!actual)
		actual = dentry;

	d_rehash(actual);
found:
	spin_unlock(&inode->i_lock);
out_nolock:
	if (actual == dentry) {
		security_d_instantiate(dentry, inode);
		return NULL;
	}

	iput(inode);
	return actual;
}
EXPORT_SYMBOL_GPL(d_materialise_unique);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
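
/*
 * Illustrative only (not part of the original source): prepend() fills the
 * buffer from the end, so a path is assembled right to left and the usable
 * string starts somewhere inside the buffer rather than at buf[0]:
 *
 *	char buf[16];
 *	char *p = buf + sizeof(buf);
 *	int len = sizeof(buf);
 *
 *	prepend(&p, &len, "\0", 1);
 *	prepend(&p, &len, "lib", 3);
 *	prepend(&p, &len, "/", 1);
 *	prepend(&p, &len, "usr", 3);
 *	prepend(&p, &len, "/", 1);
 *
 * p now points at the string "/usr/lib" near the end of buf.
 */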
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be a mismatch between the length and the
 * pointer. The length cannot be trusted; we need to copy the name
 * byte-by-byte until the length is reached or a null byte is found. It also
 * prepends "/" at the beginning of the name. The sequence number check at
 * the caller will retry it again when a d_move() does happen. So any garbage
 * in the buffer due to a mismatched pointer and length will be discarded.
 *
 * Data dependency barrier is needed to make sure that we see that terminating
 * NUL.  Alpha strikes again, film at 11...
 */
static int prepend_name(char **buffer, int *buflen, struct qstr *name)
{
	const char *dname = ACCESS_ONCE(name->name);
	u32 dlen = ACCESS_ONCE(name->len);
	char *p;

	smp_read_barrier_depends();

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
			/* Global root? */
			if (mnt != parent) {
				dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			/*
			 * Filesystems needing to implement special "root names"
			 * should do so with ->d_dname()
			 */
			if (IS_ROOT(dentry) &&
			   (dentry->d_name.len != 1 ||
			    dentry->d_name.name[0] != '/')) {
				WARN(1, "Root dentry has weird name <%.*s>\n",
				     (int) dentry->d_name.len,
				     dentry->d_name.name);
			}
			if (!error)
				error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
/*
 * same as __d_path but appends "(deleted)" for unlinked files.
 */
static int path_with_deleted(const struct path *path,
			     const struct path *root,
			     char **buf, int *buflen)
{
	prepend(buf, buflen, "\0", 1);
	if (d_unlinked(path->dentry)) {
		int error = prepend(buf, buflen, " (deleted)", 10);
		if (error)
			return error;
	}

	return prepend_path(path, root, buf, buflen);
}
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 *
	 * Some pseudo inodes are mountable.  When they are mounted
	 * path->dentry == path->mnt->mnt_root.  In that case don't call d_dname
	 * and instead have d_path return the mounted path.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
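
/*
 * Illustrative usage sketch (assumes a "struct file *filp" in scope): the
 * caller must use the pointer returned by d_path(), not the start of the
 * buffer it passed in:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *
 *	if (buf) {
 *		char *p = d_path(&filp->f_path, buf, PAGE_SIZE);
 *
 *		if (!IS_ERR(p))
 *			pr_debug("open file: %s\n", p);
 *		free_page((unsigned long)buf);
 *	}
 */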
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
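
/*
 * Illustrative sketch only: pseudo filesystems (pipefs, sockfs and similar)
 * implement ->d_dname() as a thin wrapper around this helper.  The name and
 * format string below are just an example of the pattern:
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer, int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *					dentry->d_inode->i_ino);
 *	}
 */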
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
EXPORT_SYMBOL(simple_dname);
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right */
	retval = end - 1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent trashing
		 * due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
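
/*
 * Illustrative sketch only: a filesystem's ->tmpfile() typically allocates
 * and fills a new inode and then hands it to d_tmpfile(), which gives the
 * unhashed, unlinked dentry its "#<ino>" name and instantiates it.
 * "foofs_tmpfile" and "foofs_new_inode" are hypothetical names:
 *
 *	static int foofs_tmpfile(struct inode *dir, struct dentry *dentry,
 *				 umode_t mode)
 *	{
 *		struct inode *inode = foofs_new_inode(dir, mode);
 *
 *		if (IS_ERR(inode))
 *			return PTR_ERR(inode);
 *		d_tmpfile(dentry, inode);
 *		return 0;
 *	}
 */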
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
static void __init dcache_init_early(void)
{
	unsigned int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
static void __init dcache_init(void)
{
	unsigned int loop;

	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					0,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);

	for (loop = 0; loop < (1U << d_hash_shift); loop++)
		INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init(mempages);