// SPDX-License-Identifier: GPL-2.0-only
/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/fscrypt.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/memblock.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/list_lru.h>

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_roots bl list spinlock protects:
 *   - the s_roots list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_roots lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * arbitrary, since it's serialized on rename_lock
 */
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
const struct qstr dotdot_name = QSTR_INIT("..", 2);
EXPORT_SYMBOL(dotdot_name);
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> d_hash_shift);
}
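/*
 * Note: d_hash_shift is set up when the hashtable is allocated so that the
 * shift above keeps the *high-order* bits of the 32-bit name hash as the
 * bucket index; with a power-of-two table this is a single shift and add,
 * with no prime-modulus division on the lookup fast path.
 */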
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
static DEFINE_PER_CPU(long, nr_dentry_negative);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_negative(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_negative, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	dentry_stat.nr_negative = get_nr_dentry_negative();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a,b,mask;

	for (;;) {
		a = read_word_at_a_time(cs);
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
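/*
 * Example of the final masked compare: with tcount == 3 on a 64-bit
 * little-endian machine, bytemask_from_count() yields 0xffffff, so only
 * the three bytes that are actually part of the name decide the result
 * and whatever was loaded beyond the string is ignored.
 */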
#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
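/*
 * The snapshot helpers below pin a stable copy of a dentry's name: an
 * external name is shared by bumping its refcount, an inline name is
 * copied into the snapshot itself, so the caller can keep using the name
 * after d_lock is dropped, even across a rename of the dentry.
 */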
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	name->name = dentry->d_name;
	if (unlikely(dname_external(dentry))) {
		atomic_inc(&external_name(dentry)->u.count);
	} else {
		memcpy(name->inline_name, dentry->d_iname,
		       dentry->d_name.len + 1);
		name->name.name = name->inline_name;
	}
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(take_dentry_name_snapshot);

void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name.name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name.name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	smp_store_release(&dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_inc(nr_dentry_negative);
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (dentry->d_flags & DCACHE_NORCU)
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * The per-cpu "nr_dentry_negative" counters are only updated
 * when deleted from or added to the per-superblock LRU list, not
 * from/to the shrink list. That is to avoid an unneeded dec/inc
 * pair when moving from LRU to shrink list in select_collect().
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_inc(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	if (d_is_negative(dentry))
		this_cpu_dec(nr_dentry_negative);
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
static void ___d_drop(struct dentry *dentry)
{
	struct hlist_bl_head *b;
	/*
	 * Hashed dentries are normally on the dentry hashtable,
	 * with the exception of those newly allocated by
	 * d_obtain_root, which are always IS_ROOT:
	 */
	if (unlikely(IS_ROOT(dentry)))
		b = &dentry->d_sb->s_roots;
	else
		b = d_hash(dentry->d_name.hash);

	hlist_bl_lock(b);
	__hlist_bl_del(&dentry->d_hash);
	hlist_bl_unlock(b);
}

void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		___d_drop(dentry);
		dentry->d_hash.pprev = NULL;
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
EXPORT_SYMBOL(__d_drop);

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 *
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children.  While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated.  However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next.  And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around.  I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
	cond_resched();
}
static struct dentry *__lock_parent(struct dentry *dentry)
{
	struct dentry *parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}
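/*
 * Return the parent locked together with dentry->d_lock, or NULL if the
 * dentry is IS_ROOT; the trylock fast path below avoids __lock_parent()'s
 * unlock/relock dance when the parent's lock is uncontended.
 */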
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	return __lock_parent(dentry);
}
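/*
 * Decide whether a dentry whose refcount is about to hit zero should stay
 * cached.  Called with d_lock held; returns true if the dentry was retained
 * (refcount dropped, dentry left on the LRU) and false if the caller should
 * go on and kill it.
 */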
static inline bool retain_dentry(struct dentry *dentry)
{
	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		return false;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			return false;
	}

	if (unlikely(dentry->d_flags & DCACHE_DONTCACHE))
		return false;

	/* retain; LRU fodder */
	dentry->d_lockref.count--;
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
	return true;
}
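/*
 * Mark every alias of @inode with DCACHE_DONTCACHE and set I_DONTCACHE on
 * the inode itself, so that both the dentries and the inode are evicted on
 * the final reference drop instead of lingering on their LRU lists.
 */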
void d_mark_dontcache(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&de->d_lock);
		de->d_flags |= DCACHE_DONTCACHE;
		spin_unlock(&de->d_lock);
	}
	inode->i_state |= I_DONTCACHE;
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_mark_dontcache);
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto slow_positive;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			parent = __lock_parent(dentry);
			if (likely(inode || !dentry->d_inode))
				goto got_locks;
			/* negative that became positive */
			if (parent)
				spin_unlock(&parent->d_lock);
			inode = dentry->d_inode;
			goto slow_positive;
		}
	}
	__dentry_kill(dentry);
	return parent;

slow_positive:
	spin_unlock(&dentry->d_lock);
	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	parent = lock_parent(dentry);
got_locks:
	if (unlikely(dentry->d_lockref.count != 1)) {
		dentry->d_lockref.count--;
	} else if (likely(!retain_dentry(dentry))) {
		__dentry_kill(dentry);
		return parent;
	}
	/* we are keeping it, after all */
	if (inode)
		spin_unlock(&inode->i_lock);
	if (parent)
		spin_unlock(&parent->d_lock);
	spin_unlock(&dentry->d_lock);
	return NULL;
}
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 *
	 * Nevertheless, there are two cases that we should kill
	 * the dentry anyway.
	 * 1. free disconnected dentries as soon as their refcount
	 *    reached zero.
	 * 2. free dentries if they should not be cached.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST |
			DCACHE_DISCONNECTED | DCACHE_DONTCACHE;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set the count to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/**
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	while (dentry) {
		might_sleep();

		rcu_read_lock();
		if (likely(fast_dput(dentry))) {
			rcu_read_unlock();
			return;
		}

		/* Slow case: now with the dentry lock held */
		rcu_read_unlock();

		if (likely(retain_dentry(dentry))) {
			spin_unlock(&dentry->d_lock);
			return;
		}

		dentry = dentry_kill(dentry);
	}
}
EXPORT_SYMBOL(dput);

static void __dput_to_list(struct dentry *dentry, struct list_head *list)
__must_hold(&dentry->d_lock)
{
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		/* let the owner of the list it's on deal with it */
		--dentry->d_lockref.count;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!--dentry->d_lockref.count)
			d_shrink_add(dentry, list);
	}
}

void dput_to_list(struct dentry *dentry, struct list_head *list)
{
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	if (!retain_dentry(dentry))
		__dput_to_list(dentry, list);
	spin_unlock(&dentry->d_lock);
}

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
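/*
 * dget_parent() grabs a counted reference on the parent while taking no
 * locks in the common case: an RCU-protected lockref_get_not_zero()
 * validated against d_seq, with a locked retry loop as the fallback when
 * the optimistic attempt loses a race.
 */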
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;
	unsigned seq;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	seq = raw_seqcount_begin(&dentry->d_seq);
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (!read_seqcount_retry(&dentry->d_seq, seq))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them.  If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
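/*
 * Grab a hashed alias of @inode, with inode->i_lock held by the caller.
 * A directory has at most one alias, so for directories any alias will do.
 */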
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias;

	if (S_ISDIR(inode->i_mode))
		return __d_find_any_alias(inode);

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
	}
	return NULL;
}

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 *  Caller MUST be holding rcu_read_lock() and be guaranteed
 *  that inode won't get freed until rcu_read_unlock().
 */
struct dentry *d_find_alias_rcu(struct inode *inode)
{
	struct hlist_head *l = &inode->i_dentry;
	struct dentry *de = NULL;

	spin_lock(&inode->i_lock);
	// ->i_dentry and ->i_rcu are colocated, but the latter won't be
	// used without having I_FREEING set, which means no aliases left
	if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) {
		if (S_ISDIR(inode->i_mode)) {
			de = hlist_entry(l->first, struct dentry, d_u.d_alias);
		} else {
			hlist_for_each_entry(de, l, d_u.d_alias)
				if (!d_unhashed(de))
					break;
		}
	}
	spin_unlock(&inode->i_lock);
	return de;
}

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
/*
 * Lock a dentry from shrink list.
 * Called under rcu_read_lock() and dentry->d_lock; the former
 * guarantees that nothing we access will be freed under us.
 * Note that dentry is *not* protected from concurrent dentry_kill(),
 * d_delete(), etc.
 *
 * Return false if dentry has been disrupted or grabbed, leaving
 * the caller to kick it off-list.  Otherwise, return true and have
 * that dentry's inode and parent both locked.
 */
static bool shrink_lock_dentry(struct dentry *dentry)
{
	struct inode *inode;
	struct dentry *parent;

	if (dentry->d_lockref.count)
		return false;

	inode = dentry->d_inode;
	if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
		spin_unlock(&dentry->d_lock);
		spin_lock(&inode->i_lock);
		spin_lock(&dentry->d_lock);
		if (unlikely(dentry->d_lockref.count))
			goto out;
		/* changed inode means that somebody had grabbed it */
		if (unlikely(inode != dentry->d_inode))
			goto out;
	}

	parent = dentry->d_parent;
	if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
		return true;

	spin_unlock(&dentry->d_lock);
	spin_lock(&parent->d_lock);
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		spin_lock(&dentry->d_lock);
		goto out;
	}
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	if (likely(!dentry->d_lockref.count))
		return true;
	spin_unlock(&parent->d_lock);
out:
	if (inode)
		spin_unlock(&inode->i_lock);
	return false;
}

void shrink_dentry_list(struct list_head *list)
{
	while (!list_empty(list)) {
		struct dentry *dentry, *parent;

		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		rcu_read_lock();
		if (!shrink_lock_dentry(dentry)) {
			bool can_free = false;
			rcu_read_unlock();
			d_shrink_del(dentry);
			if (dentry->d_lockref.count < 0)
				can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}
		rcu_read_unlock();
		d_shrink_del(dentry);
		parent = dentry->d_parent;
		if (parent != dentry)
			__dput_to_list(parent, list);
		__dentry_kill(dentry);
	}
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag.  Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list.  It is thus, always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	do {
		LIST_HEAD(dispose);

		list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);
		shrink_dentry_list(&dispose);
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 *
 * The @enter() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
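/*
 * d_walk() callback data for path_has_submounts(): the vfsmount to check
 * under, and whether a mountpoint was found below it.
 */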
struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed.  For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	union {
		long found;
		struct dentry *victim;
	};
	struct list_head dispose;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
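/*
 * Variant of select_collect() used once the dispose list stops growing:
 * it hunts down a single zero-refcount dentry that is already on a shrink
 * list and reports it via data->victim (taking rcu_read_lock for the
 * caller), so shrink_dcache_parent() can kill it and guarantee forward
 * progress.
 */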
static enum d_walk_ret select_collect2(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		if (!dentry->d_lockref.count) {
			rcu_read_lock();
			data->victim = dentry;
			return D_WALK_QUIT;
		}
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count)
			d_shrink_add(dentry, &data->dispose);
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data = {.start = parent};

		INIT_LIST_HEAD(&data.dispose);
		d_walk(parent, &data, select_collect);

		if (!list_empty(&data.dispose)) {
			shrink_dentry_list(&data.dispose);
			continue;
		}

		cond_resched();
		if (!data.found)
			break;
		data.victim = NULL;
		d_walk(parent, &data, select_collect2);
		if (data.victim) {
			struct dentry *parent;
			spin_lock(&data.victim->d_lock);
			if (!shrink_lock_dentry(data.victim)) {
				spin_unlock(&data.victim->d_lock);
				rcu_read_unlock();
			} else {
				rcu_read_unlock();
				parent = data.victim->d_parent;
				if (parent != data.victim)
					__dput_to_list(parent, &data.dispose);
				__dentry_kill(data.victim);
			}
		}
		if (!list_empty(&data.dispose))
			shrink_dentry_list(&data.dispose);
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_roots)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
{
	struct dentry **victim = _data;
	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		*victim = dentry;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 */
void d_invalidate(struct dentry *dentry)
{
	bool had_submounts = false;
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode)
		return;

	shrink_dcache_parent(dentry);
	for (;;) {
		struct dentry *victim = NULL;
		d_walk(dentry, &victim, find_submount);
		if (!victim) {
			if (had_submounts)
				shrink_dcache_parent(dentry);
			return;
		}
		had_submounts = true;
		detach_mounts(victim);
		dput(victim);
	}
}
EXPORT_SYMBOL(d_invalidate);
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
static struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT |
						  __GFP_RECLAIMABLE);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_store_release(&dentry->d_name.name, dname); /* ^^^ */

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_anon(struct super_block *sb)
{
	return __d_alloc(sb, NULL);
}
EXPORT_SYMBOL(d_alloc_anon);

struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = d_alloc_anon(parent->d_sb);
	if (dentry) {
		dentry->d_flags |= DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 * This is used for pipes, sockets et.al. - the stuff that should
 * never be anyone's children or parents.  Unlike all other
 * dentries, these will not have RCU delay between dropping the
 * last reference and freeing them.
 *
 * The only user is alloc_file_pseudo() and that's what should
 * be considered a public interface.  Don't use directly.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (likely(dentry))
		dentry->d_flags |= DCACHE_NORCU;
	return dentry;
}

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
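/*
 * Install the dentry_operations for a new dentry and cache which methods
 * exist as DCACHE_OP_* bits in d_flags, so hot paths can test a flag bit
 * instead of chasing d_op pointers.  May be called only once per dentry,
 * before it goes live - hence the WARN_ON_ONCEs below.
 */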
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry: The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()).  This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
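/*
 * Work out the DCACHE_*_TYPE flags for a dentry that is about to point at
 * @inode.  The ->lookup/->get_link probes are cached in i_opflags so later
 * instantiations of the same inode can skip the method checks.
 */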
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	/*
	 * Decrement negative dentry count if it was in the LRU list.
	 */
	if (dentry->d_flags & DCACHE_LRU_LIST)
		this_cpu_dec(nr_dentry_negative);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);

/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else.  Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW & ~I_CREATING;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);

struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = d_alloc_anon(root_inode->i_sb);
		if (res)
			d_instantiate(res, root_inode);
		else
			iput(root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry *__d_instantiate_anon(struct dentry *dentry,
					   struct inode *inode,
					   bool disconnected)
{
	struct dentry *res;
	unsigned add_flags;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(dentry);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&dentry->d_lock);
	__d_set_inode_and_type(dentry, inode, add_flags);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	if (!disconnected) {
		hlist_bl_lock(&dentry->d_sb->s_roots);
		hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
		hlist_bl_unlock(&dentry->d_sb->s_roots);
	}
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);

	return dentry;

 out_iput:
	iput(inode);
	return res;
}

struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
{
	return __d_instantiate_anon(dentry, inode, true);
}
EXPORT_SYMBOL(d_instantiate_anon);

static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
{
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = d_alloc_anon(inode->i_sb);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	return __d_instantiate_anon(tmp, inode, disconnected);

out_iput:
	iput(inode);
	return res;
}
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations.  The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry.  If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, true);
}
EXPORT_SYMBOL(d_obtain_alias);
/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry.  If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry.  In case of an error the reference on the inode is
 * released.  A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, false);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
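/*
 * Compare a dentry's name against @name on behalf of @parent: a plain
 * length check plus dentry_cmp() in the common case, or the filesystem's
 * ->d_compare() when the parent has DCACHE_OP_COMPARE (e.g. for
 * case-insensitive filesystems).
 */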
static inline bool d_same_name(const struct dentry *dentry,
				const struct dentry *parent,
				const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}
/**
 * __d_lookup_rcu - search for a dentry (racy, store-free)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * @seqp: returns d_seq value at the point where the dentry was found
 * Returns: dentry, or NULL
 *
 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
 * resolution (store-free path walking) design described in
 * Documentation/filesystems/path-lookup.txt.
 *
 * This is not to be used outside core vfs.
 *
 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
 * held, and rcu_read_lock held. The returned dentry must not be stored into
 * without taking d_lock and checking d_seq sequence count against @seq
 * returned here.
 *
 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
 * function.
 *
 * Alternatively, __d_lookup_rcu may be called again to look up the child of
 * the returned dentry, so long as its parent's seqlock is checked after the
 * child is looked up. Thus, an interlocking stepping of sequence lock checks
 * is formed, giving integrity down the path walk.
 *
 * NOTE! The caller *has* to check the resulting dentry against the sequence
 * number we've returned before using any of the resulting dentry state!
 */
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
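
/*
 * Example (sketch): an in-kernel caller must hash the name first and drop
 * the returned reference with dput() when done ("name" is a hypothetical
 * NUL-terminated string):
 *
 *	struct qstr q = QSTR_INIT(name, strlen(name));
 *	struct dentry *child;
 *
 *	q.hash = full_name_hash(parent, q.name, q.len);
 *	child = d_lookup(parent, &q);
 *	if (child)
 *		dput(child);
 */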
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on a bad name ERR_PTR(-error) is
 * returned.
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
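
/*
 * Example (sketch): unlike open-coding d_lookup(), this honours a possible
 * ->d_hash() on the parent, so a rejected name can be reported:
 *
 *	struct qstr q = QSTR_INIT("name", 4);
 *	struct dentry *res = d_hash_and_lookup(dir, &q);
 *
 *	if (IS_ERR(res))	// ->d_hash() rejected the name
 *		return PTR_ERR(res);
 */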
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users.
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later.
 */
void d_delete(struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	spin_lock(&inode->i_lock);
	spin_lock(&dentry->d_lock);
	/*
	 * Are we the only user?
	 */
	if (dentry->d_lockref.count == 1) {
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
	} else {
		__d_drop(dentry);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_delete);
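
/*
 * Example (sketch): callers pair this with the on-disk removal, roughly as
 * the VFS unlink path does once ->unlink() has succeeded:
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error)
 *		d_delete(dentry);
 */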
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry *entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}
static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain.  All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway.  If it is, just return it;
		 * otherwise we really have to repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
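
/*
 * Example (sketch): roughly the pattern used by the VFS slow-path lookup
 * (__lookup_slow() in fs/namei.c).  Whoever gets a dentry back in in-lookup
 * state owns filling it in and must end the lookup with d_lookup_done():
 *
 *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *	struct dentry *dentry = d_alloc_parallel(parent, name, &wq);
 *
 *	if (!IS_ERR(dentry) && d_in_lookup(dentry)) {
 *		struct dentry *old;
 *
 *		old = parent->d_inode->i_op->lookup(parent->d_inode,
 *						    dentry, flags);
 *		d_lookup_done(dentry);
 *		if (old) {
 *			dput(dentry);
 *			dentry = old;
 *		}
 *	}
 */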
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);

	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);
/* inode->i_lock held if inode is non-NULL */

static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}
/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
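
/*
 * Example (sketch, hypothetical helper): a simple, non-exportable ->lookup()
 * can instantiate and hash in one step; a NULL inode hashes a negative
 * dentry, which caches the ENOENT result:
 *
 *	inode = myfs_find_inode(dir, &dentry->d_name);	// may be NULL
 *	d_add(dentry, inode);
 *	return NULL;
 */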
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it.  Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external.  Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal.  Give dentry's
			 * storage to target and make dentry internal.
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct dentry *old_parent, *p;
	struct inode *dir = NULL;
	unsigned n;

	WARN_ON(!dentry->d_inode);
	if (WARN_ON(dentry == target))
		return;

	BUG_ON(d_ancestor(target, dentry));
	old_parent = dentry->d_parent;
	p = d_ancestor(old_parent, target);
	if (IS_ROOT(dentry)) {
		BUG_ON(p);
		spin_lock(&target->d_parent->d_lock);
	} else if (!p) {
		/* target is not a descendant of dentry->d_parent */
		spin_lock(&target->d_parent->d_lock);
		spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		BUG_ON(p == dentry);
		spin_lock(&old_parent->d_lock);
		if (p != target)
			spin_lock_nested(&target->d_parent->d_lock,
					DENTRY_D_LOCK_NESTED);
	}
	spin_lock_nested(&dentry->d_lock, 2);
	spin_lock_nested(&target->d_lock, 3);

	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	if (!d_unhashed(dentry))
		___d_drop(dentry);
	if (!d_unhashed(target))
		___d_drop(target);

	/* ... and switch them in the tree */
	dentry->d_parent = target->d_parent;
	if (!exchange) {
		copy_name(dentry, target);
		target->d_hash.pprev = NULL;
		dentry->d_parent->d_lockref.count++;
		if (dentry != old_parent) /* wasn't IS_ROOT */
			WARN_ON(!--old_parent->d_lockref.count);
	} else {
		target->d_parent = old_parent;
		swap_names(dentry, target);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		__d_rehash(target);
		fsnotify_update_flags(target);
	}
	list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	__d_rehash(dentry);
	fsnotify_update_flags(dentry);
	fscrypt_handle_d_move(dentry);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);

	if (dentry->d_parent != old_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (dentry != old_parent)
		spin_unlock(&old_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/*
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned.  Otherwise NULL
 * is returned.  This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open.  So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				struct dentry *old_parent = dget(new->d_parent);
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
				dput(old_parent);
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
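
/*
 * Example (sketch): the canonical tail of an exportable filesystem's
 * ->lookup(), with "myfs_find_inode" a hypothetical helper that may return
 * NULL or an ERR_PTR; both are handled by d_splice_alias():
 *
 *	static struct dentry *myfs_lookup(struct inode *dir,
 *				struct dentry *dentry, unsigned int flags)
 *	{
 *		struct inode *inode = myfs_find_inode(dir, &dentry->d_name);
 *
 *		return d_splice_alias(inode, dentry);
 *	}
 */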
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
 * Returns false otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent
		 * trashing due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
EXPORT_SYMBOL(is_subdir);
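
/*
 * Example (sketch): ancestry checks of this kind guard operations that must
 * stay inside a given subtree:
 *
 *	if (!is_subdir(dentry, root))
 *		return -EINVAL;
 */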
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill);
}

EXPORT_SYMBOL(d_genocide);
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
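
/*
 * Example (sketch, hypothetical helper): a ->tmpfile() implementation
 * creates the inode with a link count of 1 and lets d_tmpfile() drop it to
 * 0 and name the dentry "#<ino>":
 *
 *	inode = myfs_new_inode(dir, mode);	// nlink == 1 at this point
 *	if (IS_ERR(inode))
 *		return PTR_ERR(inode);
 *	d_tmpfile(dentry, inode);
 *	return 0;
 */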
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
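
/*
 * Example (sketch): the hash size can be pinned from the kernel command
 * line, e.g. booting with "dhash_entries=2097152"; the value is passed to
 * alloc_large_system_hash() below, which rounds it to a power of two.
 */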
static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE_USERCOPY(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
		d_iname);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					NULL,
					0,
					0);
	d_hash_shift = 32 - d_hash_shift;
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);
void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();