/*
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */
#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/kasan.h>
#include "internal.h"
#include "mount.h"
/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - childrens' d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
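/*
 * Illustrative sketch (not part of the original file): taking d_lock on
 * two dentries with no ancestor relationship follows the address-order
 * rule above, e.g.:
 *
 *	if (dentry1 < dentry2) {
 *		spin_lock(&dentry1->d_lock);
 *		spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
 *	} else {
 *		spin_lock(&dentry2->d_lock);
 *		spin_lock_nested(&dentry1->d_lock, DENTRY_D_LOCK_NESTED);
 *	}
 */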
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

const struct qstr empty_name = QSTR_INIT("", 0);
EXPORT_SYMBOL(empty_name);
const struct qstr slash_name = QSTR_INIT("/", 1);
EXPORT_SYMBOL(slash_name);
/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(unsigned int hash)
{
	return dentry_hashtable + (hash >> (32 - d_hash_shift));
}
#define IN_LOOKUP_SHIFT 10
static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];

static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
}
/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);
#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We are expected to harvest
 * better code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;

	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif
/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs,
					const unsigned char *ct,
					unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
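/*
 * Worked example (assuming a little-endian machine): comparing a 3-byte
 * tail leaves tcount == 3 when the loop breaks, so bytemask_from_count(3)
 * yields 0x0000000000ffffff and only the low three bytes of a ^ b
 * participate in the final comparison; the padding bytes beyond the name
 * cannot cause a false mismatch.
 */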
#else

static inline int dentry_string_cmp(const unsigned char *cs,
					const unsigned char *ct,
					unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif
static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	/*
	 * Be careful about RCU walk racing with rename:
	 * use 'READ_ONCE' to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	const unsigned char *cs = READ_ONCE(dentry->d_name.name);

	return dentry_string_cmp(cs, ct, tcount);
}
struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}
void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		atomic_inc(&p->u.count);
		spin_unlock(&dentry->d_lock);
		name->name = p->name;
	} else {
		memcpy(name->inline_name, dentry->d_iname, DNAME_INLINE_LEN);
		spin_unlock(&dentry->d_lock);
		name->name = name->inline_name;
	}
}
EXPORT_SYMBOL(take_dentry_name_snapshot);
void release_dentry_name_snapshot(struct name_snapshot *name)
{
	if (unlikely(name->name != name->inline_name)) {
		struct external_name *p;
		p = container_of(name->name, struct external_name, name[0]);
		if (unlikely(atomic_dec_and_test(&p->u.count)))
			kfree_rcu(p, u.head);
	}
}
EXPORT_SYMBOL(release_dentry_name_snapshot);
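/*
 * Typical usage sketch (hypothetical caller, not from the original file):
 * a name snapshot keeps the name valid across an operation that may race
 * with rename or dentry teardown:
 *
 *	struct name_snapshot snap;
 *
 *	take_dentry_name_snapshot(&snap, dentry);
 *	do_something_with(snap.name);		// hypothetical helper
 *	release_dentry_name_snapshot(&snap);
 */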
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	dentry->d_inode = NULL;
}
static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;

	raw_write_seqcount_begin(&dentry->d_seq);
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	raw_write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}
/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}
/*
 * dentry_lru_(add|del)() must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
	else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
		dentry->d_flags |= DCACHE_REFERENCED;
}
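/*
 * To summarize the rules above: d_flags can legitimately hold only three
 * LRU-related states, which is exactly what D_FLAG_VERIFY() checks:
 *
 *	0					- on no list
 *	DCACHE_LRU_LIST				- on the superblock LRU
 *	DCACHE_LRU_LIST|DCACHE_SHRINK_LIST	- on a private shrink list
 *
 * nr_dentry_unused counts entries in either of the last two states.
 */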
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock
 * ___d_drop doesn't mark dentry as "unhashed"
 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
 */
static void ___d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		hlist_bl_unlock(b);
		/* After this call, in-progress rcu-walk path lookup will fail. */
		write_seqcount_invalidate(&dentry->d_seq);
	}
}
void __d_drop(struct dentry *dentry)
{
	___d_drop(dentry);
	dentry->d_hash.pprev = NULL;
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
{
	struct dentry *next;
	/*
	 * Inform d_walk() and shrink_dentry_list() that we are no longer
	 * attached to the dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (unlikely(list_empty(&dentry->d_child)))
		return;
	__list_del_entry(&dentry->d_child);
	/*
	 * Cursors can move around the list of children. While we'd been
	 * a normal list member, it didn't matter - ->d_child.next would've
	 * been updated. However, from now on it won't be and for the
	 * things like d_walk() it might end up with a nasty surprise.
	 * Normally d_walk() doesn't care about cursors moving around -
	 * ->d_lock on parent prevents that and since a cursor has no children
	 * of its own, we get through it without ever unlocking the parent.
	 * There is one exception, though - if we ascend from a child that
	 * gets killed as soon as we unlock it, the next sibling is found
	 * using the value left in its ->d_child.next. And if _that_
	 * pointed to a cursor, and cursor got moved (e.g. by lseek())
	 * before d_walk() regains parent->d_lock, we'll end up skipping
	 * everything the cursor had been moved past.
	 *
	 * Solution: make sure that the pointer left behind in ->d_child.next
	 * points to something that won't be moving around. I.e. skip the
	 * cursors.
	 */
	while (dentry->d_child.next != &parent->d_subdirs) {
		next = list_entry(dentry->d_child.next, struct dentry, d_child);
		if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
			break;
		dentry->d_child.next = next->d_child.next;
	}
}
static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	dentry_unlist(dentry, parent);
	if (parent)
		spin_unlock(&parent->d_lock);
	if (dentry->d_inode)
		dentry_unlink_inode(dentry);
	else
		spin_unlock(&dentry->d_lock);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	return dentry; /* try again with same dentry */
}
static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = READ_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry) {
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (unlikely(dentry->d_lockref.count < 0)) {
			spin_unlock(&parent->d_lock);
			parent = NULL;
		}
	} else {
		parent = NULL;
	}
	return parent;
}
/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use "put_or_lock".
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return true;
		}
		return false;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return true;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = READ_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return true;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return true;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return false;
}
/*
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	might_sleep();

	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	WARN_ON(d_in_lookup(dentry));

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry) {
		cond_resched();
		goto repeat;
	}
}
EXPORT_SYMBOL(dput);
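/*
 * Reference counting sketch (hypothetical caller): every dget() - or any
 * lookup that hands back a referenced dentry - must be paired with
 * exactly one dput():
 *
 *	struct dentry *d = dget(dentry);
 *	...use d...
 *	dput(d);	// may free the dentry, or just leave it cached
 */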
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}
struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = READ_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == READ_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}
static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}
/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, 1024);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
		cond_resched();
	} while (list_lru_count(&sb->s_dentry_lru) > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);
/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};
/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
void d_walk(struct dentry *parent, void *data,
	    enum d_walk_ret (*enter)(void *, struct dentry *),
	    void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
			continue;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		/* go into the first sibling still alive */
		do {
			next = child->d_child.next;
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
		} while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}
EXPORT_SYMBOL_GPL(d_walk);
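/*
 * Example of a d_walk() callback (illustrative sketch, not from the
 * original file): counting every dentry in a subtree. The callback runs
 * with the dentry's d_lock held, so it must not sleep or take locks that
 * nest outside d_lock:
 *
 *	static enum d_walk_ret count_one(void *data, struct dentry *dentry)
 *	{
 *		(*(unsigned long *)data)++;
 *		return D_WALK_CONTINUE;
 *	}
 *
 *	unsigned long n = 0;
 *	d_walk(parent, &n, count_one, NULL);
 */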
struct check_mount {
	struct vfsmount *mnt;
	unsigned int mounted;
};

static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
{
	struct check_mount *info = data;
	struct path path = { .mnt = info->mnt, .dentry = dentry };

	if (likely(!d_mountpoint(dentry)))
		return D_WALK_CONTINUE;
	if (__path_is_mountpoint(&path)) {
		info->mounted = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * path_has_submounts - check for mounts over a dentry in the
 *                      current namespace.
 * @parent: path to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point in the current namespace.
 */
int path_has_submounts(const struct path *parent)
{
	struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };

	read_seqlock_excl(&mount_lock);
	d_walk(parent->dentry, &data, path_check_mount, NULL);
	read_sequnlock_excl(&mount_lock);

	return data.mounted;
}
EXPORT_SYMBOL(path_has_submounts);
/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		ret = -EBUSY;
		if (!d_mountpoint(dentry)) {
			dentry->d_flags |= DCACHE_MOUNTED;
			ret = 0;
		}
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}
/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}
/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);
static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendents; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}
static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}
struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && list_empty(&data->select.dispose))
		__d_drop(data->select.start);
}
/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * no dcache lock.
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (!list_empty(&data.select.dispose))
			shrink_dentry_list(&data.select.dispose);
		else if (!data.mountpoint)
			return;

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}
		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);
/**
 * __d_alloc	-	allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;
	int err;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (unlikely(!name)) {
		name = &slash_name;
		dname = dentry->d_iname;
	} else if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len,
						  GFP_KERNEL_ACCOUNT);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1, sizeof(unsigned long)));
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	if (dentry->d_op && dentry->d_op->d_init) {
		err = dentry->d_op->d_init(dentry);
		if (err) {
			if (dname_external(dentry))
				kfree(external_name(dentry));
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	}

	this_cpu_inc(nr_dentry);

	return dentry;
}
/**
 * d_alloc	-	allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;
	dentry->d_flags |= DCACHE_RCUACCESS;
	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);
struct dentry *d_alloc_cursor(struct dentry * parent)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, NULL);
	if (dentry) {
		dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
		dentry->d_parent = dget(parent);
	}
	return dentry;
}
/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.hash_len = hashlen_string(parent, name);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);
void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE	|
				DCACHE_OP_REAL));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;
	if (op->d_real)
		dentry->d_flags |= DCACHE_OP_REAL;
}
EXPORT_SYMBOL(d_set_d_op);
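/*
 * Usage sketch (hypothetical filesystem, not from the original file): a
 * filesystem normally sets sb->s_d_op once at mount time, and __d_alloc()
 * then applies it to every new dentry through d_set_d_op():
 *
 *	static const struct dentry_operations example_dentry_ops = {
 *		.d_revalidate	= example_d_revalidate,	// hypothetical
 *		.d_delete	= always_delete_dentry,
 *	};
 *	...
 *	sb->s_d_op = &example_dentry_ops;
 */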
/**
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry - The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()). This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);
static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->get_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);
	WARN_ON(d_in_lookup(dentry));

	spin_lock(&dentry->d_lock);
	hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	raw_write_seqcount_begin(&dentry->d_seq);
	__d_set_inode_and_type(dentry, inode, add_flags);
	raw_write_seqcount_end(&dentry->d_seq);
	fsnotify_update_flags(dentry);
	spin_unlock(&dentry->d_lock);
}
/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
		__d_instantiate(entry, inode);
		spin_unlock(&inode->i_lock);
	}
}
EXPORT_SYMBOL(d_instantiate);
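/*
 * Typical create-path sketch (hypothetical caller): the inode reference
 * obtained from inode allocation is handed over to the dcache here, per
 * the NOTE above:
 *
 *	inode = new_inode(dir->i_sb);		// i_count already 1
 *	if (!inode)
 *		return -ENOMEM;
 *	d_instantiate(dentry, inode);		// dcache now owns that ref
 */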
/*
 * This should be equivalent to d_instantiate() + unlock_new_inode(),
 * with lockdep-related part of unlock_new_inode() done before
 * anything else. Use that instead of open-coding d_instantiate()/
 * unlock_new_inode() combinations.
 */
void d_instantiate_new(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	BUG_ON(!inode);
	lockdep_annotate_inode_mutex_key(inode);
	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	smp_mb();
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_instantiate_new);
/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	security_d_instantiate(entry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
struct dentry *d_make_root(struct inode *root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		res = __d_alloc(root_inode->i_sb, NULL);
		if (res) {
			res->d_flags |= DCACHE_RCUACCESS;
			d_instantiate(res, root_inode);
		} else {
			iput(root_inode);
		}
	}
	return res;
}
EXPORT_SYMBOL(d_make_root);
static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (hlist_empty(&inode->i_dentry))
		return NULL;
	alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
	__dget(alias);
	return alias;
}

/**
 * d_find_any_alias - find any alias for a given inode
 * @inode: inode to find an alias for
 *
 * If any aliases exist for the given inode, take and return a
 * reference for one of them. If no aliases exist, return %NULL.
 */
struct dentry *d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}
EXPORT_SYMBOL(d_find_any_alias);
static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
{
	struct dentry *tmp;
	struct dentry *res;
	unsigned add_flags;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, NULL);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	security_d_instantiate(tmp, inode);
	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	add_flags = d_flags_for_inode(inode);

	if (disconnected)
		add_flags |= DCACHE_DISCONNECTED;

	spin_lock(&tmp->d_lock);
	__d_set_inode_and_type(tmp, inode, add_flags);
	hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);

	return tmp;

 out_iput:
	iput(inode);
	return res;
}
/**
 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	return __d_obtain_alias(inode, 1);
}
EXPORT_SYMBOL(d_obtain_alias);

/**
 * d_obtain_root - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain an IS_ROOT dentry for the root of a filesystem.
 *
 * We must ensure that directory inodes only ever have one dentry. If a
 * dentry is found, that is returned instead of allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is
 * released. A %NULL or IS_ERR inode may be passed in and the error will
 * be propagated to the return value, with a %NULL @inode replaced by
 * ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_root(struct inode *inode)
{
	return __d_obtain_alias(inode, 0);
}
EXPORT_SYMBOL(d_obtain_root);
/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode: the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name: the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	struct dentry *found, *res;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (found) {
		iput(inode);
		return found;
	}
	if (d_in_lookup(dentry)) {
		found = d_alloc_parallel(dentry->d_parent, name,
					dentry->d_wait);
		if (IS_ERR(found) || !d_in_lookup(found)) {
			iput(inode);
			return found;
		}
	} else {
		found = d_alloc(dentry->d_parent, name);
		if (!found) {
			iput(inode);
			return ERR_PTR(-ENOMEM);
		}
	}
	res = d_splice_alias(inode, found);
	if (res) {
		dput(found);
		return res;
	}
	return found;
}
EXPORT_SYMBOL(d_add_ci);
static inline bool d_same_name(const struct dentry *dentry,
				const struct dentry *parent,
				const struct qstr *name)
{
	if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
		if (dentry->d_name.len != name->len)
			return false;
		return dentry_cmp(dentry, name->name, name->len) == 0;
	}
	return parent->d_op->d_compare(dentry,
				       dentry->d_name.len, dentry->d_name.name,
				       name) == 0;
}
2129 * __d_lookup_rcu - search for a dentry (racy, store-free)
2130 * @parent: parent dentry
2131 * @name: qstr of name we wish to find
2132 * @seqp: returns d_seq value at the point where the dentry was found
2133 * Returns: dentry, or NULL
2135 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2136 * resolution (store-free path walking) design described in
2137 * Documentation/filesystems/path-lookup.txt.
2139 * This is not to be used outside core vfs.
2141 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2142 * held, and rcu_read_lock held. The returned dentry must not be stored into
2143 * without taking d_lock and checking d_seq sequence count against @seq
2146 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2149 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2150 * the returned dentry, so long as its parent's seqlock is checked after the
2151 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2152 * is formed, giving integrity down the path walk.
2154 * NOTE! The caller *has* to check the resulting dentry against the sequence
2155 * number we've returned before using any of the resulting dentry state!
struct dentry *__d_lookup_rcu(const struct dentry *parent,
				const struct qstr *name,
				unsigned *seqp)
{
	u64 hashlen = name->hash_len;
	const unsigned char *str = name->name;
	struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
	struct hlist_bl_node *node;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Carefully use d_seq when comparing a candidate dentry, to avoid
	 * races with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
		unsigned seq;

seqretry:
		/*
		 * The dentry sequence count protects us from concurrent
		 * renames, and thus protects parent and name fields.
		 *
		 * The caller must perform a seqcount check in order
		 * to do anything useful with the returned dentry.
		 *
		 * NOTE! We do a "raw" seqcount_begin here. That means that
		 * we don't wait for the sequence count to stabilize if it
		 * is in the middle of a sequence change. If we do the slow
		 * dentry compare, we will do seqretries until it is stable,
		 * and if we end up with a successful lookup, we actually
		 * want to exit RCU lookup anyway.
		 *
		 * Note that raw_seqcount_begin still *does* smp_rmb(), so
		 * we are still guaranteed NUL-termination of ->d_name.name.
		 */
		seq = raw_seqcount_begin(&dentry->d_seq);
		if (dentry->d_parent != parent)
			continue;
		if (d_unhashed(dentry))
			continue;

		if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
			int tlen;
			const char *tname;
			if (dentry->d_name.hash != hashlen_hash(hashlen))
				continue;
			tlen = dentry->d_name.len;
			tname = dentry->d_name.name;
			/* we want a consistent (name,len) pair */
			if (read_seqcount_retry(&dentry->d_seq, seq)) {
				cpu_relax();
				goto seqretry;
			}
			if (parent->d_op->d_compare(dentry,
						    tlen, tname, name) != 0)
				continue;
		} else {
			if (dentry->d_name.hash_len != hashlen)
				continue;
			if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
				continue;
		}
		*seqp = seq;
		return dentry;
	}
	return NULL;
}
/**
 * d_lookup - search for a dentry
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * d_lookup searches the children of the parent dentry for the name in
 * question. If the dentry is found its reference count is incremented and the
 * dentry is returned. The caller must use dput to free the entry when it has
 * finished using it. %NULL is returned if the dentry does not exist.
 */
struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
{
	struct dentry *dentry;
	unsigned seq;

	do {
		seq = read_seqbegin(&rename_lock);
		dentry = __d_lookup(parent, name);
		if (dentry)
			break;
	} while (read_seqretry(&rename_lock, seq));
	return dentry;
}
EXPORT_SYMBOL(d_lookup);
/**
 * __d_lookup - search for a dentry (racy)
 * @parent: parent dentry
 * @name: qstr of name we wish to find
 * Returns: dentry, or NULL
 *
 * __d_lookup is like d_lookup, however it may (rarely) return a
 * false-negative result due to unrelated rename activity.
 *
 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
 * however it must be used carefully, eg. with a following d_lookup in
 * the case of failure.
 *
 * __d_lookup callers must be commented.
 */
struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = d_hash(hash);
	struct hlist_bl_node *node;
	struct dentry *found = NULL;
	struct dentry *dentry;

	/*
	 * Note: There is significant duplication with __d_lookup_rcu which is
	 * required to prevent single threaded performance regressions
	 * especially on architectures where smp_rmb (in seqcounts) are costly.
	 * Keep the two functions in sync.
	 */

	/*
	 * The hash list is protected using RCU.
	 *
	 * Take d_lock when comparing a candidate dentry, to avoid races
	 * with d_move().
	 *
	 * It is possible that concurrent renames can mess up our list
	 * walk here and result in missing our dentry, resulting in the
	 * false-negative result. d_lookup() protects against concurrent
	 * renames using rename_lock seqlock.
	 *
	 * See Documentation/filesystems/path-lookup.txt for more details.
	 */
	rcu_read_lock();

	hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {

		if (dentry->d_name.hash != hash)
			continue;

		spin_lock(&dentry->d_lock);
		if (dentry->d_parent != parent)
			goto next;
		if (d_unhashed(dentry))
			goto next;

		if (!d_same_name(dentry, parent, name))
			goto next;

		dentry->d_lockref.count++;
		found = dentry;
		spin_unlock(&dentry->d_lock);
		break;
next:
		spin_unlock(&dentry->d_lock);
	}
	rcu_read_unlock();

	return found;
}
/**
 * d_hash_and_lookup - hash the qstr then search for a dentry
 * @dir: Directory to search in
 * @name: qstr of name we wish to find
 *
 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
 */
struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
{
	/*
	 * Check for a fs-specific hash function. Note that we must
	 * calculate the standard hash first, as the d_op->d_hash()
	 * routine may choose to leave the hash value unchanged.
	 */
	name->hash = full_name_hash(dir, name->name, name->len);
	if (dir->d_flags & DCACHE_OP_HASH) {
		int err = dir->d_op->d_hash(dir, name);
		if (unlikely(err < 0))
			return ERR_PTR(err);
	}
	return d_lookup(dir, name);
}
EXPORT_SYMBOL(d_hash_and_lookup);
/*
 * When a file is deleted, we have two options:
 * - turn this dentry into a negative dentry
 * - unhash this dentry and free it.
 *
 * Usually, we want to just turn this into
 * a negative dentry, but if anybody else is
 * currently using the dentry or the inode
 * we can't do that and we fall back on removing
 * it from the hash queues and waiting for
 * it to be deleted later when it has no users
 */

/**
 * d_delete - delete a dentry
 * @dentry: The dentry to delete
 *
 * Turn the dentry into a negative dentry if possible, otherwise
 * remove it from the hash queues so it can be deleted later
 */
void d_delete(struct dentry * dentry)
{
	struct inode *inode;
	int isdir = 0;
	/*
	 * Are we the only user?
	 */
again:
	spin_lock(&dentry->d_lock);
	inode = dentry->d_inode;
	isdir = S_ISDIR(inode->i_mode);
	if (dentry->d_lockref.count == 1) {
		if (!spin_trylock(&inode->i_lock)) {
			spin_unlock(&dentry->d_lock);
			cpu_relax();
			goto again;
		}
		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
		dentry_unlink_inode(dentry);
		fsnotify_nameremove(dentry, isdir);
		return;
	}

	if (!d_unhashed(dentry))
		__d_drop(dentry);

	spin_unlock(&dentry->d_lock);

	fsnotify_nameremove(dentry, isdir);
}
EXPORT_SYMBOL(d_delete);
static void __d_rehash(struct dentry *entry)
{
	struct hlist_bl_head *b = d_hash(entry->d_name.hash);

	hlist_bl_lock(b);
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}

/**
 * d_rehash	- add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.
 */
void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	__d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{

	for (;;) {
		unsigned n = dir->i_dir_seq;
		if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
			return n;
		cpu_relax();
	}
}

static inline void end_dir_add(struct inode *dir, unsigned n)
{
	smp_store_release(&dir->i_dir_seq, n + 2);
}

static void d_wait_lookup(struct dentry *dentry)
{
	if (d_in_lookup(dentry)) {
		DECLARE_WAITQUEUE(wait, current);
		add_wait_queue(dentry->d_wait, &wait);
		do {
			set_current_state(TASK_UNINTERRUPTIBLE);
			spin_unlock(&dentry->d_lock);
			schedule();
			spin_lock(&dentry->d_lock);
		} while (d_in_lookup(dentry));
	}
}
struct dentry *d_alloc_parallel(struct dentry *parent,
				const struct qstr *name,
				wait_queue_head_t *wq)
{
	unsigned int hash = name->hash;
	struct hlist_bl_head *b = in_lookup_hash(parent, hash);
	struct hlist_bl_node *node;
	struct dentry *new = d_alloc(parent, name);
	struct dentry *dentry;
	unsigned seq, r_seq, d_seq;

	if (unlikely(!new))
		return ERR_PTR(-ENOMEM);

retry:
	rcu_read_lock();
	seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
	r_seq = read_seqbegin(&rename_lock);
	dentry = __d_lookup_rcu(parent, name, &d_seq);
	if (unlikely(dentry)) {
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}
		if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
			rcu_read_unlock();
			dput(dentry);
			goto retry;
		}
		rcu_read_unlock();
		dput(new);
		return dentry;
	}
	if (unlikely(read_seqretry(&rename_lock, r_seq))) {
		rcu_read_unlock();
		goto retry;
	}

	if (unlikely(seq & 1)) {
		rcu_read_unlock();
		goto retry;
	}

	hlist_bl_lock(b);
	if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
		hlist_bl_unlock(b);
		rcu_read_unlock();
		goto retry;
	}
	/*
	 * No changes for the parent since the beginning of d_lookup().
	 * Since all removals from the chain happen with hlist_bl_lock(),
	 * any potential in-lookup matches are going to stay here until
	 * we unlock the chain. All fields are stable in everything
	 * we encounter.
	 */
	hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
		if (dentry->d_name.hash != hash)
			continue;
		if (dentry->d_parent != parent)
			continue;
		if (!d_same_name(dentry, parent, name))
			continue;
		hlist_bl_unlock(b);
		/* now we can try to grab a reference */
		if (!lockref_get_not_dead(&dentry->d_lockref)) {
			rcu_read_unlock();
			goto retry;
		}

		rcu_read_unlock();
		/*
		 * somebody is likely to be still doing lookup for it;
		 * wait for them to finish
		 */
		spin_lock(&dentry->d_lock);
		d_wait_lookup(dentry);
		/*
		 * it's not in-lookup anymore; in principle we should repeat
		 * everything from dcache lookup, but it's likely to be what
		 * d_lookup() would've found anyway. If it is, just return it;
		 * otherwise we really have to repeat the whole thing.
		 */
		if (unlikely(dentry->d_name.hash != hash))
			goto mismatch;
		if (unlikely(dentry->d_parent != parent))
			goto mismatch;
		if (unlikely(d_unhashed(dentry)))
			goto mismatch;
		if (unlikely(!d_same_name(dentry, parent, name)))
			goto mismatch;
		/* OK, it *is* a hashed match; return it */
		spin_unlock(&dentry->d_lock);
		dput(new);
		return dentry;
	}
	rcu_read_unlock();
	/* we can't take ->d_lock here; it's OK, though. */
	new->d_flags |= DCACHE_PAR_LOOKUP;
	new->d_wait = wq;
	hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
	hlist_bl_unlock(b);
	return new;
mismatch:
	spin_unlock(&dentry->d_lock);
	dput(dentry);
	goto retry;
}
EXPORT_SYMBOL(d_alloc_parallel);
void __d_lookup_done(struct dentry *dentry)
{
	struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
						 dentry->d_name.hash);
	hlist_bl_lock(b);
	dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
	__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
	wake_up_all(dentry->d_wait);
	dentry->d_wait = NULL;
	hlist_bl_unlock(b);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_lru);
}
EXPORT_SYMBOL(__d_lookup_done);
/* inode->i_lock held if inode is non-NULL */

static inline void __d_add(struct dentry *dentry, struct inode *inode)
{
	struct inode *dir = NULL;
	unsigned n;
	spin_lock(&dentry->d_lock);
	if (unlikely(d_in_lookup(dentry))) {
		dir = dentry->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(dentry);
	}
	if (inode) {
		unsigned add_flags = d_flags_for_inode(inode);
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
		raw_write_seqcount_begin(&dentry->d_seq);
		__d_set_inode_and_type(dentry, inode, add_flags);
		raw_write_seqcount_end(&dentry->d_seq);
		fsnotify_update_flags(dentry);
	}
	__d_rehash(dentry);
	if (dir)
		end_dir_add(dir, n);
	spin_unlock(&dentry->d_lock);
	if (inode)
		spin_unlock(&inode->i_lock);
}

/**
 * d_add - add dentry to hash queues
 * @entry: dentry to add
 * @inode: The inode to attach to this dentry
 *
 * This adds the entry to the hash queues and initializes @inode.
 * The entry was actually filled in earlier during d_alloc().
 */
void d_add(struct dentry *entry, struct inode *inode)
{
	if (inode) {
		security_d_instantiate(entry, inode);
		spin_lock(&inode->i_lock);
	}
	__d_add(entry, inode);
}
EXPORT_SYMBOL(d_add);
/**
 * d_exact_alias - find and hash an exact unhashed alias
 * @entry: dentry to add
 * @inode: The inode to go with this dentry
 *
 * If an unhashed dentry with the same name/parent and desired
 * inode already exists, hash and return it. Otherwise, return
 * NULL.
 *
 * Parent directory should be locked.
 */
struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
{
	struct dentry *alias;
	unsigned int hash = entry->d_name.hash;

	spin_lock(&inode->i_lock);
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (!d_same_name(alias, entry->d_parent, &entry->d_name))
			continue;
		spin_lock(&alias->d_lock);
		if (!d_unhashed(alias)) {
			spin_unlock(&alias->d_lock);
			alias = NULL;
		} else {
			__dget_dlock(alias);
			__d_rehash(alias);
			spin_unlock(&alias->d_lock);
		}
		spin_unlock(&inode->i_lock);
		return alias;
	}
	spin_unlock(&inode->i_lock);
	return NULL;
}
EXPORT_SYMBOL(d_exact_alias);
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, const struct qstr *name)
{
	BUG_ON(!inode_is_locked(dentry->d_parent->d_inode));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
static void swap_names(struct dentry *dentry, struct dentry *target)
{
	if (unlikely(dname_external(target))) {
		if (unlikely(dname_external(dentry))) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (unlikely(dname_external(dentry))) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal.
			 */
			unsigned int i;
			BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
			for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
				swap(((long *) &dentry->d_iname)[i],
				     ((long *) &target->d_iname)[i]);
			}
		}
	}
	swap(dentry->d_name.hash_len, target->d_name.hash_len);
}
static void copy_name(struct dentry *dentry, struct dentry *target)
{
	struct external_name *old_name = NULL;
	if (unlikely(dname_external(dentry)))
		old_name = external_name(dentry);
	if (unlikely(dname_external(target))) {
		atomic_inc(&external_name(target)->u.count);
		dentry->d_name = target->d_name;
	} else {
		memcpy(dentry->d_iname, target->d_name.name,
				target->d_name.len + 1);
		dentry->d_name.name = dentry->d_iname;
		dentry->d_name.hash_len = target->d_name.hash_len;
	}
	if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
		kfree_rcu(old_name, u.head);
}
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
{
	if (target->d_parent != dentry->d_parent)
		spin_unlock(&dentry->d_parent->d_lock);
	if (target->d_parent != target)
		spin_unlock(&target->d_parent->d_lock);
	spin_unlock(&target->d_lock);
	spin_unlock(&dentry->d_lock);
}
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch, unless we are going to rehash
 * it. Note that if we *do* unhash the target, we are not allowed
 * to rehash it without giving it a new name/hash key - whether
 * we swap or overwrite the names here, the resulting name won't match
 * the reality in the filesystem; it's only there for d_path() purposes.
 * Note that all of this is happening under rename_lock, so any hash
 * lookup seeing it in the middle of manipulations will be discarded
 * anyway. So we do not care what happens to the hash key in that case.
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 * @exchange: exchange the two dentries
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock, the i_mutex of the source and target directories,
 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
 */
static void __d_move(struct dentry *dentry, struct dentry *target,
		     bool exchange)
{
	struct inode *dir = NULL;
	unsigned n;
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);
	if (unlikely(d_in_lookup(target))) {
		dir = target->d_parent->d_inode;
		n = start_dir_add(dir);
		__d_lookup_done(target);
	}

	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);

	/* unhash both */
	/* ___d_drop does write_seqcount_barrier, but they're OK to nest. */
	___d_drop(dentry);
	___d_drop(target);

	/* Switch the names.. */
	if (exchange)
		swap_names(dentry, target);
	else
		copy_name(dentry, target);

	/* rehash in new place(s) */
	__d_rehash(dentry);
	if (exchange)
		__d_rehash(target);
	else
		target->d_hash.pprev = NULL;

	/* ... and switch them in the tree */
	if (IS_ROOT(dentry)) {
		/* splicing a tree */
		dentry->d_flags |= DCACHE_RCUACCESS;
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		list_del_init(&target->d_child);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
	} else {
		/* swapping two dentries */
		swap(dentry->d_parent, target->d_parent);
		list_move(&target->d_child, &target->d_parent->d_subdirs);
		list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
		if (exchange)
			fsnotify_update_flags(target);
		fsnotify_update_flags(dentry);
	}

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	if (dir)
		end_dir_add(dir, n);
	dentry_unlock_for_move(dentry, target);
}
/**
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. See the locking
 * requirements for __d_move.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target, false);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
/**
 * d_exchange - exchange two dentries
 * @dentry1: first dentry
 * @dentry2: second dentry
 */
void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
{
	write_seqlock(&rename_lock);

	WARN_ON(!dentry1->d_inode);
	WARN_ON(!dentry2->d_inode);
	WARN_ON(IS_ROOT(dentry1));
	WARN_ON(IS_ROOT(dentry2));

	__d_move(dentry1, dentry2, true);

	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL_GPL(d_exchange);
/**
 * d_ancestor - search for an ancestor
 * @p1: ancestor dentry
 * @p2: child dentry
 *
 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
 * an ancestor of p2, else NULL.
 */
struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
{
	struct dentry *p;

	for (p = p2; !IS_ROOT(p); p = p->d_parent) {
		if (p->d_parent == p1)
			return p;
	}
	return NULL;
}
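/*
 * Worked example: for the path /a/b/c, d_ancestor(a, c) returns the dentry
 * of "b" (the child of "a" on the way down to "c"), while d_ancestor(c, a)
 * returns NULL, because "c" is not an ancestor of "a".
 */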
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static int __d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL;
	struct rw_semaphore *m2 = NULL;
	int ret = -ESTALE;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/* See lock_rename() */
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!inode_trylock_shared(alias->d_parent->d_inode))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_rwsem;
out_unalias:
	__d_move(alias, dentry, false);
	ret = 0;
out_err:
	if (m2)
		up_read(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has an IS_ROOT alias, then d_move that in
 * place of the given dentry and return it, else simply d_add the inode
 * to the dentry and return NULL.
 *
 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
 * we should error out: directories can't have multiple aliases.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 * Cluster filesystems may call this function with a negative, hashed dentry.
 * In that case, we know that the inode will be a regular file, and also this
 * will only occur during atomic_open. So we need to check for the dentry
 * being already hashed only in the final case.
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	BUG_ON(!d_unhashed(dentry));

	if (!inode)
		goto out;

	security_d_instantiate(dentry, inode);
	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode)) {
		struct dentry *new = __d_find_any_alias(inode);
		if (unlikely(new)) {
			/* The reference to new ensures it remains an alias */
			spin_unlock(&inode->i_lock);
			write_seqlock(&rename_lock);
			if (unlikely(d_ancestor(new, dentry))) {
				write_sequnlock(&rename_lock);
				dput(new);
				new = ERR_PTR(-ELOOP);
				pr_warn_ratelimited(
					"VFS: Lookup of '%s' in %s %s"
					" would have caused loop\n",
					dentry->d_name.name,
					inode->i_sb->s_type->name,
					inode->i_sb->s_id);
			} else if (!IS_ROOT(new)) {
				int err = __d_unalias(inode, dentry, new);
				write_sequnlock(&rename_lock);
				if (err) {
					dput(new);
					new = ERR_PTR(err);
				}
			} else {
				__d_move(new, dentry, false);
				write_sequnlock(&rename_lock);
			}
			iput(inode);
			return new;
		}
	}
out:
	__d_add(dentry, inode);
	return NULL;
}
EXPORT_SYMBOL(d_splice_alias);
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
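/*
 * Worked example: with *buflen == 5 and "abc" (namelen == 3), the buffer
 * pointer moves back by 3 bytes and *buflen drops to 2; a further prepend
 * of 3 or more bytes would leave *buflen negative and fail with
 * -ENAMETOOLONG before anything is copied.
 */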
/**
 * prepend_name - prepend a pathname in front of current buffer pointer
 * @buffer: buffer pointer
 * @buflen: allocated length of the buffer
 * @name:   name string and length qstr structure
 *
 * With RCU path tracing, it may race with d_move(). Use READ_ONCE() to
 * make sure that either the old or the new name pointer and length are
 * fetched. However, there may be mismatch between length and pointer.
 * The length cannot be trusted, we need to copy it byte-by-byte until
 * the length is reached or a null byte is found. It also prepends "/" at
 * the beginning of the name. The sequence number check at the caller will
 * retry it again when a d_move() does happen. So any garbage in the buffer
 * due to mismatched pointer and length will be discarded.
 *
 * Data dependency barrier is needed to make sure that we see that terminating
 * NUL. Alpha strikes again, film at 11...
 */
static int prepend_name(char **buffer, int *buflen, const struct qstr *name)
{
	const char *dname = READ_ONCE(name->name);
	u32 dlen = READ_ONCE(name->len);
	char *p;

	smp_read_barrier_depends();

	*buflen -= dlen + 1;
	if (*buflen < 0)
		return -ENAMETOOLONG;
	p = *buffer -= dlen + 1;
	*p++ = '/';
	while (dlen--) {
		char c = *dname++;
		if (!c)
			break;
		*p++ = c;
	}
	return 0;
}
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * The function will first try to write out the pathname without taking any
 * lock other than the RCU read lock to make sure that dentries won't go away.
 * It only checks the sequence number of the global rename_lock as any change
 * in the dentry's d_seq will be preceded by changes in the rename_lock
 * sequence number. If the sequence number had been changed, it will restart
 * the whole pathname back-tracing sequence again by taking the rename_lock.
 * In this case, there is no need to take the RCU read lock as the recursive
 * parent pointer references will keep the dentry chain alive as long as no
 * rename operation is performed.
 */
static int prepend_path(const struct path *path,
			const struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry;
	struct vfsmount *vfsmnt;
	struct mount *mnt;
	int error = 0;
	unsigned seq, m_seq = 0;
	char *bptr;
	int blen;

	rcu_read_lock();
restart_mnt:
	read_seqbegin_or_lock(&mount_lock, &m_seq);
	seq = 0;
	rcu_read_lock();
restart:
	bptr = *buffer;
	blen = *buflen;
	error = 0;
	dentry = path->dentry;
	vfsmnt = path->mnt;
	mnt = real_mount(vfsmnt);
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			struct mount *parent = READ_ONCE(mnt->mnt_parent);
			/* Escaped? */
			if (dentry != vfsmnt->mnt_root) {
				bptr = *buffer;
				blen = *buflen;
				error = 3;
				break;
			}
			/* Global root? */
			if (mnt != parent) {
				dentry = READ_ONCE(mnt->mnt_mountpoint);
				mnt = parent;
				vfsmnt = &mnt->mnt;
				continue;
			}
			if (!error)
				error = is_mounted(vfsmnt) ? 1 : 2;
			break;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		error = prepend_name(&bptr, &blen, &dentry->d_name);
		if (error)
			break;

		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);

	if (!(m_seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&mount_lock, m_seq)) {
		m_seq = 1;
		goto restart_mnt;
	}
	done_seqretry(&mount_lock, m_seq);

	if (error >= 0 && bptr == *buffer) {
		if (--blen < 0)
			error = -ENAMETOOLONG;
		else
			*--bptr = '/';
	}
	*buffer = bptr;
	*buflen = blen;
	return error;
}
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.
 *
 * "buflen" should be positive.
 *
 * If the path is not reachable from the supplied root, return %NULL.
 */
char *__d_path(const struct path *path,
	       const struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, root, &res, &buflen);

	if (error < 0)
		return ERR_PTR(error);
	if (error > 0)
		return NULL;
	return res;
}
char *d_absolute_path(const struct path *path,
	       char *buf, int buflen)
{
	struct path root = {};
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	error = prepend_path(path, &root, &res, &buflen);

	if (error > 1)
		error = -EINVAL;
	if (error < 0)
		return ERR_PTR(error);
	return res;
}
3269 * same as __d_path but appends "(deleted)" for unlinked files.
3271 static int path_with_deleted(const struct path
*path
,
3272 const struct path
*root
,
3273 char **buf
, int *buflen
)
3275 prepend(buf
, buflen
, "\0", 1);
3276 if (d_unlinked(path
->dentry
)) {
3277 int error
= prepend(buf
, buflen
, " (deleted)", 10);
3282 return prepend_path(path
, root
, buf
, buflen
);
static int prepend_unreachable(char **buffer, int *buflen)
{
	return prepend(buffer, buflen, "(unreachable)", 13);
}
static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 *
	 * Some pseudo inodes are mountable. When they are mounted
	 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
	 * and instead have d_path return the mounted path.
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname &&
	    (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	rcu_read_lock();
	get_fs_root_rcu(current->fs, &root);
	error = path_with_deleted(path, &root, &res, &buflen);
	rcu_read_unlock();

	if (error < 0)
		res = ERR_PTR(error);
	return res;
}
EXPORT_SYMBOL(d_path);
/*
 * Helper function for dentry_operations.d_dname() members
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	va_list args;
	char temp[64];
	int sz;

	va_start(args, fmt);
	sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
	va_end(args);

	if (sz > sizeof(temp) || sz > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	buffer += buflen - sz;
	return memcpy(buffer, temp, sz);
}
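/*
 * Example ->d_dname() instance built on this helper, modelled on the
 * pseudo-filesystem pattern; the "examplefs" name is illustrative:
 *
 *	static char *examplefs_dname(struct dentry *dentry, char *buffer,
 *				     int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen,
 *				"examplefs:[%lu]", d_inode(dentry)->i_ino);
 *	}
 */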
char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
{
	char *end = buffer + buflen;
	/* these dentries are never renamed, so d_lock is not needed */
	if (prepend(&end, &buflen, " (deleted)", 11) ||
	    prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
	    prepend(&end, &buflen, "/", 1))
		end = ERR_PTR(-ENAMETOOLONG);
	return end;
}
EXPORT_SYMBOL(simple_dname);
/*
 * Write full pathname from the root of the filesystem into the buffer.
 */
static char *__dentry_path(struct dentry *d, char *buf, int buflen)
{
	struct dentry *dentry;
	char *end, *retval;
	int len, seq = 0;
	int error = 0;

	if (buflen < 2)
		goto Elong;

	rcu_read_lock();
restart:
	dentry = d;
	end = buf + buflen;
	len = buflen;
	prepend(&end, &len, "\0", 1);
	/* Get '/' right */
	retval = end - 1;
	*retval = '/';
	read_seqbegin_or_lock(&rename_lock, &seq);
	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;

		prefetch(parent);
		error = prepend_name(&end, &len, &dentry->d_name);
		if (error)
			break;

		retval = end;
		dentry = parent;
	}
	if (!(seq & 1))
		rcu_read_unlock();
	if (need_seqretry(&rename_lock, seq)) {
		seq = 1;
		goto restart;
	}
	done_seqretry(&rename_lock, seq);
	if (error)
		goto Elong;
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
{
	return __dentry_path(dentry, buf, buflen);
}
EXPORT_SYMBOL(dentry_path_raw);
char *dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *p = NULL;
	char *retval;

	if (d_unlinked(dentry)) {
		p = buf + buflen;
		if (prepend(&p, &buflen, "//deleted", 10) != 0)
			goto Elong;
		buflen++;
	}
	retval = __dentry_path(dentry, buf, buflen);
	if (!IS_ERR(retval) && p)
		*p = '/';	/* restore '/' overridden with '\0' */
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
				    struct path *pwd)
{
	unsigned seq;

	do {
		seq = read_seqcount_begin(&fs->seq);
		*root = fs->root;
		*pwd = fs->pwd;
	} while (read_seqcount_retry(&fs->seq, seq));
}
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	char *page = __getname();

	if (!page)
		return -ENOMEM;

	rcu_read_lock();
	get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);

	error = -ENOENT;
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		char *cwd = page + PATH_MAX;
		int buflen = PATH_MAX;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &root, &cwd, &buflen);
		rcu_read_unlock();

		if (error < 0)
			goto out;

		/* Unreachable from current root */
		if (error > 0) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PATH_MAX + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		rcu_read_unlock();
	}

out:
	__putname(page);
	return error;
}
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
 * Returns false otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */
bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	bool result;
	unsigned seq;

	if (new_dentry == old_dentry)
		return true;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent
		 * trashing due to d_move()
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = true;
		else
			result = false;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
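/*
 * Example (sketch): a caller holding references on both dentries can use
 * this to refuse operations that would escape a subtree:
 *
 *	if (!is_subdir(child, root))
 *		return -EXDEV;
 */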
static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
{
	struct dentry *root = data;
	if (dentry != root) {
		if (d_unhashed(dentry) || !dentry->d_inode)
			return D_WALK_SKIP;

		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_lockref.count--;
		}
	}
	return D_WALK_CONTINUE;
}

void d_genocide(struct dentry *parent)
{
	d_walk(parent, parent, d_genocide_kill, NULL);
}
void d_tmpfile(struct dentry *dentry, struct inode *inode)
{
	inode_dec_link_count(inode);
	BUG_ON(dentry->d_name.name != dentry->d_iname ||
		!hlist_unhashed(&dentry->d_u.d_alias) ||
		!d_unlinked(dentry));
	spin_lock(&dentry->d_parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
				(unsigned long long)inode->i_ino);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&dentry->d_parent->d_lock);
	d_instantiate(dentry, inode);
}
EXPORT_SYMBOL(d_tmpfile);
static __initdata unsigned long dhash_entries;
static int __init set_dhash_entries(char *str)
{
	if (!str)
		return 0;
	dhash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("dhash_entries=", set_dhash_entries);
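/*
 * Example: the hash size can be forced from the kernel command line, e.g.
 * "dhash_entries=65536". When dhash_entries is left at 0 (the default),
 * alloc_large_system_hash() below sizes the table from available memory.
 */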
static void __init dcache_init_early(void)
{
	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_EARLY | HASH_ZERO,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);
}
static void __init dcache_init(void)
{
	/*
	 * A constructor could be added for stable state like the lists,
	 * but it is probably not worth it because of the cache nature
	 * of the dcache.
	 */
	dentry_cache = KMEM_CACHE(dentry,
		SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT);

	/* Hash may have been set up in dcache_init_early */
	if (!hashdist)
		return;

	dentry_hashtable =
		alloc_large_system_hash("Dentry cache",
					sizeof(struct hlist_bl_head),
					dhash_entries,
					13,
					HASH_ZERO,
					&d_hash_shift,
					&d_hash_mask,
					0,
					0);
}
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
		INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);

	dcache_init_early();
	inode_init_early();
}
void __init vfs_caches_init(void)
{
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	dcache_init();
	inode_init();
	files_init();
	files_maxfiles_init();
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}