/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <linux/kasan.h>

#include "internal.h"
#include "mount.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_u.d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dentry->d_sb->s_dentry_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_u.d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dentry->d_sb->s_dentry_lru_lock
 *     dcache_hash_bucket lock
 *     s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
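
/*
 * Illustrative sketch, not part of the original file: taking the d_lock
 * of two dentries with no ancestor relationship in address order, per
 * the rules above. The helper name d_lock_pair() is hypothetical.
 */
static inline void d_lock_pair(struct dentry *dentry1, struct dentry *dentry2)
{
	/* the lower-addressed dentry is always locked first */
	if (dentry2 < dentry1)
		swap(dentry1, dentry2);
	spin_lock(&dentry1->d_lock);
	spin_lock_nested(&dentry2->d_lock, DENTRY_D_LOCK_NESTED);
}
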
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(const struct dentry *parent,
					unsigned int hash)
{
	hash += (unsigned long) parent / L1_CACHE_BYTES;
	return dentry_hashtable + hash_32(hash, d_hash_shift);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(long, nr_dentry);
static DEFINE_PER_CPU(long, nr_dentry_unused);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)

/*
 * Here we resort to our own counters instead of using generic per-cpu counters
 * for consistency with what the vfs inode code does. We expect to get better
 * code and performance by having our own specialized counters.
 *
 * Please note that the loop is done over all possible CPUs, not over all online
 * CPUs. The reason for this is that we don't want to play games with CPUs going
 * on and off. If one of them goes off, we will just keep their counters.
 *
 * glommer: See cffbc8a for details, and if you ever intend to change this,
 * please update all vfs counters to match.
 */
static long get_nr_dentry(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

static long get_nr_dentry_unused(void)
{
	int i;
	long sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry_unused, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	dentry_stat.nr_unused = get_nr_dentry_unused();
	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

/*
 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
 * The strings are both count bytes long, and count is non-zero.
 */
#ifdef CONFIG_DCACHE_WORD_ACCESS

#include <asm/word-at-a-time.h>
/*
 * NOTE! 'cs' and 'scount' come from a dentry, so it has an
 * aligned allocation for this particular component. We don't
 * strictly need the load_unaligned_zeropad() safety, but it
 * doesn't hurt either.
 *
 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
 * need the careful unaligned handling.
 */
static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	unsigned long a, b, mask;

	for (;;) {
		a = *(unsigned long *)cs;
		b = load_unaligned_zeropad(ct);
		if (tcount < sizeof(unsigned long))
			break;
		if (unlikely(a != b))
			return 1;
		cs += sizeof(unsigned long);
		ct += sizeof(unsigned long);
		tcount -= sizeof(unsigned long);
		if (!tcount)
			return 0;
	}
	mask = bytemask_from_count(tcount);
	return unlikely(!!((a ^ b) & mask));
}
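
/*
 * Worked example (illustrative, not part of the original file): for the
 * final partial word, bytemask_from_count() keeps only the bytes that
 * belong to the name. On a 64-bit little-endian machine, assuming
 * tcount == 3, the mask is 0x0000000000ffffff, so only the low three
 * bytes of (a ^ b) can cause a mismatch; bytes beyond the name are
 * ignored.
 */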

#else

static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}

#endif

static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs;
	/*
	 * Be careful about RCU walk racing with rename:
	 * use ACCESS_ONCE to fetch the name pointer.
	 *
	 * NOTE! Even if a rename will mean that the length
	 * was not loaded atomically, we don't care. The
	 * RCU walk will check the sequence count eventually,
	 * and catch it. And we won't overrun the buffer,
	 * because we're reading the name pointer atomically,
	 * and a dentry name is guaranteed to be properly
	 * terminated with a NUL byte.
	 *
	 * End result: even if 'len' is wrong, we'll exit
	 * early because the data cannot match (there can
	 * be no NUL in the ct/tcount data)
	 */
	cs = ACCESS_ONCE(dentry->d_name.name);
	smp_read_barrier_depends();
	return dentry_string_cmp(cs, ct, tcount);
}

struct external_name {
	union {
		atomic_t count;
		struct rcu_head head;
	} u;
	unsigned char name[];
};
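
/*
 * Illustrative note, not part of the original file: an external name is
 * allocated as a single chunk, header plus NUL-terminated string, which
 * is why __d_alloc() below sizes it as
 *
 *	offsetof(struct external_name, name[1]) + name->len
 *
 * i.e. the union, the name bytes, and the trailing NUL accounted for by
 * name[1]. external_name() recovers the header from d_name.name with
 * container_of().
 */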

static inline struct external_name *external_name(struct dentry *dentry)
{
	return container_of(dentry->d_name.name, struct external_name, name[0]);
}

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	kmem_cache_free(dentry_cache, dentry);
}

static void __d_free_external(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
	kfree(external_name(dentry));
	kmem_cache_free(dentry_cache, dentry);
}

static inline int dname_external(const struct dentry *dentry)
{
	return dentry->d_name.name != dentry->d_iname;
}

/*
 * Make sure other CPUs see the inode attached before the type is set.
 */
static inline void __d_set_inode_and_type(struct dentry *dentry,
					  struct inode *inode,
					  unsigned type_flags)
{
	unsigned flags;

	dentry->d_inode = inode;
	smp_wmb();
	flags = READ_ONCE(dentry->d_flags);
	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	flags |= type_flags;
	WRITE_ONCE(dentry->d_flags, flags);
}

/*
 * Ideally, we want to make sure that other CPUs see the flags cleared before
 * the inode is detached, but this is really a violation of RCU principles
 * since the ordering suggests we should always set inode before flags.
 *
 * We should instead replace or discard the entire dentry - but that sucks
 * performancewise on mass deletion/rename.
 */
static inline void __d_clear_type_and_inode(struct dentry *dentry)
{
	unsigned flags = READ_ONCE(dentry->d_flags);

	flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
	WRITE_ONCE(dentry->d_flags, flags);
	smp_wmb();
	dentry->d_inode = NULL;
}

static void dentry_free(struct dentry *dentry)
{
	WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
	if (unlikely(dname_external(dentry))) {
		struct external_name *p = external_name(dentry);
		if (likely(atomic_dec_and_test(&p->u.count))) {
			call_rcu(&dentry->d_u.d_rcu, __d_free_external);
			return;
		}
	}
	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}
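
/*
 * Illustrative note, not part of the original file (assuming the 4.x
 * implementation of write_seqcount_barrier(), which issues smp_wmb()
 * and adds 2 to the sequence): an rcu-walk lookup that sampled d_seq
 * before this call will see a changed sequence afterwards and fall
 * back to ref-walk.
 */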

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		__d_clear_type_and_inode(dentry);
		hlist_del_init(&dentry->d_u.d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	__d_clear_type_and_inode(dentry);
	hlist_del_init(&dentry->d_u.d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
 * is in use - which includes both the "real" per-superblock
 * LRU list _and_ the DCACHE_SHRINK_LIST use.
 *
 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
 * on the shrink list (ie not on the superblock LRU list).
 *
 * The per-cpu "nr_dentry_unused" counters are updated with
 * the DCACHE_LRU_LIST bit.
 *
 * These helper functions make sure we always follow the
 * rules. d_lock must be held by the caller.
 */
#define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
static void d_lru_add(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, 0);
	dentry->d_flags |= DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_lru_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
}

static void d_shrink_del(struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	list_del_init(&dentry->d_lru);
	dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
	this_cpu_dec(nr_dentry_unused);
}

static void d_shrink_add(struct dentry *dentry, struct list_head *list)
{
	D_FLAG_VERIFY(dentry, 0);
	list_add(&dentry->d_lru, list);
	dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
	this_cpu_inc(nr_dentry_unused);
}

/*
 * These can only be called under the global LRU lock, ie during the
 * callback for freeing the LRU list. "isolate" removes it from the
 * LRU lists entirely, while shrink_move moves it to the indicated
 * private list.
 */
static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags &= ~DCACHE_LRU_LIST;
	this_cpu_dec(nr_dentry_unused);
	list_lru_isolate(lru, &dentry->d_lru);
}

static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
			      struct list_head *list)
{
	D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
	dentry->d_flags |= DCACHE_SHRINK_LIST;
	list_lru_isolate_move(lru, &dentry->d_lru, list);
}

/*
 * dentry_lru_(add|del) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
		d_lru_add(dentry);
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/*
		 * Hashed dentries are normally on the dentry hashtable,
		 * with the exception of those newly allocated by
		 * d_obtain_alias, which are always IS_ROOT:
		 */
		if (unlikely(IS_ROOT(dentry)))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

static void __dentry_kill(struct dentry *dentry)
{
	struct dentry *parent = NULL;
	bool can_free = true;
	if (!IS_ROOT(dentry))
		parent = dentry->d_parent;

	/*
	 * The dentry is now unrecoverably dead to the world.
	 */
	lockref_mark_dead(&dentry->d_lockref);

	/*
	 * inform the fs via d_prune that this dentry is about to be
	 * unhashed and destroyed.
	 */
	if (dentry->d_flags & DCACHE_OP_PRUNE)
		dentry->d_op->d_prune(dentry);

	if (dentry->d_flags & DCACHE_LRU_LIST) {
		if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
			d_lru_del(dentry);
	}
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	__list_del_entry(&dentry->d_child);
	/*
	 * Inform d_walk() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DENTRY_KILLED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	BUG_ON(dentry->d_lockref.count > 0);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	spin_lock(&dentry->d_lock);
	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		dentry->d_flags |= DCACHE_MAY_FREE;
		can_free = false;
	}
	spin_unlock(&dentry->d_lock);
	if (likely(can_free))
		dentry_free(dentry);
}

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static struct dentry *dentry_kill(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct inode *inode = dentry->d_inode;
	struct dentry *parent = NULL;

	if (inode && unlikely(!spin_trylock(&inode->i_lock)))
		goto failed;

	if (!IS_ROOT(dentry)) {
		parent = dentry->d_parent;
		if (unlikely(!spin_trylock(&parent->d_lock))) {
			if (inode)
				spin_unlock(&inode->i_lock);
			goto failed;
		}
	}

	__dentry_kill(dentry);
	return parent;

failed:
	spin_unlock(&dentry->d_lock);
	cpu_relax();
	return dentry; /* try again with same dentry */
}

static inline struct dentry *lock_parent(struct dentry *dentry)
{
	struct dentry *parent = dentry->d_parent;
	if (IS_ROOT(dentry))
		return NULL;
	if (unlikely(dentry->d_lockref.count < 0))
		return NULL;
	if (likely(spin_trylock(&parent->d_lock)))
		return parent;
	rcu_read_lock();
	spin_unlock(&dentry->d_lock);
again:
	parent = ACCESS_ONCE(dentry->d_parent);
	spin_lock(&parent->d_lock);
	/*
	 * We can't blindly lock dentry until we are sure
	 * that we won't violate the locking order.
	 * Any changes of dentry->d_parent must have
	 * been done with parent->d_lock held, so
	 * spin_lock() above is enough of a barrier
	 * for checking if it's still our child.
	 */
	if (unlikely(parent != dentry->d_parent)) {
		spin_unlock(&parent->d_lock);
		goto again;
	}
	rcu_read_unlock();
	if (parent != dentry)
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	else
		parent = NULL;
	return parent;
}

/*
 * Try to do a lockless dput(), and return whether that was successful.
 *
 * If unsuccessful, we return false, having already taken the dentry lock.
 *
 * The caller needs to hold the RCU read lock, so that the dentry is
 * guaranteed to stay around even if the refcount goes down to zero!
 */
static inline bool fast_dput(struct dentry *dentry)
{
	int ret;
	unsigned int d_flags;

	/*
	 * If we have a d_op->d_delete() operation, we should not
	 * let the dentry count go to zero, so use lockref_put_or_lock().
	 */
	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
		return lockref_put_or_lock(&dentry->d_lockref);

	/*
	 * .. otherwise, we can try to just decrement the
	 * lockref optimistically.
	 */
	ret = lockref_put_return(&dentry->d_lockref);

	/*
	 * If the lockref_put_return() failed due to the lock being held
	 * by somebody else, the fast path has failed. We will need to
	 * get the lock, and then check the count again.
	 */
	if (unlikely(ret < 0)) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_lockref.count > 1) {
			dentry->d_lockref.count--;
			spin_unlock(&dentry->d_lock);
			return 1;
		}
		return 0;
	}

	/*
	 * If we weren't the last ref, we're done.
	 */
	if (ret)
		return 1;

	/*
	 * Careful, careful. The reference count went down
	 * to zero, but we don't hold the dentry lock, so
	 * somebody else could get it again, and do another
	 * dput(), and we need to not race with that.
	 *
	 * However, there is a very special and common case
	 * where we don't care, because there is nothing to
	 * do: the dentry is still hashed, it does not have
	 * a 'delete' op, and it's referenced and already on
	 * the LRU list.
	 *
	 * NOTE! Since we aren't locked, these values are
	 * not "stable". However, it is sufficient that at
	 * some point after we dropped the reference the
	 * dentry was hashed and the flags had the proper
	 * value. Other dentry users may have re-gotten
	 * a reference to the dentry and change that, but
	 * our work is done - we can leave the dentry
	 * around with a zero refcount.
	 */
	smp_rmb();
	d_flags = ACCESS_ONCE(dentry->d_flags);
	d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST;

	/* Nothing to do? Dropping the reference was all we needed? */
	if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
		return 1;

	/*
	 * Not the fast normal case? Get the lock. We've already decremented
	 * the refcount, but we'll need to re-check the situation after
	 * getting the lock.
	 */
	spin_lock(&dentry->d_lock);

	/*
	 * Did somebody else grab a reference to it in the meantime, and
	 * we're no longer the last user after all? Alternatively, somebody
	 * else could have killed it and marked it dead. Either way, we
	 * don't need to do anything else.
	 */
	if (dentry->d_lockref.count) {
		spin_unlock(&dentry->d_lock);
		return 1;
	}

	/*
	 * Re-get the reference we optimistically dropped. We hold the
	 * lock, and we just tested that it was zero, so we can just
	 * set it to 1.
	 */
	dentry->d_lockref.count = 1;
	return 0;
}


/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (unlikely(!dentry))
		return;

repeat:
	rcu_read_lock();
	if (likely(fast_dput(dentry))) {
		rcu_read_unlock();
		return;
	}

	/* Slow case: now with the dentry lock held */
	rcu_read_unlock();

	/* Unreachable? Get rid of it */
	if (unlikely(d_unhashed(dentry)))
		goto kill_it;

	if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	if (!(dentry->d_flags & DCACHE_REFERENCED))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_lockref.count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);


/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_lockref.count++;
}

static inline void __dget(struct dentry *dentry)
{
	lockref_get(&dentry->d_lockref);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	int gotref;
	struct dentry *ret;

	/*
	 * Do optimistic parent lookup without any
	 * locking.
	 */
	rcu_read_lock();
	ret = ACCESS_ONCE(dentry->d_parent);
	gotref = lockref_get_not_zero(&ret->d_lockref);
	rcu_read_unlock();
	if (likely(gotref)) {
		if (likely(ret == ACCESS_ONCE(dentry->d_parent)))
			return ret;
		dput(ret);
	}

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_lockref.count);
	ret->d_lockref.count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
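
/*
 * Illustrative sketch, not part of the original file: walking from a
 * dentry up to its filesystem root with dget_parent(). Each iteration
 * takes a reference on the parent before dropping the child's, so the
 * chain stays pinned throughout. The helper name is hypothetical.
 */
static inline void d_walk_to_root(struct dentry *dentry)
{
	struct dentry *parent;

	dentry = dget(dentry);
	while (!IS_ROOT(dentry)) {
		parent = dget_parent(dentry);
		dput(dentry);
		dentry = parent;
	}
	dput(dentry);
}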

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem, or if the directory was renamed and d_revalidate
 * was the first vfs operation to notice.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one.
 */
static struct dentry *__d_find_alias(struct inode *inode)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			__dget_dlock(alias);
			spin_unlock(&alias->d_lock);
			return alias;
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!hlist_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_lockref.count) {
			struct dentry *parent = lock_parent(dentry);
			if (likely(!dentry->d_lockref.count)) {
				__dentry_kill(dentry);
				dput(parent);
				goto restart;
			}
			if (parent)
				spin_unlock(&parent->d_lock);
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry, *parent;

	while (!list_empty(list)) {
		struct inode *inode;
		dentry = list_entry(list->prev, struct dentry, d_lru);
		spin_lock(&dentry->d_lock);
		parent = lock_parent(dentry);

		/*
		 * The dispose list is isolated and dentries are not accounted
		 * to the LRU here, so we can simply remove it from the list
		 * here regardless of whether it is referenced or not.
		 */
		d_shrink_del(dentry);

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free it.
		 */
		if (dentry->d_lockref.count > 0) {
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		if (unlikely(dentry->d_flags & DCACHE_DENTRY_KILLED)) {
			bool can_free = dentry->d_flags & DCACHE_MAY_FREE;
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			if (can_free)
				dentry_free(dentry);
			continue;
		}

		inode = dentry->d_inode;
		if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
			d_shrink_add(dentry, list);
			spin_unlock(&dentry->d_lock);
			if (parent)
				spin_unlock(&parent->d_lock);
			continue;
		}

		__dentry_kill(dentry);

		/*
		 * We need to prune ancestors too. This is necessary to prevent
		 * quadratic behavior of shrink_dcache_parent(), but is also
		 * expected to be beneficial in reducing dentry cache
		 * fragmentation.
		 */
		dentry = parent;
		while (dentry && !lockref_put_or_lock(&dentry->d_lockref)) {
			parent = lock_parent(dentry);
			if (dentry->d_lockref.count != 1) {
				dentry->d_lockref.count--;
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				break;
			}
			inode = dentry->d_inode;	/* can't be NULL */
			if (unlikely(!spin_trylock(&inode->i_lock))) {
				spin_unlock(&dentry->d_lock);
				if (parent)
					spin_unlock(&parent->d_lock);
				cpu_relax();
				continue;
			}
			__dentry_kill(dentry);
			dentry = parent;
		}
	}
}

static enum lru_status dentry_lru_isolate(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	/*
	 * Referenced dentries are still in use. If they have active
	 * counts, just remove them from the LRU. Otherwise give them
	 * another pass through the LRU.
	 */
	if (dentry->d_lockref.count) {
		d_lru_isolate(lru, dentry);
		spin_unlock(&dentry->d_lock);
		return LRU_REMOVED;
	}

	if (dentry->d_flags & DCACHE_REFERENCED) {
		dentry->d_flags &= ~DCACHE_REFERENCED;
		spin_unlock(&dentry->d_lock);

		/*
		 * The list move itself will be made by the common LRU code. At
		 * this point, we've dropped the dentry->d_lock but keep the
		 * lru lock. This is safe to do, since every list movement is
		 * protected by the lru lock even if both locks are held.
		 *
		 * This is guaranteed by the fact that all LRU management
		 * functions are intermediated by the LRU API calls like
		 * list_lru_add and list_lru_del. List movement in this file
		 * only ever occurs through these functions or through
		 * callbacks like this one, that are called from the LRU API.
		 *
		 * The only exceptions to this are functions like
		 * shrink_dentry_list, and code that first checks for the
		 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
		 * operating only with stack provided lists after they are
		 * properly isolated from the main list. It is thus always a
		 * local access.
		 */
		return LRU_ROTATE;
	}

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock
 * @sc: shrink control, passed to list_lru_shrink_walk()
 *
 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
 * is done when we need more memory and called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	long freed;

	freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
				     dentry_lru_isolate, &dispose);
	shrink_dentry_list(&dispose);
	return freed;
}
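
/*
 * Illustrative usage, not part of the original file: the superblock
 * shrinker invokes this with a struct shrink_control, along the lines
 * of (field values made up for illustration):
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = GFP_KERNEL,
 *		.nr_to_scan = 128,
 *	};
 *	long freed = prune_dcache_sb(sb, &sc);
 */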

static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
		struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *freeable = arg;
	struct dentry	*dentry = container_of(item, struct dentry, d_lru);

	/*
	 * we are inverting the lru lock/dentry->d_lock here,
	 * so use a trylock. If we fail to get the lock, just skip
	 * it
	 */
	if (!spin_trylock(&dentry->d_lock))
		return LRU_SKIP;

	d_lru_shrink_move(lru, dentry, freeable);
	spin_unlock(&dentry->d_lock);

	return LRU_REMOVED;
}


/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	long freed;

	do {
		LIST_HEAD(dispose);

		freed = list_lru_walk(&sb->s_dentry_lru,
			dentry_lru_isolate_shrink, &dispose, UINT_MAX);

		this_cpu_sub(nr_dentry_unused, freed);
		shrink_dentry_list(&dispose);
	} while (freed > 0);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/**
 * enum d_walk_ret - action to take during tree walk
 * @D_WALK_CONTINUE:	continue walk
 * @D_WALK_QUIT:	quit walk
 * @D_WALK_NORETRY:	quit when retry is needed
 * @D_WALK_SKIP:	skip this dentry and its children
 */
enum d_walk_ret {
	D_WALK_CONTINUE,
	D_WALK_QUIT,
	D_WALK_NORETRY,
	D_WALK_SKIP,
};

/**
 * d_walk - walk the dentry tree
 * @parent:	start of walk
 * @data:	data passed to @enter() and @finish()
 * @enter:	callback when first entering the dentry
 * @finish:	callback when successfully finished the walk
 *
 * The @enter() and @finish() callbacks are called with d_lock held.
 */
static void d_walk(struct dentry *parent, void *data,
		   enum d_walk_ret (*enter)(void *, struct dentry *),
		   void (*finish)(void *))
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq = 0;
	enum d_walk_ret ret;
	bool retry = true;

again:
	read_seqbegin_or_lock(&rename_lock, &seq);
	this_parent = parent;
	spin_lock(&this_parent->d_lock);

	ret = enter(data, this_parent);
	switch (ret) {
	case D_WALK_CONTINUE:
		break;
	case D_WALK_QUIT:
	case D_WALK_SKIP:
		goto out_unlock;
	case D_WALK_NORETRY:
		retry = false;
		break;
	}
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		ret = enter(data, dentry);
		switch (ret) {
		case D_WALK_CONTINUE:
			break;
		case D_WALK_QUIT:
			spin_unlock(&dentry->d_lock);
			goto out_unlock;
		case D_WALK_NORETRY:
			retry = false;
			break;
		case D_WALK_SKIP:
			spin_unlock(&dentry->d_lock);
			continue;
		}

		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	rcu_read_lock();
ascend:
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = child->d_parent;

		spin_unlock(&child->d_lock);
		spin_lock(&this_parent->d_lock);

		/* might go back up the wrong parent if we have had a rename. */
		if (need_seqretry(&rename_lock, seq))
			goto rename_retry;
		next = child->d_child.next;
		while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
			if (next == &this_parent->d_subdirs)
				goto ascend;
			child = list_entry(next, struct dentry, d_child);
			next = next->next;
		}
		rcu_read_unlock();
		goto resume;
	}
	if (need_seqretry(&rename_lock, seq))
		goto rename_retry;
	rcu_read_unlock();
	if (finish)
		finish(data);

out_unlock:
	spin_unlock(&this_parent->d_lock);
	done_seqretry(&rename_lock, seq);
	return;

rename_retry:
	spin_unlock(&this_parent->d_lock);
	rcu_read_unlock();
	BUG_ON(seq & 1);
	if (!retry)
		return;
	seq = 1;
	goto again;
}

/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

static enum d_walk_ret check_mount(void *data, struct dentry *dentry)
{
	int *ret = data;
	if (d_mountpoint(dentry)) {
		*ret = 1;
		return D_WALK_QUIT;
	}
	return D_WALK_CONTINUE;
}

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	int ret = 0;

	d_walk(parent, &ret, check_mount, NULL);

	return ret;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Called by mount code to set a mountpoint and check if the mountpoint is
 * reachable (e.g. NFS can unhash a directory dentry and then the complete
 * subtree can become unreachable).
 *
 * Only one of d_invalidate() and d_set_mounted() must succeed. For
 * this reason take rename_lock and d_lock on dentry and ancestors.
 */
int d_set_mounted(struct dentry *dentry)
{
	struct dentry *p;
	int ret = -ENOENT;
	write_seqlock(&rename_lock);
	for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
		/* Need exclusion wrt. d_invalidate() */
		spin_lock(&p->d_lock);
		if (unlikely(d_unhashed(p))) {
			spin_unlock(&p->d_lock);
			goto out;
		}
		spin_unlock(&p->d_lock);
	}
	spin_lock(&dentry->d_lock);
	if (!d_unlinked(dentry)) {
		dentry->d_flags |= DCACHE_MOUNTED;
		ret = 0;
	}
	spin_unlock(&dentry->d_lock);
out:
	write_sequnlock(&rename_lock);
	return ret;
}

/*
 * Search the dentry child list of the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */

struct select_data {
	struct dentry *start;
	struct list_head dispose;
	int found;
};

static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
{
	struct select_data *data = _data;
	enum d_walk_ret ret = D_WALK_CONTINUE;

	if (data->start == dentry)
		goto out;

	if (dentry->d_flags & DCACHE_SHRINK_LIST) {
		data->found++;
	} else {
		if (dentry->d_flags & DCACHE_LRU_LIST)
			d_lru_del(dentry);
		if (!dentry->d_lockref.count) {
			d_shrink_add(dentry, &data->dispose);
			data->found++;
		}
	}
	/*
	 * We can return to the caller if we have found some (this
	 * ensures forward progress). We'll be coming back to find
	 * the rest.
	 */
	if (!list_empty(&data->dispose))
		ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
out:
	return ret;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry *parent)
{
	for (;;) {
		struct select_data data;

		INIT_LIST_HEAD(&data.dispose);
		data.start = parent;
		data.found = 0;

		d_walk(parent, &data, select_collect, NULL);
		if (!data.found)
			break;

		shrink_dentry_list(&data.dispose);
		cond_resched();
	}
}
EXPORT_SYMBOL(shrink_dcache_parent);

static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
{
	/* it has busy descendants; complain about those instead */
	if (!list_empty(&dentry->d_subdirs))
		return D_WALK_CONTINUE;

	/* root with refcount 1 is fine */
	if (dentry == _data && dentry->d_lockref.count == 1)
		return D_WALK_CONTINUE;

	printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
			" still in use (%d) [unmount of %s %s]\n",
		       dentry,
		       dentry->d_inode ?
		       dentry->d_inode->i_ino : 0UL,
		       dentry,
		       dentry->d_lockref.count,
		       dentry->d_sb->s_type->name,
		       dentry->d_sb->s_id);
	WARN_ON(1);
	return D_WALK_CONTINUE;
}

static void do_one_tree(struct dentry *dentry)
{
	shrink_dcache_parent(dentry);
	d_walk(dentry, dentry, umount_check, NULL);
	d_drop(dentry);
	dput(dentry);
}

/*
 * destroy the dentries attached to a superblock on unmounting
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");

	dentry = sb->s_root;
	sb->s_root = NULL;
	do_one_tree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash));
		do_one_tree(dentry);
	}
}

struct detach_data {
	struct select_data select;
	struct dentry *mountpoint;
};
static enum d_walk_ret detach_and_collect(void *_data, struct dentry *dentry)
{
	struct detach_data *data = _data;

	if (d_mountpoint(dentry)) {
		__dget_dlock(dentry);
		data->mountpoint = dentry;
		return D_WALK_QUIT;
	}

	return select_collect(&data->select, dentry);
}

static void check_and_drop(void *_data)
{
	struct detach_data *data = _data;

	if (!data->mountpoint && !data->select.found)
		__d_drop(data->select.start);
}

/**
 * d_invalidate - detach submounts, prune dcache, and drop
 * @dentry: dentry to invalidate (aka detach, prune and drop)
 *
 * no dcache lock.
 *
 * The final d_drop is done as an atomic operation relative to
 * rename_lock ensuring there are no races with d_set_mounted. This
 * ensures there are no unhashed dentries on the path to a mountpoint.
 */
void d_invalidate(struct dentry *dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return;
	}
	spin_unlock(&dentry->d_lock);

	/* Negative dentries can be dropped without further checks */
	if (!dentry->d_inode) {
		d_drop(dentry);
		return;
	}

	for (;;) {
		struct detach_data data;

		data.mountpoint = NULL;
		INIT_LIST_HEAD(&data.select.dispose);
		data.select.start = dentry;
		data.select.found = 0;

		d_walk(dentry, &data, detach_and_collect, check_and_drop);

		if (data.select.found)
			shrink_dentry_list(&data.select.dispose);

		if (data.mountpoint) {
			detach_mounts(data.mountpoint);
			dput(data.mountpoint);
		}

		if (!data.mountpoint && !data.select.found)
			break;

		cond_resched();
	}
}
EXPORT_SYMBOL(d_invalidate);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */

struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	/*
	 * We guarantee that the inline name is always NUL-terminated.
	 * This way the memcpy() done by the name switching in rename
	 * will still always have a NUL at the end, even if we might
	 * be overwriting an internal NUL character
	 */
	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
	if (name->len > DNAME_INLINE_LEN-1) {
		size_t size = offsetof(struct external_name, name[1]);
		struct external_name *p = kmalloc(size + name->len, GFP_KERNEL);
		if (!p) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
		atomic_set(&p->u.count, 1);
		dname = p->name;
		if (IS_ENABLED(CONFIG_DCACHE_WORD_ACCESS))
			kasan_unpoison_shadow(dname,
				round_up(name->len + 1, sizeof(unsigned long)));
	} else {
		dname = dentry->d_iname;
	}

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	/* Make sure we always see the terminating NUL character */
	smp_wmb();
	dentry->d_name.name = dname;

	dentry->d_lockref.count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_HLIST_NODE(&dentry->d_u.d_alias);
	INIT_LIST_HEAD(&dentry->d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

/**
 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
 * @sb: the superblock
 * @name: qstr of the name
 *
 * For a filesystem that just pins its dentries in memory and never
 * performs lookups at all, return an unhashed IS_ROOT dentry.
 */
struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	return __d_alloc(sb, name);
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH	|
				DCACHE_OP_COMPARE	|
				DCACHE_OP_REVALIDATE	|
				DCACHE_OP_WEAK_REVALIDATE	|
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_weak_revalidate)
		dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;
	if (op->d_prune)
		dentry->d_flags |= DCACHE_OP_PRUNE;

}
EXPORT_SYMBOL(d_set_d_op);


/*
 * d_set_fallthru - Mark a dentry as falling through to a lower layer
 * @dentry - The dentry to mark
 *
 * Mark a dentry as falling through to the lower layer (as set with
 * d_pin_lower()). This flag may be recorded on the medium.
 */
void d_set_fallthru(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	dentry->d_flags |= DCACHE_FALLTHRU;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_set_fallthru);

static unsigned d_flags_for_inode(struct inode *inode)
{
	unsigned add_flags = DCACHE_REGULAR_TYPE;

	if (!inode)
		return DCACHE_MISS_TYPE;

	if (S_ISDIR(inode->i_mode)) {
		add_flags = DCACHE_DIRECTORY_TYPE;
		if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
			if (unlikely(!inode->i_op->lookup))
				add_flags = DCACHE_AUTODIR_TYPE;
			else
				inode->i_opflags |= IOP_LOOKUP;
		}
		goto type_determined;
	}

	if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
		if (unlikely(inode->i_op->follow_link)) {
			add_flags = DCACHE_SYMLINK_TYPE;
			goto type_determined;
		}
		inode->i_opflags |= IOP_NOFOLLOW;
	}

	if (unlikely(!S_ISREG(inode->i_mode)))
		add_flags = DCACHE_SPECIAL_TYPE;

type_determined:
	if (unlikely(IS_AUTOMOUNT(inode)))
		add_flags |= DCACHE_NEED_AUTOMOUNT;
	return add_flags;
}
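
/*
 * Illustrative summary, not part of the original file, of the mapping
 * implemented above:
 *
 *	no inode			-> DCACHE_MISS_TYPE
 *	directory with ->lookup()	-> DCACHE_DIRECTORY_TYPE
 *	directory without ->lookup()	-> DCACHE_AUTODIR_TYPE
 *	inode with ->follow_link()	-> DCACHE_SYMLINK_TYPE
 *	other non-regular file		-> DCACHE_SPECIAL_TYPE
 *	regular file			-> DCACHE_REGULAR_TYPE
 *
 * DCACHE_NEED_AUTOMOUNT is OR'ed in on top for automount points.
 */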

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	unsigned add_flags = d_flags_for_inode(inode);

	spin_lock(&dentry->d_lock);
	if (inode)
		hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
	__d_set_inode_and_type(dentry, inode, add_flags);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */

void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (alias->d_name.hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (alias->d_name.len != len)
			continue;
		if (dentry_cmp(alias, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_instantiate_no_diralias - instantiate a non-aliased dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. If a directory alias is found, then
 * return an error (and drop inode). Together with d_materialise_unique() this
 * guarantees that a directory inode may never have more than one alias.
 */
int d_instantiate_no_diralias(struct dentry *entry, struct inode *inode)
{
	BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));

	spin_lock(&inode->i_lock);
	if (S_ISDIR(inode->i_mode) && !hlist_empty(&inode->i_dentry)) {
		spin_unlock(&inode->i_lock);
		iput(inode);
		return -EBUSY;
	}
	__d_instantiate(entry, inode);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);

	return 0;
}
EXPORT_SYMBOL(d_instantiate_no_diralias);
1888
1889 struct dentry *d_make_root(struct inode *root_inode)
1890 {
1891 struct dentry *res = NULL;
1892
1893 if (root_inode) {
1894 static const struct qstr name = QSTR_INIT("/", 1);
1895
1896 res = __d_alloc(root_inode->i_sb, &name);
1897 if (res)
1898 d_instantiate(res, root_inode);
1899 else
1900 iput(root_inode);
1901 }
1902 return res;
1903 }
1904 EXPORT_SYMBOL(d_make_root);
1905
1906 static struct dentry * __d_find_any_alias(struct inode *inode)
1907 {
1908 struct dentry *alias;
1909
1910 if (hlist_empty(&inode->i_dentry))
1911 return NULL;
1912 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
1913 __dget(alias);
1914 return alias;
1915 }
1916
1917 /**
1918 * d_find_any_alias - find any alias for a given inode
1919 * @inode: inode to find an alias for
1920 *
1921 * If any aliases exist for the given inode, take and return a
1922 * reference for one of them. If no aliases exist, return %NULL.
1923 */
1924 struct dentry *d_find_any_alias(struct inode *inode)
1925 {
1926 struct dentry *de;
1927
1928 spin_lock(&inode->i_lock);
1929 de = __d_find_any_alias(inode);
1930 spin_unlock(&inode->i_lock);
1931 return de;
1932 }
1933 EXPORT_SYMBOL(d_find_any_alias);
1934
1935 static struct dentry *__d_obtain_alias(struct inode *inode, int disconnected)
1936 {
1937 static const struct qstr anonstring = QSTR_INIT("/", 1);
1938 struct dentry *tmp;
1939 struct dentry *res;
1940 unsigned add_flags;
1941
1942 if (!inode)
1943 return ERR_PTR(-ESTALE);
1944 if (IS_ERR(inode))
1945 return ERR_CAST(inode);
1946
1947 res = d_find_any_alias(inode);
1948 if (res)
1949 goto out_iput;
1950
1951 tmp = __d_alloc(inode->i_sb, &anonstring);
1952 if (!tmp) {
1953 res = ERR_PTR(-ENOMEM);
1954 goto out_iput;
1955 }
1956
1957 spin_lock(&inode->i_lock);
1958 res = __d_find_any_alias(inode);
1959 if (res) {
1960 spin_unlock(&inode->i_lock);
1961 dput(tmp);
1962 goto out_iput;
1963 }
1964
1965 /* attach a disconnected dentry */
1966 add_flags = d_flags_for_inode(inode);
1967
1968 if (disconnected)
1969 add_flags |= DCACHE_DISCONNECTED;
1970
1971 spin_lock(&tmp->d_lock);
1972 __d_set_inode_and_type(tmp, inode, add_flags);
1973 hlist_add_head(&tmp->d_u.d_alias, &inode->i_dentry);
1974 hlist_bl_lock(&tmp->d_sb->s_anon);
1975 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1976 hlist_bl_unlock(&tmp->d_sb->s_anon);
1977 spin_unlock(&tmp->d_lock);
1978 spin_unlock(&inode->i_lock);
1979 security_d_instantiate(tmp, inode);
1980
1981 return tmp;
1982
1983 out_iput:
1984 if (res && !IS_ERR(res))
1985 security_d_instantiate(res, inode);
1986 iput(inode);
1987 return res;
1988 }
1989
1990 /**
1991 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1992 * @inode: inode to allocate the dentry for
1993 *
1994 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1995 * similar open by handle operations. The returned dentry may be anonymous,
1996 * or may have a full name (if the inode was already in the cache).
1997 *
1998 * When called on a directory inode, we must ensure that the inode only ever
1999 * has one dentry. If a dentry is found, that is returned instead of
2000 * allocating a new one.
2001 *
2002 * On successful return, the reference to the inode has been transferred
2003 * to the dentry. In case of an error the reference on the inode is released.
2004 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2005 * be passed in and the error will be propagated to the return value,
2006 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2007 */
2008 struct dentry *d_obtain_alias(struct inode *inode)
2009 {
2010 return __d_obtain_alias(inode, 1);
2011 }
2012 EXPORT_SYMBOL(d_obtain_alias);
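/*
 * A minimal sketch of the typical export_operations consumer, assuming a
 * hypothetical examplefs_iget() that resolves the handle to an inode
 * (cf. generic_fh_to_dentry() in fs/libfs.c):
 */
static struct dentry *examplefs_fh_to_dentry(struct super_block *sb,
					     struct fid *fid,
					     int fh_len, int fh_type)
{
	struct inode *inode = NULL;

	if (fh_len >= 2)
		inode = examplefs_iget(sb, fid->i32.ino);	/* hypothetical */
	/* d_obtain_alias() copes with a NULL or IS_ERR() inode itself */
	return d_obtain_alias(inode);
}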
2013
2014 /**
2015 * d_obtain_root - find or allocate a dentry for a given inode
2016 * @inode: inode to allocate the dentry for
2017 *
2018 * Obtain an IS_ROOT dentry for the root of a filesystem.
2019 *
2020 * We must ensure that directory inodes only ever have one dentry. If a
2021 * dentry is found, that is returned instead of allocating a new one.
2022 *
2023 * On successful return, the reference to the inode has been transferred
2024 * to the dentry. In case of an error the reference on the inode is
2025 * released. A %NULL or IS_ERR inode may be passed in and the
2026 * error will be propagated to the return value, with a %NULL @inode
2027 * replaced by ERR_PTR(-ESTALE).
2028 */
2029 struct dentry *d_obtain_root(struct inode *inode)
2030 {
2031 return __d_obtain_alias(inode, 0);
2032 }
2033 EXPORT_SYMBOL(d_obtain_root);
2034
2035 /**
2036 * d_add_ci - lookup or allocate new dentry with case-exact name
2037 * @inode: the inode case-insensitive lookup has found
2038 * @dentry: the negative dentry that was passed to the parent's lookup func
2039 * @name: the case-exact name to be associated with the returned dentry
2040 *
2041 * This is to avoid filling the dcache with case-insensitive names to the
2042 * same inode; only the actual correct case is stored in the dcache for
2043 * case-insensitive filesystems.
2044 *
2045 * For a case-insensitive lookup match, if the case-exact dentry
2046 * already exists in the dcache, use it and return it.
2047 *
2048 * If no entry exists with the exact case name, allocate a new dentry with
2049 * the exact case, and return the spliced entry.
2050 */
2051 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2052 struct qstr *name)
2053 {
2054 struct dentry *found;
2055 struct dentry *new;
2056
2057 /*
2058 * First check if a dentry matching the name already exists,
2059 * if not go ahead and create it now.
2060 */
2061 found = d_hash_and_lookup(dentry->d_parent, name);
2062 if (!found) {
2063 new = d_alloc(dentry->d_parent, name);
2064 if (!new) {
2065 found = ERR_PTR(-ENOMEM);
2066 } else {
2067 found = d_splice_alias(inode, new);
2068 if (found) {
2069 dput(new);
2070 return found;
2071 }
2072 return new;
2073 }
2074 }
2075 iput(inode);
2076 return found;
2077 }
2078 EXPORT_SYMBOL(d_add_ci);
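/*
 * A minimal ->lookup() sketch for the positive-match path on a
 * case-insensitive filesystem (cf. xfs_vn_ci_lookup()). The helper
 * examplefs_find_ci() is hypothetical: it resolves @dentry's name
 * case-insensitively and reports the on-disk (case-exact) spelling.
 */
static struct dentry *examplefs_ci_lookup(struct inode *dir,
					  struct dentry *dentry,
					  unsigned int flags)
{
	struct qstr exact;
	struct inode *inode = examplefs_find_ci(dir, &dentry->d_name, &exact);

	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* only the case-exact spelling ends up in the dcache */
	return d_add_ci(dentry, inode, &exact);
}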
2079
2080 /*
2081 * Do the slow-case of the dentry name compare.
2082 *
2083 * Unlike the dentry_cmp() function, we need to atomically
2084 * load the name and length information, so that the
2085 * filesystem can rely on them, and can use the 'name' and
2086 * 'len' information without worrying about walking off the
2087 * end of memory etc.
2088 *
2089 * Thus the read_seqcount_retry() and the "duplicate" info
2090 * in arguments (the low-level filesystem should not look
2091 * at the dentry inode or name contents directly, since
2092 * rename can change them while we're in RCU mode).
2093 */
2094 enum slow_d_compare {
2095 D_COMP_OK,
2096 D_COMP_NOMATCH,
2097 D_COMP_SEQRETRY,
2098 };
2099
2100 static noinline enum slow_d_compare slow_dentry_cmp(
2101 const struct dentry *parent,
2102 struct dentry *dentry,
2103 unsigned int seq,
2104 const struct qstr *name)
2105 {
2106 int tlen = dentry->d_name.len;
2107 const char *tname = dentry->d_name.name;
2108
2109 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2110 cpu_relax();
2111 return D_COMP_SEQRETRY;
2112 }
2113 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2114 return D_COMP_NOMATCH;
2115 return D_COMP_OK;
2116 }
2117
2118 /**
2119 * __d_lookup_rcu - search for a dentry (racy, store-free)
2120 * @parent: parent dentry
2121 * @name: qstr of name we wish to find
2122 * @seqp: returns d_seq value at the point where the dentry was found
2123 * Returns: dentry, or NULL
2124 *
2125 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2126 * resolution (store-free path walking) design described in
2127 * Documentation/filesystems/path-lookup.txt.
2128 *
2129 * This is not to be used outside core vfs.
2130 *
2131 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2132 * held, and rcu_read_lock held. The returned dentry must not be stored
2133 * anywhere without first taking d_lock and checking the d_seq sequence
2134 * count against @seq returned here.
2135 *
2136 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2137 * function.
2138 *
2139 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2140 * the returned dentry, so long as its parent's seqlock is checked after the
2141 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2142 * is formed, giving integrity down the path walk.
2143 *
2144 * NOTE! The caller *has* to check the resulting dentry against the sequence
2145 * number we've returned before using any of the resulting dentry state!
2146 */
2147 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2148 const struct qstr *name,
2149 unsigned *seqp)
2150 {
2151 u64 hashlen = name->hash_len;
2152 const unsigned char *str = name->name;
2153 struct hlist_bl_head *b = d_hash(parent, hashlen_hash(hashlen));
2154 struct hlist_bl_node *node;
2155 struct dentry *dentry;
2156
2157 /*
2158 * Note: There is significant duplication with __d_lookup which is
2159 * required to prevent single threaded performance regressions
2160 * especially on architectures where smp_rmb (in seqcounts) are costly.
2161 * Keep the two functions in sync.
2162 */
2163
2164 /*
2165 * The hash list is protected using RCU.
2166 *
2167 * Carefully use d_seq when comparing a candidate dentry, to avoid
2168 * races with d_move().
2169 *
2170 * It is possible that concurrent renames can mess up our list
2171 * walk here and result in missing our dentry, resulting in the
2172 * false-negative result. d_lookup() protects against concurrent
2173 * renames using rename_lock seqlock.
2174 *
2175 * See Documentation/filesystems/path-lookup.txt for more details.
2176 */
2177 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2178 unsigned seq;
2179
2180 seqretry:
2181 /*
2182 * The dentry sequence count protects us from concurrent
2183 * renames, and thus protects parent and name fields.
2184 *
2185 * The caller must perform a seqcount check in order
2186 * to do anything useful with the returned dentry.
2187 *
2188 * NOTE! We do a "raw" seqcount_begin here. That means that
2189 * we don't wait for the sequence count to stabilize if it
2190 * is in the middle of a sequence change. If we do the slow
2191 * dentry compare, we will do seqretries until it is stable,
2192 * and if we end up with a successful lookup, we actually
2193 * want to exit RCU lookup anyway.
2194 */
2195 seq = raw_seqcount_begin(&dentry->d_seq);
2196 if (dentry->d_parent != parent)
2197 continue;
2198 if (d_unhashed(dentry))
2199 continue;
2200
2201 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2202 if (dentry->d_name.hash != hashlen_hash(hashlen))
2203 continue;
2204 *seqp = seq;
2205 switch (slow_dentry_cmp(parent, dentry, seq, name)) {
2206 case D_COMP_OK:
2207 return dentry;
2208 case D_COMP_NOMATCH:
2209 continue;
2210 default:
2211 goto seqretry;
2212 }
2213 }
2214
2215 if (dentry->d_name.hash_len != hashlen)
2216 continue;
2217 *seqp = seq;
2218 if (!dentry_cmp(dentry, str, hashlen_len(hashlen)))
2219 return dentry;
2220 }
2221 return NULL;
2222 }
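/*
 * A sketch of the interlocking sequence checks core VFS performs around
 * __d_lookup_rcu() (cf. lookup_fast() in fs/namei.c) -- not for use
 * outside the VFS. @parent_seq is the d_seq value already validated for
 * the parent earlier in the walk.
 */
static int example_rcu_walk_step(struct dentry *parent, unsigned parent_seq,
				 const struct qstr *name,
				 struct dentry **child)
{
	unsigned seq;
	struct dentry *dentry = __d_lookup_rcu(parent, name, &seq);

	if (!dentry)
		return -ENOENT;
	/* nothing loaded from *dentry may be trusted before this check */
	if (read_seqcount_retry(&dentry->d_seq, seq))
		return -ECHILD;		/* bail out to ref-walk */
	/* re-check the parent *after* the child was found */
	if (read_seqcount_retry(&parent->d_seq, parent_seq))
		return -ECHILD;
	*child = dentry;
	return 0;
}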
2223
2224 /**
2225 * d_lookup - search for a dentry
2226 * @parent: parent dentry
2227 * @name: qstr of name we wish to find
2228 * Returns: dentry, or NULL
2229 *
2230 * d_lookup searches the children of the parent dentry for the name in
2231 * question. If the dentry is found its reference count is incremented and the
2232 * dentry is returned. The caller must use dput to free the entry when it has
2233 * finished using it. %NULL is returned if the dentry does not exist.
2234 */
2235 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2236 {
2237 struct dentry *dentry;
2238 unsigned seq;
2239
2240 do {
2241 seq = read_seqbegin(&rename_lock);
2242 dentry = __d_lookup(parent, name);
2243 if (dentry)
2244 break;
2245 } while (read_seqretry(&rename_lock, seq));
2246 return dentry;
2247 }
2248 EXPORT_SYMBOL(d_lookup);
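/*
 * Usage sketch: the returned child carries a reference that the caller
 * must drop with dput(). @name must already have its hash computed
 * (d_hash_and_lookup() below does that for raw strings).
 */
static bool example_is_cached(struct dentry *parent, const struct qstr *name)
{
	struct dentry *child = d_lookup(parent, name);

	if (!child)
		return false;
	/* ... inspect child while holding our reference ... */
	dput(child);
	return true;
}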
2249
2250 /**
2251 * __d_lookup - search for a dentry (racy)
2252 * @parent: parent dentry
2253 * @name: qstr of name we wish to find
2254 * Returns: dentry, or NULL
2255 *
2256 * __d_lookup is like d_lookup, however it may (rarely) return a
2257 * false-negative result due to unrelated rename activity.
2258 *
2259 * __d_lookup is slightly faster by avoiding the rename_lock read seqlock;
2260 * however, it must be used carefully, e.g. with a following d_lookup in
2261 * the case of failure.
2262 *
2263 * __d_lookup callers must be commented.
2264 */
2265 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2266 {
2267 unsigned int len = name->len;
2268 unsigned int hash = name->hash;
2269 const unsigned char *str = name->name;
2270 struct hlist_bl_head *b = d_hash(parent, hash);
2271 struct hlist_bl_node *node;
2272 struct dentry *found = NULL;
2273 struct dentry *dentry;
2274
2275 /*
2276 * Note: There is significant duplication with __d_lookup_rcu which is
2277 * required to prevent single threaded performance regressions
2278 * especially on architectures where smp_rmb (in seqcounts) are costly.
2279 * Keep the two functions in sync.
2280 */
2281
2282 /*
2283 * The hash list is protected using RCU.
2284 *
2285 * Take d_lock when comparing a candidate dentry, to avoid races
2286 * with d_move().
2287 *
2288 * It is possible that concurrent renames can mess up our list
2289 * walk here and result in missing our dentry, resulting in the
2290 * false-negative result. d_lookup() protects against concurrent
2291 * renames using rename_lock seqlock.
2292 *
2293 * See Documentation/filesystems/path-lookup.txt for more details.
2294 */
2295 rcu_read_lock();
2296
2297 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2298
2299 if (dentry->d_name.hash != hash)
2300 continue;
2301
2302 spin_lock(&dentry->d_lock);
2303 if (dentry->d_parent != parent)
2304 goto next;
2305 if (d_unhashed(dentry))
2306 goto next;
2307
2308 /*
2309 * It is safe to compare names since d_move() cannot
2310 * change the qstr (protected by d_lock).
2311 */
2312 if (parent->d_flags & DCACHE_OP_COMPARE) {
2313 int tlen = dentry->d_name.len;
2314 const char *tname = dentry->d_name.name;
2315 if (parent->d_op->d_compare(parent, dentry, tlen, tname, name))
2316 goto next;
2317 } else {
2318 if (dentry->d_name.len != len)
2319 goto next;
2320 if (dentry_cmp(dentry, str, len))
2321 goto next;
2322 }
2323
2324 dentry->d_lockref.count++;
2325 found = dentry;
2326 spin_unlock(&dentry->d_lock);
2327 break;
2328 next:
2329 spin_unlock(&dentry->d_lock);
2330 }
2331 rcu_read_unlock();
2332
2333 return found;
2334 }
2335
2336 /**
2337 * d_hash_and_lookup - hash the qstr then search for a dentry
2338 * @dir: Directory to search in
2339 * @name: qstr of name we wish to find
2340 *
2341 * On lookup failure NULL is returned; on a bad name, ERR_PTR(-error)
2342 */
2343 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2344 {
2345 /*
2346 * Check for a fs-specific hash function. Note that we must
2347 * calculate the standard hash first, as the d_op->d_hash()
2348 * routine may choose to leave the hash value unchanged.
2349 */
2350 name->hash = full_name_hash(name->name, name->len);
2351 if (dir->d_flags & DCACHE_OP_HASH) {
2352 int err = dir->d_op->d_hash(dir, name);
2353 if (unlikely(err < 0))
2354 return ERR_PTR(err);
2355 }
2356 return d_lookup(dir, name);
2357 }
2358 EXPORT_SYMBOL(d_hash_and_lookup);
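/*
 * Usage sketch after the pattern in fs/proc: probe the dcache with a raw
 * string, letting d_hash_and_lookup() compute the (possibly fs-specific)
 * hash. NULL means "not cached"; ERR_PTR() means ->d_hash() rejected
 * the name.
 */
static struct dentry *example_probe_dcache(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));

	return d_hash_and_lookup(dir, &q);
}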
2359
2360 /*
2361 * When a file is deleted, we have two options:
2362 * - turn this dentry into a negative dentry
2363 * - unhash this dentry and free it.
2364 *
2365 * Usually, we want to just turn this into
2366 * a negative dentry, but if anybody else is
2367 * currently using the dentry or the inode
2368 * we can't do that and we fall back on removing
2369 * it from the hash queues and waiting for
2370 * it to be deleted later when it has no users
2371 */
2372
2373 /**
2374 * d_delete - delete a dentry
2375 * @dentry: The dentry to delete
2376 *
2377 * Turn the dentry into a negative dentry if possible, otherwise
2378 * remove it from the hash queues so it can be deleted later
2379 */
2380
2381 void d_delete(struct dentry * dentry)
2382 {
2383 struct inode *inode;
2384 int isdir = 0;
2385 /*
2386 * Are we the only user?
2387 */
2388 again:
2389 spin_lock(&dentry->d_lock);
2390 inode = dentry->d_inode;
2391 isdir = S_ISDIR(inode->i_mode);
2392 if (dentry->d_lockref.count == 1) {
2393 if (!spin_trylock(&inode->i_lock)) {
2394 spin_unlock(&dentry->d_lock);
2395 cpu_relax();
2396 goto again;
2397 }
2398 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2399 dentry_unlink_inode(dentry);
2400 fsnotify_nameremove(dentry, isdir);
2401 return;
2402 }
2403
2404 if (!d_unhashed(dentry))
2405 __d_drop(dentry);
2406
2407 spin_unlock(&dentry->d_lock);
2408
2409 fsnotify_nameremove(dentry, isdir);
2410 }
2411 EXPORT_SYMBOL(d_delete);
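/*
 * Caller-side sketch, simplified from vfs_unlink() in fs/namei.c (the
 * real code also handles mounts and NFS sillyrename): once ->unlink()
 * succeeded, d_delete() turns the dcache entry negative or unhashes it,
 * all under the parent directory's i_mutex.
 */
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = dir->i_op->unlink(dir, dentry);

	if (!error)
		d_delete(dentry);
	return error;
}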
2412
2413 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2414 {
2415 BUG_ON(!d_unhashed(entry));
2416 hlist_bl_lock(b);
2417 entry->d_flags |= DCACHE_RCUACCESS;
2418 hlist_bl_add_head_rcu(&entry->d_hash, b);
2419 hlist_bl_unlock(b);
2420 }
2421
2422 static void _d_rehash(struct dentry * entry)
2423 {
2424 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2425 }
2426
2427 /**
2428 * d_rehash - add an entry back to the hash
2429 * @entry: dentry to add to the hash
2430 *
2431 * Adds a dentry to the hash according to its name.
2432 */
2433
2434 void d_rehash(struct dentry * entry)
2435 {
2436 spin_lock(&entry->d_lock);
2437 _d_rehash(entry);
2438 spin_unlock(&entry->d_lock);
2439 }
2440 EXPORT_SYMBOL(d_rehash);
2441
2442 /**
2443 * dentry_update_name_case - update case insensitive dentry with a new name
2444 * @dentry: dentry to be updated
2445 * @name: new name
2446 *
2447 * Update a case insensitive dentry with new case of name.
2448 *
2449 * dentry must have been returned by d_lookup with name @name. Old and new
2450 * name lengths must match (ie. no d_compare which allows mismatched name
2451 * lengths).
2452 *
2453 * Parent inode i_mutex must be held over d_lookup and into this call (to
2454 * keep renames and concurrent inserts, and readdir(2) away).
2455 */
2456 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2457 {
2458 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2459 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2460
2461 spin_lock(&dentry->d_lock);
2462 write_seqcount_begin(&dentry->d_seq);
2463 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2464 write_seqcount_end(&dentry->d_seq);
2465 spin_unlock(&dentry->d_lock);
2466 }
2467 EXPORT_SYMBOL(dentry_update_name_case);
2468
2469 static void swap_names(struct dentry *dentry, struct dentry *target)
2470 {
2471 if (unlikely(dname_external(target))) {
2472 if (unlikely(dname_external(dentry))) {
2473 /*
2474 * Both external: swap the pointers
2475 */
2476 swap(target->d_name.name, dentry->d_name.name);
2477 } else {
2478 /*
2479 * dentry:internal, target:external. Steal target's
2480 * storage and make target internal.
2481 */
2482 memcpy(target->d_iname, dentry->d_name.name,
2483 dentry->d_name.len + 1);
2484 dentry->d_name.name = target->d_name.name;
2485 target->d_name.name = target->d_iname;
2486 }
2487 } else {
2488 if (unlikely(dname_external(dentry))) {
2489 /*
2490 * dentry:external, target:internal. Give dentry's
2491 * storage to target and make dentry internal
2492 */
2493 memcpy(dentry->d_iname, target->d_name.name,
2494 target->d_name.len + 1);
2495 target->d_name.name = dentry->d_name.name;
2496 dentry->d_name.name = dentry->d_iname;
2497 } else {
2498 /*
2499 * Both are internal.
2500 */
2501 unsigned int i;
2502 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2503 kmemcheck_mark_initialized(dentry->d_iname, DNAME_INLINE_LEN);
2504 kmemcheck_mark_initialized(target->d_iname, DNAME_INLINE_LEN);
2505 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2506 swap(((long *) &dentry->d_iname)[i],
2507 ((long *) &target->d_iname)[i]);
2508 }
2509 }
2510 }
2511 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2512 }
2513
2514 static void copy_name(struct dentry *dentry, struct dentry *target)
2515 {
2516 struct external_name *old_name = NULL;
2517 if (unlikely(dname_external(dentry)))
2518 old_name = external_name(dentry);
2519 if (unlikely(dname_external(target))) {
2520 atomic_inc(&external_name(target)->u.count);
2521 dentry->d_name = target->d_name;
2522 } else {
2523 memcpy(dentry->d_iname, target->d_name.name,
2524 target->d_name.len + 1);
2525 dentry->d_name.name = dentry->d_iname;
2526 dentry->d_name.hash_len = target->d_name.hash_len;
2527 }
2528 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2529 kfree_rcu(old_name, u.head);
2530 }
2531
2532 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2533 {
2534 /*
2535 * XXXX: do we really need to take target->d_lock?
2536 */
2537 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2538 spin_lock(&target->d_parent->d_lock);
2539 else {
2540 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2541 spin_lock(&dentry->d_parent->d_lock);
2542 spin_lock_nested(&target->d_parent->d_lock,
2543 DENTRY_D_LOCK_NESTED);
2544 } else {
2545 spin_lock(&target->d_parent->d_lock);
2546 spin_lock_nested(&dentry->d_parent->d_lock,
2547 DENTRY_D_LOCK_NESTED);
2548 }
2549 }
2550 if (target < dentry) {
2551 spin_lock_nested(&target->d_lock, 2);
2552 spin_lock_nested(&dentry->d_lock, 3);
2553 } else {
2554 spin_lock_nested(&dentry->d_lock, 2);
2555 spin_lock_nested(&target->d_lock, 3);
2556 }
2557 }
2558
2559 static void dentry_unlock_for_move(struct dentry *dentry, struct dentry *target)
2560 {
2561 if (target->d_parent != dentry->d_parent)
2562 spin_unlock(&dentry->d_parent->d_lock);
2563 if (target->d_parent != target)
2564 spin_unlock(&target->d_parent->d_lock);
2565 spin_unlock(&target->d_lock);
2566 spin_unlock(&dentry->d_lock);
2567 }
2568
2569 /*
2570 * When switching names, the actual string doesn't strictly have to
2571 * be preserved in the target - because we're dropping the target
2572 * anyway. As such, we can just do a simple memcpy() to copy over
2573 * the new name before we switch, unless we are going to rehash
2574 * it. Note that if we *do* unhash the target, we are not allowed
2575 * to rehash it without giving it a new name/hash key - whether
2576 * we swap or overwrite the names here, the resulting name won't match
2577 * the reality in the filesystem; it's only there for d_path() purposes.
2578 * Note that all of this is happening under rename_lock, so
2579 * any hash lookup seeing it in the middle of manipulations will
2580 * be discarded anyway. So we do not care what happens to the hash
2581 * key in that case.
2582 */
2583 /*
2584 * __d_move - move a dentry
2585 * @dentry: entry to move
2586 * @target: new dentry
2587 * @exchange: exchange the two dentries
2588 *
2589 * Update the dcache to reflect the move of a file name. Negative
2590 * dcache entries should not be moved in this way. Caller must hold
2591 * rename_lock, the i_mutex of the source and target directories,
2592 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2593 */
2594 static void __d_move(struct dentry *dentry, struct dentry *target,
2595 bool exchange)
2596 {
2597 if (!dentry->d_inode)
2598 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2599
2600 BUG_ON(d_ancestor(dentry, target));
2601 BUG_ON(d_ancestor(target, dentry));
2602
2603 dentry_lock_for_move(dentry, target);
2604
2605 write_seqcount_begin(&dentry->d_seq);
2606 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2607
2608 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2609
2610 /*
2611 * Move the dentry to the target hash queue. Don't bother checking
2612 * for the same hash queue because of how unlikely it is.
2613 */
2614 __d_drop(dentry);
2615 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2616
2617 /*
2618 * Unhash the target (d_delete() is not usable here). If exchanging
2619 * the two dentries, then rehash onto the other's hash queue.
2620 */
2621 __d_drop(target);
2622 if (exchange) {
2623 __d_rehash(target,
2624 d_hash(dentry->d_parent, dentry->d_name.hash));
2625 }
2626
2627 /* Switch the names.. */
2628 if (exchange)
2629 swap_names(dentry, target);
2630 else
2631 copy_name(dentry, target);
2632
2633 /* ... and switch them in the tree */
2634 if (IS_ROOT(dentry)) {
2635 /* splicing a tree */
2636 dentry->d_parent = target->d_parent;
2637 target->d_parent = target;
2638 list_del_init(&target->d_child);
2639 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2640 } else {
2641 /* swapping two dentries */
2642 swap(dentry->d_parent, target->d_parent);
2643 list_move(&target->d_child, &target->d_parent->d_subdirs);
2644 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2645 if (exchange)
2646 fsnotify_d_move(target);
2647 fsnotify_d_move(dentry);
2648 }
2649
2650 write_seqcount_end(&target->d_seq);
2651 write_seqcount_end(&dentry->d_seq);
2652
2653 dentry_unlock_for_move(dentry, target);
2654 }
2655
2656 /*
2657 * d_move - move a dentry
2658 * @dentry: entry to move
2659 * @target: new dentry
2660 *
2661 * Update the dcache to reflect the move of a file name. Negative
2662 * dcache entries should not be moved in this way. See the locking
2663 * requirements for __d_move.
2664 */
2665 void d_move(struct dentry *dentry, struct dentry *target)
2666 {
2667 write_seqlock(&rename_lock);
2668 __d_move(dentry, target, false);
2669 write_sequnlock(&rename_lock);
2670 }
2671 EXPORT_SYMBOL(d_move);
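/*
 * Caller-side sketch, simplified from vfs_rename() in fs/namei.c: the
 * lock_rename() locks are held, so once ->rename() succeeded the dcache
 * can be updated to match.
 */
static int example_rename(struct inode *old_dir, struct dentry *old_dentry,
			  struct inode *new_dir, struct dentry *new_dentry)
{
	int error = old_dir->i_op->rename(old_dir, old_dentry,
					  new_dir, new_dentry);

	if (!error)
		d_move(old_dentry, new_dentry);
	return error;
}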
2672
2673 /*
2674 * d_exchange - exchange two dentries
2675 * @dentry1: first dentry
2676 * @dentry2: second dentry
2677 */
2678 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2679 {
2680 write_seqlock(&rename_lock);
2681
2682 WARN_ON(!dentry1->d_inode);
2683 WARN_ON(!dentry2->d_inode);
2684 WARN_ON(IS_ROOT(dentry1));
2685 WARN_ON(IS_ROOT(dentry2));
2686
2687 __d_move(dentry1, dentry2, true);
2688
2689 write_sequnlock(&rename_lock);
2690 }
2691
2692 /**
2693 * d_ancestor - search for an ancestor
2694 * @p1: ancestor dentry
2695 * @p2: child dentry
2696 *
2697 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2698 * an ancestor of p2, else NULL.
2699 */
2700 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2701 {
2702 struct dentry *p;
2703
2704 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2705 if (p->d_parent == p1)
2706 return p;
2707 }
2708 return NULL;
2709 }
2710
2711 /*
2712 * This helper attempts to cope with remotely renamed directories
2713 *
2714 * It assumes that the caller is already holding
2715 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2716 *
2717 * Note: If ever the locking in lock_rename() changes, then please
2718 * remember to update this too...
2719 */
2720 static int __d_unalias(struct inode *inode,
2721 struct dentry *dentry, struct dentry *alias)
2722 {
2723 struct mutex *m1 = NULL, *m2 = NULL;
2724 int ret = -ESTALE;
2725
2726 /* If alias and dentry share a parent, then no extra locks required */
2727 if (alias->d_parent == dentry->d_parent)
2728 goto out_unalias;
2729
2730 /* See lock_rename() */
2731 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2732 goto out_err;
2733 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2734 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2735 goto out_err;
2736 m2 = &alias->d_parent->d_inode->i_mutex;
2737 out_unalias:
2738 __d_move(alias, dentry, false);
2739 ret = 0;
2740 out_err:
2741 spin_unlock(&inode->i_lock);
2742 if (m2)
2743 mutex_unlock(m2);
2744 if (m1)
2745 mutex_unlock(m1);
2746 return ret;
2747 }
2748
2749 /**
2750 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2751 * @inode: the inode which may have a disconnected dentry
2752 * @dentry: a negative dentry which we want to point to the inode.
2753 *
2754 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2755 * place of the given dentry and return it, else simply d_add the inode
2756 * to the dentry and return NULL.
2757 *
2758 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2759 * we should error out: directories can't have multiple aliases.
2760 *
2761 * This is needed in the lookup routine of any filesystem that is exportable
2762 * (via knfsd) so that we can build dcache paths to directories effectively.
2763 *
2764 * If a dentry was found and moved, then it is returned. Otherwise NULL
2765 * is returned. This matches the expected return value of ->lookup.
2766 *
2767 * Cluster filesystems may call this function with a negative, hashed dentry.
2768 * In that case, we know that the inode will be a regular file, and also this
2769 * will only occur during atomic_open. So we need to check for the dentry
2770 * being already hashed only in the final case.
2771 */
2772 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2773 {
2774 if (IS_ERR(inode))
2775 return ERR_CAST(inode);
2776
2777 BUG_ON(!d_unhashed(dentry));
2778
2779 if (!inode) {
2780 __d_instantiate(dentry, NULL);
2781 goto out;
2782 }
2783 spin_lock(&inode->i_lock);
2784 if (S_ISDIR(inode->i_mode)) {
2785 struct dentry *new = __d_find_any_alias(inode);
2786 if (unlikely(new)) {
2787 write_seqlock(&rename_lock);
2788 if (unlikely(d_ancestor(new, dentry))) {
2789 write_sequnlock(&rename_lock);
2790 spin_unlock(&inode->i_lock);
2791 dput(new);
2792 new = ERR_PTR(-ELOOP);
2793 pr_warn_ratelimited(
2794 "VFS: Lookup of '%s' in %s %s"
2795 " would have caused loop\n",
2796 dentry->d_name.name,
2797 inode->i_sb->s_type->name,
2798 inode->i_sb->s_id);
2799 } else if (!IS_ROOT(new)) {
2800 int err = __d_unalias(inode, dentry, new);
2801 write_sequnlock(&rename_lock);
2802 if (err) {
2803 dput(new);
2804 new = ERR_PTR(err);
2805 }
2806 } else {
2807 __d_move(new, dentry, false);
2808 write_sequnlock(&rename_lock);
2809 spin_unlock(&inode->i_lock);
2810 security_d_instantiate(new, inode);
2811 }
2812 iput(inode);
2813 return new;
2814 }
2815 }
2816 /* already taking inode->i_lock, so d_add() by hand */
2817 __d_instantiate(dentry, inode);
2818 spin_unlock(&inode->i_lock);
2819 out:
2820 security_d_instantiate(dentry, inode);
2821 d_rehash(dentry);
2822 return NULL;
2823 }
2824 EXPORT_SYMBOL(d_splice_alias);
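/*
 * The canonical ->lookup() tail for an exportable filesystem (cf.
 * ext4_lookup()); examplefs_inode_by_name() and examplefs_iget() are
 * hypothetical helpers.
 */
static struct dentry *examplefs_lookup(struct inode *dir,
				       struct dentry *dentry,
				       unsigned int flags)
{
	struct inode *inode = NULL;
	ino_t ino = examplefs_inode_by_name(dir, &dentry->d_name);

	if (ino)
		inode = examplefs_iget(dir->i_sb, ino);
	/* copes with NULL, IS_ERR() and directory aliases for us */
	return d_splice_alias(inode, dentry);
}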
2825
2826 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2827 {
2828 *buflen -= namelen;
2829 if (*buflen < 0)
2830 return -ENAMETOOLONG;
2831 *buffer -= namelen;
2832 memcpy(*buffer, str, namelen);
2833 return 0;
2834 }
2835
2836 /**
2837 * prepend_name - prepend a pathname in front of current buffer pointer
2838 * @buffer: buffer pointer
2839 * @buflen: allocated length of the buffer
2840 * @name: name string and length qstr structure
2841 *
2842 * With RCU path tracing, it may race with d_move(). Use ACCESS_ONCE() to
2843 * make sure that either the old or the new name pointer and length are
2844 * fetched. However, there may be a mismatch between length and pointer.
2845 * The length cannot be trusted; we need to copy it byte-by-byte until
2846 * the length is reached or a null byte is found. It also prepends "/" at
2847 * the beginning of the name. The sequence number check at the caller will
2848 * retry it again when a d_move() does happen. So any garbage in the buffer
2849 * due to mismatched pointer and length will be discarded.
2850 *
2851 * Data dependency barrier is needed to make sure that we see that terminating
2852 * NUL. Alpha strikes again, film at 11...
2853 */
2854 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2855 {
2856 const char *dname = ACCESS_ONCE(name->name);
2857 u32 dlen = ACCESS_ONCE(name->len);
2858 char *p;
2859
2860 smp_read_barrier_depends();
2861
2862 *buflen -= dlen + 1;
2863 if (*buflen < 0)
2864 return -ENAMETOOLONG;
2865 p = *buffer -= dlen + 1;
2866 *p++ = '/';
2867 while (dlen--) {
2868 char c = *dname++;
2869 if (!c)
2870 break;
2871 *p++ = c;
2872 }
2873 return 0;
2874 }
2875
2876 /**
2877 * prepend_path - Prepend path string to a buffer
2878 * @path: the dentry/vfsmount to report
2879 * @root: root vfsmnt/dentry
2880 * @buffer: pointer to the end of the buffer
2881 * @buflen: pointer to buffer length
2882 *
2883 * The function will first try to write out the pathname without taking any
2884 * lock other than the RCU read lock to make sure that dentries won't go away.
2885 * It only checks the sequence number of the global rename_lock as any change
2886 * in the dentry's d_seq will be preceded by changes in the rename_lock
2887 * sequence number. If the sequence number has changed, it will restart
2888 * the whole pathname back-tracing sequence again by taking the rename_lock.
2889 * In this case, there is no need to take the RCU read lock as the recursive
2890 * parent pointer references will keep the dentry chain alive as long as no
2891 * rename operation is performed.
2892 */
2893 static int prepend_path(const struct path *path,
2894 const struct path *root,
2895 char **buffer, int *buflen)
2896 {
2897 struct dentry *dentry;
2898 struct vfsmount *vfsmnt;
2899 struct mount *mnt;
2900 int error = 0;
2901 unsigned seq, m_seq = 0;
2902 char *bptr;
2903 int blen;
2904
2905 rcu_read_lock();
2906 restart_mnt:
2907 read_seqbegin_or_lock(&mount_lock, &m_seq);
2908 seq = 0;
2909 rcu_read_lock();
2910 restart:
2911 bptr = *buffer;
2912 blen = *buflen;
2913 error = 0;
2914 dentry = path->dentry;
2915 vfsmnt = path->mnt;
2916 mnt = real_mount(vfsmnt);
2917 read_seqbegin_or_lock(&rename_lock, &seq);
2918 while (dentry != root->dentry || vfsmnt != root->mnt) {
2919 struct dentry * parent;
2920
2921 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2922 struct mount *parent = ACCESS_ONCE(mnt->mnt_parent);
2923 /* Global root? */
2924 if (mnt != parent) {
2925 dentry = ACCESS_ONCE(mnt->mnt_mountpoint);
2926 mnt = parent;
2927 vfsmnt = &mnt->mnt;
2928 continue;
2929 }
2930 /*
2931 * Filesystems needing to implement special "root names"
2932 * should do so with ->d_dname()
2933 */
2934 if (IS_ROOT(dentry) &&
2935 (dentry->d_name.len != 1 ||
2936 dentry->d_name.name[0] != '/')) {
2937 WARN(1, "Root dentry has weird name <%.*s>\n",
2938 (int) dentry->d_name.len,
2939 dentry->d_name.name);
2940 }
2941 if (!error)
2942 error = is_mounted(vfsmnt) ? 1 : 2;
2943 break;
2944 }
2945 parent = dentry->d_parent;
2946 prefetch(parent);
2947 error = prepend_name(&bptr, &blen, &dentry->d_name);
2948 if (error)
2949 break;
2950
2951 dentry = parent;
2952 }
2953 if (!(seq & 1))
2954 rcu_read_unlock();
2955 if (need_seqretry(&rename_lock, seq)) {
2956 seq = 1;
2957 goto restart;
2958 }
2959 done_seqretry(&rename_lock, seq);
2960
2961 if (!(m_seq & 1))
2962 rcu_read_unlock();
2963 if (need_seqretry(&mount_lock, m_seq)) {
2964 m_seq = 1;
2965 goto restart_mnt;
2966 }
2967 done_seqretry(&mount_lock, m_seq);
2968
2969 if (error >= 0 && bptr == *buffer) {
2970 if (--blen < 0)
2971 error = -ENAMETOOLONG;
2972 else
2973 *--bptr = '/';
2974 }
2975 *buffer = bptr;
2976 *buflen = blen;
2977 return error;
2978 }
2979
2980 /**
2981 * __d_path - return the path of a dentry
2982 * @path: the dentry/vfsmount to report
2983 * @root: root vfsmnt/dentry
2984 * @buf: buffer to return value in
2985 * @buflen: buffer length
2986 *
2987 * Convert a dentry into an ASCII path name.
2988 *
2989 * Returns a pointer into the buffer or an error code if the
2990 * path was too long.
2991 *
2992 * "buflen" should be positive.
2993 *
2994 * If the path is not reachable from the supplied root, return %NULL.
2995 */
2996 char *__d_path(const struct path *path,
2997 const struct path *root,
2998 char *buf, int buflen)
2999 {
3000 char *res = buf + buflen;
3001 int error;
3002
3003 prepend(&res, &buflen, "\0", 1);
3004 error = prepend_path(path, root, &res, &buflen);
3005
3006 if (error < 0)
3007 return ERR_PTR(error);
3008 if (error > 0)
3009 return NULL;
3010 return res;
3011 }
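/*
 * Usage sketch: render @path relative to @root. A NULL return means the
 * path is not reachable from @root; real callers usually render into a
 * full page rather than a small stack buffer.
 */
static void example_show_relative(const struct path *path,
				  const struct path *root)
{
	char buf[256];
	char *p = __d_path(path, root, buf, sizeof(buf));

	if (!IS_ERR_OR_NULL(p))
		printk(KERN_DEBUG "path: %s\n", p);
}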
3012
3013 char *d_absolute_path(const struct path *path,
3014 char *buf, int buflen)
3015 {
3016 struct path root = {};
3017 char *res = buf + buflen;
3018 int error;
3019
3020 prepend(&res, &buflen, "\0", 1);
3021 error = prepend_path(path, &root, &res, &buflen);
3022
3023 if (error > 1)
3024 error = -EINVAL;
3025 if (error < 0)
3026 return ERR_PTR(error);
3027 return res;
3028 }
3029
3030 /*
3031 * same as __d_path but appends "(deleted)" for unlinked files.
3032 */
3033 static int path_with_deleted(const struct path *path,
3034 const struct path *root,
3035 char **buf, int *buflen)
3036 {
3037 prepend(buf, buflen, "\0", 1);
3038 if (d_unlinked(path->dentry)) {
3039 int error = prepend(buf, buflen, " (deleted)", 10);
3040 if (error)
3041 return error;
3042 }
3043
3044 return prepend_path(path, root, buf, buflen);
3045 }
3046
3047 static int prepend_unreachable(char **buffer, int *buflen)
3048 {
3049 return prepend(buffer, buflen, "(unreachable)", 13);
3050 }
3051
3052 static void get_fs_root_rcu(struct fs_struct *fs, struct path *root)
3053 {
3054 unsigned seq;
3055
3056 do {
3057 seq = read_seqcount_begin(&fs->seq);
3058 *root = fs->root;
3059 } while (read_seqcount_retry(&fs->seq, seq));
3060 }
3061
3062 /**
3063 * d_path - return the path of a dentry
3064 * @path: path to report
3065 * @buf: buffer to return value in
3066 * @buflen: buffer length
3067 *
3068 * Convert a dentry into an ASCII path name. If the entry has been deleted
3069 * the string " (deleted)" is appended. Note that this is ambiguous.
3070 *
3071 * Returns a pointer into the buffer or an error code if the path was
3072 * too long. Note: Callers should use the returned pointer, not the passed
3073 * in buffer, to use the name! The implementation often starts at an offset
3074 * into the buffer, and may leave 0 bytes at the start.
3075 *
3076 * "buflen" should be positive.
3077 */
3078 char *d_path(const struct path *path, char *buf, int buflen)
3079 {
3080 char *res = buf + buflen;
3081 struct path root;
3082 int error;
3083
3084 /*
3085 * We have various synthetic filesystems that never get mounted. On
3086 * these filesystems dentries are never used for lookup purposes, and
3087 * thus don't need to be hashed. They also don't need a name until a
3088 * user wants to identify the object in /proc/pid/fd/. The little hack
3089 * below allows us to generate a name for these objects on demand:
3090 *
3091 * Some pseudo inodes are mountable. When they are mounted
3092 * path->dentry == path->mnt->mnt_root. In that case don't call d_dname
3093 * and instead have d_path return the mounted path.
3094 */
3095 if (path->dentry->d_op && path->dentry->d_op->d_dname &&
3096 (!IS_ROOT(path->dentry) || path->dentry != path->mnt->mnt_root))
3097 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
3098
3099 rcu_read_lock();
3100 get_fs_root_rcu(current->fs, &root);
3101 error = path_with_deleted(path, &root, &res, &buflen);
3102 rcu_read_unlock();
3103
3104 if (error < 0)
3105 res = ERR_PTR(error);
3106 return res;
3107 }
3108 EXPORT_SYMBOL(d_path);
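/*
 * Usage sketch: the name must be read through the returned pointer, not
 * through @buf, since the string is built from the end of the buffer.
 */
static void example_log_file(struct file *file)
{
	char *page = (char *)__get_free_page(GFP_KERNEL);
	char *p;

	if (!page)
		return;
	p = d_path(&file->f_path, page, PAGE_SIZE);
	if (!IS_ERR(p))
		printk(KERN_DEBUG "file: %s\n", p);
	free_page((unsigned long)page);
}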
3109
3110 /*
3111 * Helper function for dentry_operations.d_dname() members
3112 */
3113 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
3114 const char *fmt, ...)
3115 {
3116 va_list args;
3117 char temp[64];
3118 int sz;
3119
3120 va_start(args, fmt);
3121 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
3122 va_end(args);
3123
3124 if (sz > sizeof(temp) || sz > buflen)
3125 return ERR_PTR(-ENAMETOOLONG);
3126
3127 buffer += buflen - sz;
3128 return memcpy(buffer, temp, sz);
3129 }
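/*
 * The pattern pipefs uses for its ->d_dname() (cf. pipefs_dname() in
 * fs/pipe.c):
 */
static char *examplefs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
			     dentry->d_inode->i_ino);
}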
3130
3131 char *simple_dname(struct dentry *dentry, char *buffer, int buflen)
3132 {
3133 char *end = buffer + buflen;
3134 /* these dentries are never renamed, so d_lock is not needed */
3135 if (prepend(&end, &buflen, " (deleted)", 11) ||
3136 prepend(&end, &buflen, dentry->d_name.name, dentry->d_name.len) ||
3137 prepend(&end, &buflen, "/", 1))
3138 end = ERR_PTR(-ENAMETOOLONG);
3139 return end;
3140 }
3141 EXPORT_SYMBOL(simple_dname);
3142
3143 /*
3144 * Write full pathname from the root of the filesystem into the buffer.
3145 */
3146 static char *__dentry_path(struct dentry *d, char *buf, int buflen)
3147 {
3148 struct dentry *dentry;
3149 char *end, *retval;
3150 int len, seq = 0;
3151 int error = 0;
3152
3153 if (buflen < 2)
3154 goto Elong;
3155
3156 rcu_read_lock();
3157 restart:
3158 dentry = d;
3159 end = buf + buflen;
3160 len = buflen;
3161 prepend(&end, &len, "\0", 1);
3162 /* Get '/' right */
3163 retval = end-1;
3164 *retval = '/';
3165 read_seqbegin_or_lock(&rename_lock, &seq);
3166 while (!IS_ROOT(dentry)) {
3167 struct dentry *parent = dentry->d_parent;
3168
3169 prefetch(parent);
3170 error = prepend_name(&end, &len, &dentry->d_name);
3171 if (error)
3172 break;
3173
3174 retval = end;
3175 dentry = parent;
3176 }
3177 if (!(seq & 1))
3178 rcu_read_unlock();
3179 if (need_seqretry(&rename_lock, seq)) {
3180 seq = 1;
3181 goto restart;
3182 }
3183 done_seqretry(&rename_lock, seq);
3184 if (error)
3185 goto Elong;
3186 return retval;
3187 Elong:
3188 return ERR_PTR(-ENAMETOOLONG);
3189 }
3190
3191 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
3192 {
3193 return __dentry_path(dentry, buf, buflen);
3194 }
3195 EXPORT_SYMBOL(dentry_path_raw);
3196
3197 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
3198 {
3199 char *p = NULL;
3200 char *retval;
3201
3202 if (d_unlinked(dentry)) {
3203 p = buf + buflen;
3204 if (prepend(&p, &buflen, "//deleted", 10) != 0)
3205 goto Elong;
3206 buflen++;
3207 }
3208 retval = __dentry_path(dentry, buf, buflen);
3209 if (!IS_ERR(retval) && p)
3210 *p = '/'; /* restore the '/' overwritten with '\0' */
3211 return retval;
3212 Elong:
3213 return ERR_PTR(-ENAMETOOLONG);
3214 }
3215
3216 static void get_fs_root_and_pwd_rcu(struct fs_struct *fs, struct path *root,
3217 struct path *pwd)
3218 {
3219 unsigned seq;
3220
3221 do {
3222 seq = read_seqcount_begin(&fs->seq);
3223 *root = fs->root;
3224 *pwd = fs->pwd;
3225 } while (read_seqcount_retry(&fs->seq, seq));
3226 }
3227
3228 /*
3229 * NOTE! The user-level library version returns a
3230 * character pointer. The kernel system call just
3231 * returns the length of the buffer filled (which
3232 * includes the ending '\0' character), or a negative
3233 * error value. So libc would do something like
3234 *
3235 * char *getcwd(char * buf, size_t size)
3236 * {
3237 * int retval;
3238 *
3239 * retval = sys_getcwd(buf, size);
3240 * if (retval >= 0)
3241 * return buf;
3242 * errno = -retval;
3243 * return NULL;
3244 * }
3245 */
3246 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
3247 {
3248 int error;
3249 struct path pwd, root;
3250 char *page = __getname();
3251
3252 if (!page)
3253 return -ENOMEM;
3254
3255 rcu_read_lock();
3256 get_fs_root_and_pwd_rcu(current->fs, &root, &pwd);
3257
3258 error = -ENOENT;
3259 if (!d_unlinked(pwd.dentry)) {
3260 unsigned long len;
3261 char *cwd = page + PATH_MAX;
3262 int buflen = PATH_MAX;
3263
3264 prepend(&cwd, &buflen, "\0", 1);
3265 error = prepend_path(&pwd, &root, &cwd, &buflen);
3266 rcu_read_unlock();
3267
3268 if (error < 0)
3269 goto out;
3270
3271 /* Unreachable from current root */
3272 if (error > 0) {
3273 error = prepend_unreachable(&cwd, &buflen);
3274 if (error)
3275 goto out;
3276 }
3277
3278 error = -ERANGE;
3279 len = PATH_MAX + page - cwd;
3280 if (len <= size) {
3281 error = len;
3282 if (copy_to_user(buf, cwd, len))
3283 error = -EFAULT;
3284 }
3285 } else {
3286 rcu_read_unlock();
3287 }
3288
3289 out:
3290 __putname(page);
3291 return error;
3292 }
3293
3294 /*
3295 * Test whether new_dentry is a subdirectory of old_dentry.
3296 *
3297 * Trivially implemented using the dcache structure
3298 */
3299
3300 /**
3301 * is_subdir - is new dentry a subdirectory of old_dentry
3302 * @new_dentry: new dentry
3303 * @old_dentry: old dentry
3304 *
3305 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
3306 * Returns 0 otherwise.
3307 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
3308 */
3309
3310 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
3311 {
3312 int result;
3313 unsigned seq;
3314
3315 if (new_dentry == old_dentry)
3316 return 1;
3317
3318 do {
3319 /* for restarting inner loop in case of seq retry */
3320 seq = read_seqbegin(&rename_lock);
3321 /*
3322 * Need rcu_read_lock() to protect against d_parent being changed
3323 * under us by d_move()
3324 */
3325 rcu_read_lock();
3326 if (d_ancestor(old_dentry, new_dentry))
3327 result = 1;
3328 else
3329 result = 0;
3330 rcu_read_unlock();
3331 } while (read_seqretry(&rename_lock, seq));
3332
3333 return result;
3334 }
3335
3336 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3337 {
3338 struct dentry *root = data;
3339 if (dentry != root) {
3340 if (d_unhashed(dentry) || !dentry->d_inode)
3341 return D_WALK_SKIP;
3342
3343 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3344 dentry->d_flags |= DCACHE_GENOCIDE;
3345 dentry->d_lockref.count--;
3346 }
3347 }
3348 return D_WALK_CONTINUE;
3349 }
3350
3351 void d_genocide(struct dentry *parent)
3352 {
3353 d_walk(parent, parent, d_genocide_kill, NULL);
3354 }
3355
3356 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3357 {
3358 inode_dec_link_count(inode);
3359 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3360 !hlist_unhashed(&dentry->d_u.d_alias) ||
3361 !d_unlinked(dentry));
3362 spin_lock(&dentry->d_parent->d_lock);
3363 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3364 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3365 (unsigned long long)inode->i_ino);
3366 spin_unlock(&dentry->d_lock);
3367 spin_unlock(&dentry->d_parent->d_lock);
3368 d_instantiate(dentry, inode);
3369 }
3370 EXPORT_SYMBOL(d_tmpfile);
3371
3372 static __initdata unsigned long dhash_entries;
3373 static int __init set_dhash_entries(char *str)
3374 {
3375 if (!str)
3376 return 0;
3377 dhash_entries = simple_strtoul(str, &str, 0);
3378 return 1;
3379 }
3380 __setup("dhash_entries=", set_dhash_entries);
3381
3382 static void __init dcache_init_early(void)
3383 {
3384 unsigned int loop;
3385
3386 /* If hashes are distributed across NUMA nodes, defer
3387 * hash allocation until vmalloc space is available.
3388 */
3389 if (hashdist)
3390 return;
3391
3392 dentry_hashtable =
3393 alloc_large_system_hash("Dentry cache",
3394 sizeof(struct hlist_bl_head),
3395 dhash_entries,
3396 13,
3397 HASH_EARLY,
3398 &d_hash_shift,
3399 &d_hash_mask,
3400 0,
3401 0);
3402
3403 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3404 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3405 }
3406
3407 static void __init dcache_init(void)
3408 {
3409 unsigned int loop;
3410
3411 /*
3412 * A constructor could be added for stable state like the lists,
3413 * but it is probably not worth it because of the cache nature
3414 * of the dcache.
3415 */
3416 dentry_cache = KMEM_CACHE(dentry,
3417 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
3418
3419 /* Hash may have been set up in dcache_init_early */
3420 if (!hashdist)
3421 return;
3422
3423 dentry_hashtable =
3424 alloc_large_system_hash("Dentry cache",
3425 sizeof(struct hlist_bl_head),
3426 dhash_entries,
3427 13,
3428 0,
3429 &d_hash_shift,
3430 &d_hash_mask,
3431 0,
3432 0);
3433
3434 for (loop = 0; loop < (1U << d_hash_shift); loop++)
3435 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3436 }
3437
3438 /* SLAB cache for __getname() consumers */
3439 struct kmem_cache *names_cachep __read_mostly;
3440 EXPORT_SYMBOL(names_cachep);
3441
3442 EXPORT_SYMBOL(d_genocide);
3443
3444 void __init vfs_caches_init_early(void)
3445 {
3446 dcache_init_early();
3447 inode_init_early();
3448 }
3449
3450 void __init vfs_caches_init(unsigned long mempages)
3451 {
3452 unsigned long reserve;
3453
3454 /* Base hash sizes on available memory, with a reserve equal to
3455 150% of current kernel size */
3456
3457 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3458 mempages -= reserve;
3459
3460 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3461 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3462
3463 dcache_init();
3464 inode_init();
3465 files_init(mempages);
3466 mnt_init();
3467 bdev_cache_init();
3468 chrdev_init();
3469 }