]> git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/blob - fs/dcache.c
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-eoan-kernel.git] / fs / dcache.c
1 /*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9 /*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
17 #include <linux/ratelimit.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/export.h>
27 #include <linux/security.h>
28 #include <linux/seqlock.h>
29 #include <linux/bootmem.h>
30 #include <linux/bit_spinlock.h>
31 #include <linux/rculist_bl.h>
32 #include <linux/list_lru.h>
33 #include "internal.h"
34 #include "mount.h"
35
36 /*
37 * Usage:
38 * dcache->d_inode->i_lock protects:
39 * - i_dentry, d_u.d_alias, d_inode of aliases
40 * dcache_hash_bucket lock protects:
41 * - the dcache hash table
42 * s_roots bl list spinlock protects:
43 * - the s_roots list (see __d_drop)
44 * dentry->d_sb->s_dentry_lru_lock protects:
45 * - the dcache lru lists and counters
46 * d_lock protects:
47 * - d_flags
48 * - d_name
49 * - d_lru
50 * - d_count
51 * - d_unhashed()
52 * - d_parent and d_subdirs
53 * - childrens' d_child and d_parent
54 * - d_u.d_alias, d_inode
55 *
56 * Ordering:
57 * dentry->d_inode->i_lock
58 * dentry->d_lock
59 * dentry->d_sb->s_dentry_lru_lock
60 * dcache_hash_bucket lock
61 * s_roots lock
62 *
63 * If there is an ancestor relationship:
64 * dentry->d_parent->...->d_parent->d_lock
65 * ...
66 * dentry->d_parent->d_lock
67 * dentry->d_lock
68 *
69 * If no ancestor relationship:
70 * arbitrary, since it's serialized on rename_lock
71 */
72 int sysctl_vfs_cache_pressure __read_mostly = 100;
73 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
74
75 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
76
77 EXPORT_SYMBOL(rename_lock);
78
79 static struct kmem_cache *dentry_cache __read_mostly;
80
81 const struct qstr empty_name = QSTR_INIT("", 0);
82 EXPORT_SYMBOL(empty_name);
83 const struct qstr slash_name = QSTR_INIT("/", 1);
84 EXPORT_SYMBOL(slash_name);
85
86 /*
87 * This is the single most critical data structure when it comes
88 * to the dcache: the hashtable for lookups. Somebody should try
89 * to make this good - I've just made it work.
90 *
91 * This hash-function tries to avoid losing too many bits of hash
92 * information, yet avoid using a prime hash-size or similar.
93 */
94
95 static unsigned int d_hash_shift __read_mostly;
96
97 static struct hlist_bl_head *dentry_hashtable __read_mostly;
98
99 static inline struct hlist_bl_head *d_hash(unsigned int hash)
100 {
101 return dentry_hashtable + (hash >> d_hash_shift);
102 }
103
104 #define IN_LOOKUP_SHIFT 10
105 static struct hlist_bl_head in_lookup_hashtable[1 << IN_LOOKUP_SHIFT];
106
107 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent,
108 unsigned int hash)
109 {
110 hash += (unsigned long) parent / L1_CACHE_BYTES;
111 return in_lookup_hashtable + hash_32(hash, IN_LOOKUP_SHIFT);
112 }
113
114
115 /* Statistics gathering. */
116 struct dentry_stat_t dentry_stat = {
117 .age_limit = 45,
118 };
119
120 static DEFINE_PER_CPU(long, nr_dentry);
121 static DEFINE_PER_CPU(long, nr_dentry_unused);
122
123 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
124
125 /*
126 * Here we resort to our own counters instead of using generic per-cpu counters
127 * for consistency with what the vfs inode code does. We are expected to harvest
128 * better code and performance by having our own specialized counters.
129 *
130 * Please note that the loop is done over all possible CPUs, not over all online
131 * CPUs. The reason for this is that we don't want to play games with CPUs going
132 * on and off. If one of them goes off, we will just keep their counters.
133 *
134 * glommer: See cffbc8a for details, and if you ever intend to change this,
135 * please update all vfs counters to match.
136 */
137 static long get_nr_dentry(void)
138 {
139 int i;
140 long sum = 0;
141 for_each_possible_cpu(i)
142 sum += per_cpu(nr_dentry, i);
143 return sum < 0 ? 0 : sum;
144 }
145
146 static long get_nr_dentry_unused(void)
147 {
148 int i;
149 long sum = 0;
150 for_each_possible_cpu(i)
151 sum += per_cpu(nr_dentry_unused, i);
152 return sum < 0 ? 0 : sum;
153 }
154
155 int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
156 size_t *lenp, loff_t *ppos)
157 {
158 dentry_stat.nr_dentry = get_nr_dentry();
159 dentry_stat.nr_unused = get_nr_dentry_unused();
160 return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
161 }
162 #endif
163
164 /*
165 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
166 * The strings are both count bytes long, and count is non-zero.
167 */
168 #ifdef CONFIG_DCACHE_WORD_ACCESS
169
170 #include <asm/word-at-a-time.h>
171 /*
172 * NOTE! 'cs' and 'scount' come from a dentry, so it has a
173 * aligned allocation for this particular component. We don't
174 * strictly need the load_unaligned_zeropad() safety, but it
175 * doesn't hurt either.
176 *
177 * In contrast, 'ct' and 'tcount' can be from a pathname, and do
178 * need the careful unaligned handling.
179 */
180 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
181 {
182 unsigned long a,b,mask;
183
184 for (;;) {
185 a = read_word_at_a_time(cs);
186 b = load_unaligned_zeropad(ct);
187 if (tcount < sizeof(unsigned long))
188 break;
189 if (unlikely(a != b))
190 return 1;
191 cs += sizeof(unsigned long);
192 ct += sizeof(unsigned long);
193 tcount -= sizeof(unsigned long);
194 if (!tcount)
195 return 0;
196 }
197 mask = bytemask_from_count(tcount);
198 return unlikely(!!((a ^ b) & mask));
199 }
200
201 #else
202
203 static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
204 {
205 do {
206 if (*cs != *ct)
207 return 1;
208 cs++;
209 ct++;
210 tcount--;
211 } while (tcount);
212 return 0;
213 }
214
215 #endif
216
217 static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
218 {
219 /*
220 * Be careful about RCU walk racing with rename:
221 * use 'READ_ONCE' to fetch the name pointer.
222 *
223 * NOTE! Even if a rename will mean that the length
224 * was not loaded atomically, we don't care. The
225 * RCU walk will check the sequence count eventually,
226 * and catch it. And we won't overrun the buffer,
227 * because we're reading the name pointer atomically,
228 * and a dentry name is guaranteed to be properly
229 * terminated with a NUL byte.
230 *
231 * End result: even if 'len' is wrong, we'll exit
232 * early because the data cannot match (there can
233 * be no NUL in the ct/tcount data)
234 */
235 const unsigned char *cs = READ_ONCE(dentry->d_name.name);
236
237 return dentry_string_cmp(cs, ct, tcount);
238 }
239
240 struct external_name {
241 union {
242 atomic_t count;
243 struct rcu_head head;
244 } u;
245 unsigned char name[];
246 };
247
248 static inline struct external_name *external_name(struct dentry *dentry)
249 {
250 return container_of(dentry->d_name.name, struct external_name, name[0]);
251 }
252
253 static void __d_free(struct rcu_head *head)
254 {
255 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
256
257 kmem_cache_free(dentry_cache, dentry);
258 }
259
260 static void __d_free_external_name(struct rcu_head *head)
261 {
262 struct external_name *name = container_of(head, struct external_name,
263 u.head);
264
265 mod_node_page_state(page_pgdat(virt_to_page(name)),
266 NR_INDIRECTLY_RECLAIMABLE_BYTES,
267 -ksize(name));
268
269 kfree(name);
270 }
271
272 static void __d_free_external(struct rcu_head *head)
273 {
274 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
275
276 __d_free_external_name(&external_name(dentry)->u.head);
277
278 kmem_cache_free(dentry_cache, dentry);
279 }
280
281 static inline int dname_external(const struct dentry *dentry)
282 {
283 return dentry->d_name.name != dentry->d_iname;
284 }
285
286 void take_dentry_name_snapshot(struct name_snapshot *name, struct dentry *dentry)
287 {
288 spin_lock(&dentry->d_lock);
289 if (unlikely(dname_external(dentry))) {
290 struct external_name *p = external_name(dentry);
291 atomic_inc(&p->u.count);
292 spin_unlock(&dentry->d_lock);
293 name->name = p->name;
294 } else {
295 memcpy(name->inline_name, dentry->d_iname,
296 dentry->d_name.len + 1);
297 spin_unlock(&dentry->d_lock);
298 name->name = name->inline_name;
299 }
300 }
301 EXPORT_SYMBOL(take_dentry_name_snapshot);
302
303 void release_dentry_name_snapshot(struct name_snapshot *name)
304 {
305 if (unlikely(name->name != name->inline_name)) {
306 struct external_name *p;
307 p = container_of(name->name, struct external_name, name[0]);
308 if (unlikely(atomic_dec_and_test(&p->u.count)))
309 call_rcu(&p->u.head, __d_free_external_name);
310 }
311 }
312 EXPORT_SYMBOL(release_dentry_name_snapshot);
313
314 static inline void __d_set_inode_and_type(struct dentry *dentry,
315 struct inode *inode,
316 unsigned type_flags)
317 {
318 unsigned flags;
319
320 dentry->d_inode = inode;
321 flags = READ_ONCE(dentry->d_flags);
322 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
323 flags |= type_flags;
324 WRITE_ONCE(dentry->d_flags, flags);
325 }
326
327 static inline void __d_clear_type_and_inode(struct dentry *dentry)
328 {
329 unsigned flags = READ_ONCE(dentry->d_flags);
330
331 flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
332 WRITE_ONCE(dentry->d_flags, flags);
333 dentry->d_inode = NULL;
334 }
335
336 static void dentry_free(struct dentry *dentry)
337 {
338 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias));
339 if (unlikely(dname_external(dentry))) {
340 struct external_name *p = external_name(dentry);
341 if (likely(atomic_dec_and_test(&p->u.count))) {
342 call_rcu(&dentry->d_u.d_rcu, __d_free_external);
343 return;
344 }
345 }
346 /* if dentry was never visible to RCU, immediate free is OK */
347 if (!(dentry->d_flags & DCACHE_RCUACCESS))
348 __d_free(&dentry->d_u.d_rcu);
349 else
350 call_rcu(&dentry->d_u.d_rcu, __d_free);
351 }
352
353 /*
354 * Release the dentry's inode, using the filesystem
355 * d_iput() operation if defined.
356 */
357 static void dentry_unlink_inode(struct dentry * dentry)
358 __releases(dentry->d_lock)
359 __releases(dentry->d_inode->i_lock)
360 {
361 struct inode *inode = dentry->d_inode;
362
363 raw_write_seqcount_begin(&dentry->d_seq);
364 __d_clear_type_and_inode(dentry);
365 hlist_del_init(&dentry->d_u.d_alias);
366 raw_write_seqcount_end(&dentry->d_seq);
367 spin_unlock(&dentry->d_lock);
368 spin_unlock(&inode->i_lock);
369 if (!inode->i_nlink)
370 fsnotify_inoderemove(inode);
371 if (dentry->d_op && dentry->d_op->d_iput)
372 dentry->d_op->d_iput(dentry, inode);
373 else
374 iput(inode);
375 }
376
377 /*
378 * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry
379 * is in use - which includes both the "real" per-superblock
380 * LRU list _and_ the DCACHE_SHRINK_LIST use.
381 *
382 * The DCACHE_SHRINK_LIST bit is set whenever the dentry is
383 * on the shrink list (ie not on the superblock LRU list).
384 *
385 * The per-cpu "nr_dentry_unused" counters are updated with
386 * the DCACHE_LRU_LIST bit.
387 *
388 * These helper functions make sure we always follow the
389 * rules. d_lock must be held by the caller.
390 */
391 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
392 static void d_lru_add(struct dentry *dentry)
393 {
394 D_FLAG_VERIFY(dentry, 0);
395 dentry->d_flags |= DCACHE_LRU_LIST;
396 this_cpu_inc(nr_dentry_unused);
397 WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
398 }
399
400 static void d_lru_del(struct dentry *dentry)
401 {
402 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
403 dentry->d_flags &= ~DCACHE_LRU_LIST;
404 this_cpu_dec(nr_dentry_unused);
405 WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru));
406 }
407
408 static void d_shrink_del(struct dentry *dentry)
409 {
410 D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
411 list_del_init(&dentry->d_lru);
412 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST);
413 this_cpu_dec(nr_dentry_unused);
414 }
415
416 static void d_shrink_add(struct dentry *dentry, struct list_head *list)
417 {
418 D_FLAG_VERIFY(dentry, 0);
419 list_add(&dentry->d_lru, list);
420 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST;
421 this_cpu_inc(nr_dentry_unused);
422 }
423
424 /*
425 * These can only be called under the global LRU lock, ie during the
426 * callback for freeing the LRU list. "isolate" removes it from the
427 * LRU lists entirely, while shrink_move moves it to the indicated
428 * private list.
429 */
430 static void d_lru_isolate(struct list_lru_one *lru, struct dentry *dentry)
431 {
432 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
433 dentry->d_flags &= ~DCACHE_LRU_LIST;
434 this_cpu_dec(nr_dentry_unused);
435 list_lru_isolate(lru, &dentry->d_lru);
436 }
437
438 static void d_lru_shrink_move(struct list_lru_one *lru, struct dentry *dentry,
439 struct list_head *list)
440 {
441 D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST);
442 dentry->d_flags |= DCACHE_SHRINK_LIST;
443 list_lru_isolate_move(lru, &dentry->d_lru, list);
444 }
445
446 /**
447 * d_drop - drop a dentry
448 * @dentry: dentry to drop
449 *
450 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
451 * be found through a VFS lookup any more. Note that this is different from
452 * deleting the dentry - d_delete will try to mark the dentry negative if
453 * possible, giving a successful _negative_ lookup, while d_drop will
454 * just make the cache lookup fail.
455 *
456 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
457 * reason (NFS timeouts or autofs deletes).
458 *
459 * __d_drop requires dentry->d_lock
460 * ___d_drop doesn't mark dentry as "unhashed"
461 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
462 */
463 static void ___d_drop(struct dentry *dentry)
464 {
465 struct hlist_bl_head *b;
466 /*
467 * Hashed dentries are normally on the dentry hashtable,
468 * with the exception of those newly allocated by
469 * d_obtain_root, which are always IS_ROOT:
470 */
471 if (unlikely(IS_ROOT(dentry)))
472 b = &dentry->d_sb->s_roots;
473 else
474 b = d_hash(dentry->d_name.hash);
475
476 hlist_bl_lock(b);
477 __hlist_bl_del(&dentry->d_hash);
478 hlist_bl_unlock(b);
479 }
480
481 void __d_drop(struct dentry *dentry)
482 {
483 if (!d_unhashed(dentry)) {
484 ___d_drop(dentry);
485 dentry->d_hash.pprev = NULL;
486 write_seqcount_invalidate(&dentry->d_seq);
487 }
488 }
489 EXPORT_SYMBOL(__d_drop);
490
491 void d_drop(struct dentry *dentry)
492 {
493 spin_lock(&dentry->d_lock);
494 __d_drop(dentry);
495 spin_unlock(&dentry->d_lock);
496 }
497 EXPORT_SYMBOL(d_drop);
498
499 static inline void dentry_unlist(struct dentry *dentry, struct dentry *parent)
500 {
501 struct dentry *next;
502 /*
503 * Inform d_walk() and shrink_dentry_list() that we are no longer
504 * attached to the dentry tree
505 */
506 dentry->d_flags |= DCACHE_DENTRY_KILLED;
507 if (unlikely(list_empty(&dentry->d_child)))
508 return;
509 __list_del_entry(&dentry->d_child);
510 /*
511 * Cursors can move around the list of children. While we'd been
512 * a normal list member, it didn't matter - ->d_child.next would've
513 * been updated. However, from now on it won't be and for the
514 * things like d_walk() it might end up with a nasty surprise.
515 * Normally d_walk() doesn't care about cursors moving around -
516 * ->d_lock on parent prevents that and since a cursor has no children
517 * of its own, we get through it without ever unlocking the parent.
518 * There is one exception, though - if we ascend from a child that
519 * gets killed as soon as we unlock it, the next sibling is found
520 * using the value left in its ->d_child.next. And if _that_
521 * pointed to a cursor, and cursor got moved (e.g. by lseek())
522 * before d_walk() regains parent->d_lock, we'll end up skipping
523 * everything the cursor had been moved past.
524 *
525 * Solution: make sure that the pointer left behind in ->d_child.next
526 * points to something that won't be moving around. I.e. skip the
527 * cursors.
528 */
529 while (dentry->d_child.next != &parent->d_subdirs) {
530 next = list_entry(dentry->d_child.next, struct dentry, d_child);
531 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR)))
532 break;
533 dentry->d_child.next = next->d_child.next;
534 }
535 }
536
537 static void __dentry_kill(struct dentry *dentry)
538 {
539 struct dentry *parent = NULL;
540 bool can_free = true;
541 if (!IS_ROOT(dentry))
542 parent = dentry->d_parent;
543
544 /*
545 * The dentry is now unrecoverably dead to the world.
546 */
547 lockref_mark_dead(&dentry->d_lockref);
548
549 /*
550 * inform the fs via d_prune that this dentry is about to be
551 * unhashed and destroyed.
552 */
553 if (dentry->d_flags & DCACHE_OP_PRUNE)
554 dentry->d_op->d_prune(dentry);
555
556 if (dentry->d_flags & DCACHE_LRU_LIST) {
557 if (!(dentry->d_flags & DCACHE_SHRINK_LIST))
558 d_lru_del(dentry);
559 }
560 /* if it was on the hash then remove it */
561 __d_drop(dentry);
562 dentry_unlist(dentry, parent);
563 if (parent)
564 spin_unlock(&parent->d_lock);
565 if (dentry->d_inode)
566 dentry_unlink_inode(dentry);
567 else
568 spin_unlock(&dentry->d_lock);
569 this_cpu_dec(nr_dentry);
570 if (dentry->d_op && dentry->d_op->d_release)
571 dentry->d_op->d_release(dentry);
572
573 spin_lock(&dentry->d_lock);
574 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
575 dentry->d_flags |= DCACHE_MAY_FREE;
576 can_free = false;
577 }
578 spin_unlock(&dentry->d_lock);
579 if (likely(can_free))
580 dentry_free(dentry);
581 cond_resched();
582 }
583
584 static struct dentry *__lock_parent(struct dentry *dentry)
585 {
586 struct dentry *parent;
587 rcu_read_lock();
588 spin_unlock(&dentry->d_lock);
589 again:
590 parent = READ_ONCE(dentry->d_parent);
591 spin_lock(&parent->d_lock);
592 /*
593 * We can't blindly lock dentry until we are sure
594 * that we won't violate the locking order.
595 * Any changes of dentry->d_parent must have
596 * been done with parent->d_lock held, so
597 * spin_lock() above is enough of a barrier
598 * for checking if it's still our child.
599 */
600 if (unlikely(parent != dentry->d_parent)) {
601 spin_unlock(&parent->d_lock);
602 goto again;
603 }
604 rcu_read_unlock();
605 if (parent != dentry)
606 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
607 else
608 parent = NULL;
609 return parent;
610 }
611
612 static inline struct dentry *lock_parent(struct dentry *dentry)
613 {
614 struct dentry *parent = dentry->d_parent;
615 if (IS_ROOT(dentry))
616 return NULL;
617 if (likely(spin_trylock(&parent->d_lock)))
618 return parent;
619 return __lock_parent(dentry);
620 }
621
622 static inline bool retain_dentry(struct dentry *dentry)
623 {
624 WARN_ON(d_in_lookup(dentry));
625
626 /* Unreachable? Get rid of it */
627 if (unlikely(d_unhashed(dentry)))
628 return false;
629
630 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
631 return false;
632
633 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE)) {
634 if (dentry->d_op->d_delete(dentry))
635 return false;
636 }
637 /* retain; LRU fodder */
638 dentry->d_lockref.count--;
639 if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST)))
640 d_lru_add(dentry);
641 else if (unlikely(!(dentry->d_flags & DCACHE_REFERENCED)))
642 dentry->d_flags |= DCACHE_REFERENCED;
643 return true;
644 }
645
646 /*
647 * Finish off a dentry we've decided to kill.
648 * dentry->d_lock must be held, returns with it unlocked.
649 * Returns dentry requiring refcount drop, or NULL if we're done.
650 */
651 static struct dentry *dentry_kill(struct dentry *dentry)
652 __releases(dentry->d_lock)
653 {
654 struct inode *inode = dentry->d_inode;
655 struct dentry *parent = NULL;
656
657 if (inode && unlikely(!spin_trylock(&inode->i_lock)))
658 goto slow_positive;
659
660 if (!IS_ROOT(dentry)) {
661 parent = dentry->d_parent;
662 if (unlikely(!spin_trylock(&parent->d_lock))) {
663 parent = __lock_parent(dentry);
664 if (likely(inode || !dentry->d_inode))
665 goto got_locks;
666 /* negative that became positive */
667 if (parent)
668 spin_unlock(&parent->d_lock);
669 inode = dentry->d_inode;
670 goto slow_positive;
671 }
672 }
673 __dentry_kill(dentry);
674 return parent;
675
676 slow_positive:
677 spin_unlock(&dentry->d_lock);
678 spin_lock(&inode->i_lock);
679 spin_lock(&dentry->d_lock);
680 parent = lock_parent(dentry);
681 got_locks:
682 if (unlikely(dentry->d_lockref.count != 1)) {
683 dentry->d_lockref.count--;
684 } else if (likely(!retain_dentry(dentry))) {
685 __dentry_kill(dentry);
686 return parent;
687 }
688 /* we are keeping it, after all */
689 if (inode)
690 spin_unlock(&inode->i_lock);
691 if (parent)
692 spin_unlock(&parent->d_lock);
693 spin_unlock(&dentry->d_lock);
694 return NULL;
695 }
696
697 /*
698 * Try to do a lockless dput(), and return whether that was successful.
699 *
700 * If unsuccessful, we return false, having already taken the dentry lock.
701 *
702 * The caller needs to hold the RCU read lock, so that the dentry is
703 * guaranteed to stay around even if the refcount goes down to zero!
704 */
705 static inline bool fast_dput(struct dentry *dentry)
706 {
707 int ret;
708 unsigned int d_flags;
709
710 /*
711 * If we have a d_op->d_delete() operation, we sould not
712 * let the dentry count go to zero, so use "put_or_lock".
713 */
714 if (unlikely(dentry->d_flags & DCACHE_OP_DELETE))
715 return lockref_put_or_lock(&dentry->d_lockref);
716
717 /*
718 * .. otherwise, we can try to just decrement the
719 * lockref optimistically.
720 */
721 ret = lockref_put_return(&dentry->d_lockref);
722
723 /*
724 * If the lockref_put_return() failed due to the lock being held
725 * by somebody else, the fast path has failed. We will need to
726 * get the lock, and then check the count again.
727 */
728 if (unlikely(ret < 0)) {
729 spin_lock(&dentry->d_lock);
730 if (dentry->d_lockref.count > 1) {
731 dentry->d_lockref.count--;
732 spin_unlock(&dentry->d_lock);
733 return true;
734 }
735 return false;
736 }
737
738 /*
739 * If we weren't the last ref, we're done.
740 */
741 if (ret)
742 return true;
743
744 /*
745 * Careful, careful. The reference count went down
746 * to zero, but we don't hold the dentry lock, so
747 * somebody else could get it again, and do another
748 * dput(), and we need to not race with that.
749 *
750 * However, there is a very special and common case
751 * where we don't care, because there is nothing to
752 * do: the dentry is still hashed, it does not have
753 * a 'delete' op, and it's referenced and already on
754 * the LRU list.
755 *
756 * NOTE! Since we aren't locked, these values are
757 * not "stable". However, it is sufficient that at
758 * some point after we dropped the reference the
759 * dentry was hashed and the flags had the proper
760 * value. Other dentry users may have re-gotten
761 * a reference to the dentry and change that, but
762 * our work is done - we can leave the dentry
763 * around with a zero refcount.
764 */
765 smp_rmb();
766 d_flags = READ_ONCE(dentry->d_flags);
767 d_flags &= DCACHE_REFERENCED | DCACHE_LRU_LIST | DCACHE_DISCONNECTED;
768
769 /* Nothing to do? Dropping the reference was all we needed? */
770 if (d_flags == (DCACHE_REFERENCED | DCACHE_LRU_LIST) && !d_unhashed(dentry))
771 return true;
772
773 /*
774 * Not the fast normal case? Get the lock. We've already decremented
775 * the refcount, but we'll need to re-check the situation after
776 * getting the lock.
777 */
778 spin_lock(&dentry->d_lock);
779
780 /*
781 * Did somebody else grab a reference to it in the meantime, and
782 * we're no longer the last user after all? Alternatively, somebody
783 * else could have killed it and marked it dead. Either way, we
784 * don't need to do anything else.
785 */
786 if (dentry->d_lockref.count) {
787 spin_unlock(&dentry->d_lock);
788 return true;
789 }
790
791 /*
792 * Re-get the reference we optimistically dropped. We hold the
793 * lock, and we just tested that it was zero, so we can just
794 * set it to 1.
795 */
796 dentry->d_lockref.count = 1;
797 return false;
798 }
799
800
801 /*
802 * This is dput
803 *
804 * This is complicated by the fact that we do not want to put
805 * dentries that are no longer on any hash chain on the unused
806 * list: we'd much rather just get rid of them immediately.
807 *
808 * However, that implies that we have to traverse the dentry
809 * tree upwards to the parents which might _also_ now be
810 * scheduled for deletion (it may have been only waiting for
811 * its last child to go away).
812 *
813 * This tail recursion is done by hand as we don't want to depend
814 * on the compiler to always get this right (gcc generally doesn't).
815 * Real recursion would eat up our stack space.
816 */
817
818 /*
819 * dput - release a dentry
820 * @dentry: dentry to release
821 *
822 * Release a dentry. This will drop the usage count and if appropriate
823 * call the dentry unlink method as well as removing it from the queues and
824 * releasing its resources. If the parent dentries were scheduled for release
825 * they too may now get deleted.
826 */
827 void dput(struct dentry *dentry)
828 {
829 while (dentry) {
830 might_sleep();
831
832 rcu_read_lock();
833 if (likely(fast_dput(dentry))) {
834 rcu_read_unlock();
835 return;
836 }
837
838 /* Slow case: now with the dentry lock held */
839 rcu_read_unlock();
840
841 if (likely(retain_dentry(dentry))) {
842 spin_unlock(&dentry->d_lock);
843 return;
844 }
845
846 dentry = dentry_kill(dentry);
847 }
848 }
849 EXPORT_SYMBOL(dput);
850
851
852 /* This must be called with d_lock held */
853 static inline void __dget_dlock(struct dentry *dentry)
854 {
855 dentry->d_lockref.count++;
856 }
857
858 static inline void __dget(struct dentry *dentry)
859 {
860 lockref_get(&dentry->d_lockref);
861 }
862
863 struct dentry *dget_parent(struct dentry *dentry)
864 {
865 int gotref;
866 struct dentry *ret;
867
868 /*
869 * Do optimistic parent lookup without any
870 * locking.
871 */
872 rcu_read_lock();
873 ret = READ_ONCE(dentry->d_parent);
874 gotref = lockref_get_not_zero(&ret->d_lockref);
875 rcu_read_unlock();
876 if (likely(gotref)) {
877 if (likely(ret == READ_ONCE(dentry->d_parent)))
878 return ret;
879 dput(ret);
880 }
881
882 repeat:
883 /*
884 * Don't need rcu_dereference because we re-check it was correct under
885 * the lock.
886 */
887 rcu_read_lock();
888 ret = dentry->d_parent;
889 spin_lock(&ret->d_lock);
890 if (unlikely(ret != dentry->d_parent)) {
891 spin_unlock(&ret->d_lock);
892 rcu_read_unlock();
893 goto repeat;
894 }
895 rcu_read_unlock();
896 BUG_ON(!ret->d_lockref.count);
897 ret->d_lockref.count++;
898 spin_unlock(&ret->d_lock);
899 return ret;
900 }
901 EXPORT_SYMBOL(dget_parent);
902
903 static struct dentry * __d_find_any_alias(struct inode *inode)
904 {
905 struct dentry *alias;
906
907 if (hlist_empty(&inode->i_dentry))
908 return NULL;
909 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
910 __dget(alias);
911 return alias;
912 }
913
914 /**
915 * d_find_any_alias - find any alias for a given inode
916 * @inode: inode to find an alias for
917 *
918 * If any aliases exist for the given inode, take and return a
919 * reference for one of them. If no aliases exist, return %NULL.
920 */
921 struct dentry *d_find_any_alias(struct inode *inode)
922 {
923 struct dentry *de;
924
925 spin_lock(&inode->i_lock);
926 de = __d_find_any_alias(inode);
927 spin_unlock(&inode->i_lock);
928 return de;
929 }
930 EXPORT_SYMBOL(d_find_any_alias);
931
932 /**
933 * d_find_alias - grab a hashed alias of inode
934 * @inode: inode in question
935 *
936 * If inode has a hashed alias, or is a directory and has any alias,
937 * acquire the reference to alias and return it. Otherwise return NULL.
938 * Notice that if inode is a directory there can be only one alias and
939 * it can be unhashed only if it has no children, or if it is the root
940 * of a filesystem, or if the directory was renamed and d_revalidate
941 * was the first vfs operation to notice.
942 *
943 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
944 * any other hashed alias over that one.
945 */
946 static struct dentry *__d_find_alias(struct inode *inode)
947 {
948 struct dentry *alias;
949
950 if (S_ISDIR(inode->i_mode))
951 return __d_find_any_alias(inode);
952
953 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
954 spin_lock(&alias->d_lock);
955 if (!d_unhashed(alias)) {
956 __dget_dlock(alias);
957 spin_unlock(&alias->d_lock);
958 return alias;
959 }
960 spin_unlock(&alias->d_lock);
961 }
962 return NULL;
963 }
964
965 struct dentry *d_find_alias(struct inode *inode)
966 {
967 struct dentry *de = NULL;
968
969 if (!hlist_empty(&inode->i_dentry)) {
970 spin_lock(&inode->i_lock);
971 de = __d_find_alias(inode);
972 spin_unlock(&inode->i_lock);
973 }
974 return de;
975 }
976 EXPORT_SYMBOL(d_find_alias);
977
978 /*
979 * Try to kill dentries associated with this inode.
980 * WARNING: you must own a reference to inode.
981 */
982 void d_prune_aliases(struct inode *inode)
983 {
984 struct dentry *dentry;
985 restart:
986 spin_lock(&inode->i_lock);
987 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
988 spin_lock(&dentry->d_lock);
989 if (!dentry->d_lockref.count) {
990 struct dentry *parent = lock_parent(dentry);
991 if (likely(!dentry->d_lockref.count)) {
992 __dentry_kill(dentry);
993 dput(parent);
994 goto restart;
995 }
996 if (parent)
997 spin_unlock(&parent->d_lock);
998 }
999 spin_unlock(&dentry->d_lock);
1000 }
1001 spin_unlock(&inode->i_lock);
1002 }
1003 EXPORT_SYMBOL(d_prune_aliases);
1004
1005 /*
1006 * Lock a dentry from shrink list.
1007 * Called under rcu_read_lock() and dentry->d_lock; the former
1008 * guarantees that nothing we access will be freed under us.
1009 * Note that dentry is *not* protected from concurrent dentry_kill(),
1010 * d_delete(), etc.
1011 *
1012 * Return false if dentry has been disrupted or grabbed, leaving
1013 * the caller to kick it off-list. Otherwise, return true and have
1014 * that dentry's inode and parent both locked.
1015 */
1016 static bool shrink_lock_dentry(struct dentry *dentry)
1017 {
1018 struct inode *inode;
1019 struct dentry *parent;
1020
1021 if (dentry->d_lockref.count)
1022 return false;
1023
1024 inode = dentry->d_inode;
1025 if (inode && unlikely(!spin_trylock(&inode->i_lock))) {
1026 spin_unlock(&dentry->d_lock);
1027 spin_lock(&inode->i_lock);
1028 spin_lock(&dentry->d_lock);
1029 if (unlikely(dentry->d_lockref.count))
1030 goto out;
1031 /* changed inode means that somebody had grabbed it */
1032 if (unlikely(inode != dentry->d_inode))
1033 goto out;
1034 }
1035
1036 parent = dentry->d_parent;
1037 if (IS_ROOT(dentry) || likely(spin_trylock(&parent->d_lock)))
1038 return true;
1039
1040 spin_unlock(&dentry->d_lock);
1041 spin_lock(&parent->d_lock);
1042 if (unlikely(parent != dentry->d_parent)) {
1043 spin_unlock(&parent->d_lock);
1044 spin_lock(&dentry->d_lock);
1045 goto out;
1046 }
1047 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1048 if (likely(!dentry->d_lockref.count))
1049 return true;
1050 spin_unlock(&parent->d_lock);
1051 out:
1052 if (inode)
1053 spin_unlock(&inode->i_lock);
1054 return false;
1055 }
1056
1057 static void shrink_dentry_list(struct list_head *list)
1058 {
1059 while (!list_empty(list)) {
1060 struct dentry *dentry, *parent;
1061
1062 dentry = list_entry(list->prev, struct dentry, d_lru);
1063 spin_lock(&dentry->d_lock);
1064 rcu_read_lock();
1065 if (!shrink_lock_dentry(dentry)) {
1066 bool can_free = false;
1067 rcu_read_unlock();
1068 d_shrink_del(dentry);
1069 if (dentry->d_lockref.count < 0)
1070 can_free = dentry->d_flags & DCACHE_MAY_FREE;
1071 spin_unlock(&dentry->d_lock);
1072 if (can_free)
1073 dentry_free(dentry);
1074 continue;
1075 }
1076 rcu_read_unlock();
1077 d_shrink_del(dentry);
1078 parent = dentry->d_parent;
1079 __dentry_kill(dentry);
1080 if (parent == dentry)
1081 continue;
1082 /*
1083 * We need to prune ancestors too. This is necessary to prevent
1084 * quadratic behavior of shrink_dcache_parent(), but is also
1085 * expected to be beneficial in reducing dentry cache
1086 * fragmentation.
1087 */
1088 dentry = parent;
1089 while (dentry && !lockref_put_or_lock(&dentry->d_lockref))
1090 dentry = dentry_kill(dentry);
1091 }
1092 }
1093
1094 static enum lru_status dentry_lru_isolate(struct list_head *item,
1095 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1096 {
1097 struct list_head *freeable = arg;
1098 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1099
1100
1101 /*
1102 * we are inverting the lru lock/dentry->d_lock here,
1103 * so use a trylock. If we fail to get the lock, just skip
1104 * it
1105 */
1106 if (!spin_trylock(&dentry->d_lock))
1107 return LRU_SKIP;
1108
1109 /*
1110 * Referenced dentries are still in use. If they have active
1111 * counts, just remove them from the LRU. Otherwise give them
1112 * another pass through the LRU.
1113 */
1114 if (dentry->d_lockref.count) {
1115 d_lru_isolate(lru, dentry);
1116 spin_unlock(&dentry->d_lock);
1117 return LRU_REMOVED;
1118 }
1119
1120 if (dentry->d_flags & DCACHE_REFERENCED) {
1121 dentry->d_flags &= ~DCACHE_REFERENCED;
1122 spin_unlock(&dentry->d_lock);
1123
1124 /*
1125 * The list move itself will be made by the common LRU code. At
1126 * this point, we've dropped the dentry->d_lock but keep the
1127 * lru lock. This is safe to do, since every list movement is
1128 * protected by the lru lock even if both locks are held.
1129 *
1130 * This is guaranteed by the fact that all LRU management
1131 * functions are intermediated by the LRU API calls like
1132 * list_lru_add and list_lru_del. List movement in this file
1133 * only ever occur through this functions or through callbacks
1134 * like this one, that are called from the LRU API.
1135 *
1136 * The only exceptions to this are functions like
1137 * shrink_dentry_list, and code that first checks for the
1138 * DCACHE_SHRINK_LIST flag. Those are guaranteed to be
1139 * operating only with stack provided lists after they are
1140 * properly isolated from the main list. It is thus, always a
1141 * local access.
1142 */
1143 return LRU_ROTATE;
1144 }
1145
1146 d_lru_shrink_move(lru, dentry, freeable);
1147 spin_unlock(&dentry->d_lock);
1148
1149 return LRU_REMOVED;
1150 }
1151
1152 /**
1153 * prune_dcache_sb - shrink the dcache
1154 * @sb: superblock
1155 * @sc: shrink control, passed to list_lru_shrink_walk()
1156 *
1157 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1158 * is done when we need more memory and called from the superblock shrinker
1159 * function.
1160 *
1161 * This function may fail to free any resources if all the dentries are in
1162 * use.
1163 */
1164 long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
1165 {
1166 LIST_HEAD(dispose);
1167 long freed;
1168
1169 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc,
1170 dentry_lru_isolate, &dispose);
1171 shrink_dentry_list(&dispose);
1172 return freed;
1173 }
1174
1175 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
1176 struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
1177 {
1178 struct list_head *freeable = arg;
1179 struct dentry *dentry = container_of(item, struct dentry, d_lru);
1180
1181 /*
1182 * we are inverting the lru lock/dentry->d_lock here,
1183 * so use a trylock. If we fail to get the lock, just skip
1184 * it
1185 */
1186 if (!spin_trylock(&dentry->d_lock))
1187 return LRU_SKIP;
1188
1189 d_lru_shrink_move(lru, dentry, freeable);
1190 spin_unlock(&dentry->d_lock);
1191
1192 return LRU_REMOVED;
1193 }
1194
1195
1196 /**
1197 * shrink_dcache_sb - shrink dcache for a superblock
1198 * @sb: superblock
1199 *
1200 * Shrink the dcache for the specified super block. This is used to free
1201 * the dcache before unmounting a file system.
1202 */
1203 void shrink_dcache_sb(struct super_block *sb)
1204 {
1205 long freed;
1206
1207 do {
1208 LIST_HEAD(dispose);
1209
1210 freed = list_lru_walk(&sb->s_dentry_lru,
1211 dentry_lru_isolate_shrink, &dispose, 1024);
1212
1213 this_cpu_sub(nr_dentry_unused, freed);
1214 shrink_dentry_list(&dispose);
1215 } while (list_lru_count(&sb->s_dentry_lru) > 0);
1216 }
1217 EXPORT_SYMBOL(shrink_dcache_sb);
1218
1219 /**
1220 * enum d_walk_ret - action to talke during tree walk
1221 * @D_WALK_CONTINUE: contrinue walk
1222 * @D_WALK_QUIT: quit walk
1223 * @D_WALK_NORETRY: quit when retry is needed
1224 * @D_WALK_SKIP: skip this dentry and its children
1225 */
1226 enum d_walk_ret {
1227 D_WALK_CONTINUE,
1228 D_WALK_QUIT,
1229 D_WALK_NORETRY,
1230 D_WALK_SKIP,
1231 };
1232
1233 /**
1234 * d_walk - walk the dentry tree
1235 * @parent: start of walk
1236 * @data: data passed to @enter() and @finish()
1237 * @enter: callback when first entering the dentry
1238 *
1239 * The @enter() callbacks are called with d_lock held.
1240 */
1241 static void d_walk(struct dentry *parent, void *data,
1242 enum d_walk_ret (*enter)(void *, struct dentry *))
1243 {
1244 struct dentry *this_parent;
1245 struct list_head *next;
1246 unsigned seq = 0;
1247 enum d_walk_ret ret;
1248 bool retry = true;
1249
1250 again:
1251 read_seqbegin_or_lock(&rename_lock, &seq);
1252 this_parent = parent;
1253 spin_lock(&this_parent->d_lock);
1254
1255 ret = enter(data, this_parent);
1256 switch (ret) {
1257 case D_WALK_CONTINUE:
1258 break;
1259 case D_WALK_QUIT:
1260 case D_WALK_SKIP:
1261 goto out_unlock;
1262 case D_WALK_NORETRY:
1263 retry = false;
1264 break;
1265 }
1266 repeat:
1267 next = this_parent->d_subdirs.next;
1268 resume:
1269 while (next != &this_parent->d_subdirs) {
1270 struct list_head *tmp = next;
1271 struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
1272 next = tmp->next;
1273
1274 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR))
1275 continue;
1276
1277 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1278
1279 ret = enter(data, dentry);
1280 switch (ret) {
1281 case D_WALK_CONTINUE:
1282 break;
1283 case D_WALK_QUIT:
1284 spin_unlock(&dentry->d_lock);
1285 goto out_unlock;
1286 case D_WALK_NORETRY:
1287 retry = false;
1288 break;
1289 case D_WALK_SKIP:
1290 spin_unlock(&dentry->d_lock);
1291 continue;
1292 }
1293
1294 if (!list_empty(&dentry->d_subdirs)) {
1295 spin_unlock(&this_parent->d_lock);
1296 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1297 this_parent = dentry;
1298 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1299 goto repeat;
1300 }
1301 spin_unlock(&dentry->d_lock);
1302 }
1303 /*
1304 * All done at this level ... ascend and resume the search.
1305 */
1306 rcu_read_lock();
1307 ascend:
1308 if (this_parent != parent) {
1309 struct dentry *child = this_parent;
1310 this_parent = child->d_parent;
1311
1312 spin_unlock(&child->d_lock);
1313 spin_lock(&this_parent->d_lock);
1314
1315 /* might go back up the wrong parent if we have had a rename. */
1316 if (need_seqretry(&rename_lock, seq))
1317 goto rename_retry;
1318 /* go into the first sibling still alive */
1319 do {
1320 next = child->d_child.next;
1321 if (next == &this_parent->d_subdirs)
1322 goto ascend;
1323 child = list_entry(next, struct dentry, d_child);
1324 } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
1325 rcu_read_unlock();
1326 goto resume;
1327 }
1328 if (need_seqretry(&rename_lock, seq))
1329 goto rename_retry;
1330 rcu_read_unlock();
1331
1332 out_unlock:
1333 spin_unlock(&this_parent->d_lock);
1334 done_seqretry(&rename_lock, seq);
1335 return;
1336
1337 rename_retry:
1338 spin_unlock(&this_parent->d_lock);
1339 rcu_read_unlock();
1340 BUG_ON(seq & 1);
1341 if (!retry)
1342 return;
1343 seq = 1;
1344 goto again;
1345 }
1346
1347 struct check_mount {
1348 struct vfsmount *mnt;
1349 unsigned int mounted;
1350 };
1351
1352 static enum d_walk_ret path_check_mount(void *data, struct dentry *dentry)
1353 {
1354 struct check_mount *info = data;
1355 struct path path = { .mnt = info->mnt, .dentry = dentry };
1356
1357 if (likely(!d_mountpoint(dentry)))
1358 return D_WALK_CONTINUE;
1359 if (__path_is_mountpoint(&path)) {
1360 info->mounted = 1;
1361 return D_WALK_QUIT;
1362 }
1363 return D_WALK_CONTINUE;
1364 }
1365
1366 /**
1367 * path_has_submounts - check for mounts over a dentry in the
1368 * current namespace.
1369 * @parent: path to check.
1370 *
1371 * Return true if the parent or its subdirectories contain
1372 * a mount point in the current namespace.
1373 */
1374 int path_has_submounts(const struct path *parent)
1375 {
1376 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 };
1377
1378 read_seqlock_excl(&mount_lock);
1379 d_walk(parent->dentry, &data, path_check_mount);
1380 read_sequnlock_excl(&mount_lock);
1381
1382 return data.mounted;
1383 }
1384 EXPORT_SYMBOL(path_has_submounts);
1385
1386 /*
1387 * Called by mount code to set a mountpoint and check if the mountpoint is
1388 * reachable (e.g. NFS can unhash a directory dentry and then the complete
1389 * subtree can become unreachable).
1390 *
1391 * Only one of d_invalidate() and d_set_mounted() must succeed. For
1392 * this reason take rename_lock and d_lock on dentry and ancestors.
1393 */
1394 int d_set_mounted(struct dentry *dentry)
1395 {
1396 struct dentry *p;
1397 int ret = -ENOENT;
1398 write_seqlock(&rename_lock);
1399 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) {
1400 /* Need exclusion wrt. d_invalidate() */
1401 spin_lock(&p->d_lock);
1402 if (unlikely(d_unhashed(p))) {
1403 spin_unlock(&p->d_lock);
1404 goto out;
1405 }
1406 spin_unlock(&p->d_lock);
1407 }
1408 spin_lock(&dentry->d_lock);
1409 if (!d_unlinked(dentry)) {
1410 ret = -EBUSY;
1411 if (!d_mountpoint(dentry)) {
1412 dentry->d_flags |= DCACHE_MOUNTED;
1413 ret = 0;
1414 }
1415 }
1416 spin_unlock(&dentry->d_lock);
1417 out:
1418 write_sequnlock(&rename_lock);
1419 return ret;
1420 }
1421
1422 /*
1423 * Search the dentry child list of the specified parent,
1424 * and move any unused dentries to the end of the unused
1425 * list for prune_dcache(). We descend to the next level
1426 * whenever the d_subdirs list is non-empty and continue
1427 * searching.
1428 *
1429 * It returns zero iff there are no unused children,
1430 * otherwise it returns the number of children moved to
1431 * the end of the unused list. This may not be the total
1432 * number of unused children, because select_parent can
1433 * drop the lock and return early due to latency
1434 * constraints.
1435 */
1436
1437 struct select_data {
1438 struct dentry *start;
1439 struct list_head dispose;
1440 int found;
1441 };
1442
1443 static enum d_walk_ret select_collect(void *_data, struct dentry *dentry)
1444 {
1445 struct select_data *data = _data;
1446 enum d_walk_ret ret = D_WALK_CONTINUE;
1447
1448 if (data->start == dentry)
1449 goto out;
1450
1451 if (dentry->d_flags & DCACHE_SHRINK_LIST) {
1452 data->found++;
1453 } else {
1454 if (dentry->d_flags & DCACHE_LRU_LIST)
1455 d_lru_del(dentry);
1456 if (!dentry->d_lockref.count) {
1457 d_shrink_add(dentry, &data->dispose);
1458 data->found++;
1459 }
1460 }
1461 /*
1462 * We can return to the caller if we have found some (this
1463 * ensures forward progress). We'll be coming back to find
1464 * the rest.
1465 */
1466 if (!list_empty(&data->dispose))
1467 ret = need_resched() ? D_WALK_QUIT : D_WALK_NORETRY;
1468 out:
1469 return ret;
1470 }
1471
1472 /**
1473 * shrink_dcache_parent - prune dcache
1474 * @parent: parent of entries to prune
1475 *
1476 * Prune the dcache to remove unused children of the parent dentry.
1477 */
1478 void shrink_dcache_parent(struct dentry *parent)
1479 {
1480 for (;;) {
1481 struct select_data data;
1482
1483 INIT_LIST_HEAD(&data.dispose);
1484 data.start = parent;
1485 data.found = 0;
1486
1487 d_walk(parent, &data, select_collect);
1488
1489 if (!list_empty(&data.dispose)) {
1490 shrink_dentry_list(&data.dispose);
1491 continue;
1492 }
1493
1494 cond_resched();
1495 if (!data.found)
1496 break;
1497 }
1498 }
1499 EXPORT_SYMBOL(shrink_dcache_parent);
1500
1501 static enum d_walk_ret umount_check(void *_data, struct dentry *dentry)
1502 {
1503 /* it has busy descendents; complain about those instead */
1504 if (!list_empty(&dentry->d_subdirs))
1505 return D_WALK_CONTINUE;
1506
1507 /* root with refcount 1 is fine */
1508 if (dentry == _data && dentry->d_lockref.count == 1)
1509 return D_WALK_CONTINUE;
1510
1511 printk(KERN_ERR "BUG: Dentry %p{i=%lx,n=%pd} "
1512 " still in use (%d) [unmount of %s %s]\n",
1513 dentry,
1514 dentry->d_inode ?
1515 dentry->d_inode->i_ino : 0UL,
1516 dentry,
1517 dentry->d_lockref.count,
1518 dentry->d_sb->s_type->name,
1519 dentry->d_sb->s_id);
1520 WARN_ON(1);
1521 return D_WALK_CONTINUE;
1522 }
1523
1524 static void do_one_tree(struct dentry *dentry)
1525 {
1526 shrink_dcache_parent(dentry);
1527 d_walk(dentry, dentry, umount_check);
1528 d_drop(dentry);
1529 dput(dentry);
1530 }
1531
1532 /*
1533 * destroy the dentries attached to a superblock on unmounting
1534 */
1535 void shrink_dcache_for_umount(struct super_block *sb)
1536 {
1537 struct dentry *dentry;
1538
1539 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked");
1540
1541 dentry = sb->s_root;
1542 sb->s_root = NULL;
1543 do_one_tree(dentry);
1544
1545 while (!hlist_bl_empty(&sb->s_roots)) {
1546 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash));
1547 do_one_tree(dentry);
1548 }
1549 }
1550
1551 static enum d_walk_ret find_submount(void *_data, struct dentry *dentry)
1552 {
1553 struct dentry **victim = _data;
1554 if (d_mountpoint(dentry)) {
1555 __dget_dlock(dentry);
1556 *victim = dentry;
1557 return D_WALK_QUIT;
1558 }
1559 return D_WALK_CONTINUE;
1560 }
1561
1562 /**
1563 * d_invalidate - detach submounts, prune dcache, and drop
1564 * @dentry: dentry to invalidate (aka detach, prune and drop)
1565 */
1566 void d_invalidate(struct dentry *dentry)
1567 {
1568 bool had_submounts = false;
1569 spin_lock(&dentry->d_lock);
1570 if (d_unhashed(dentry)) {
1571 spin_unlock(&dentry->d_lock);
1572 return;
1573 }
1574 __d_drop(dentry);
1575 spin_unlock(&dentry->d_lock);
1576
1577 /* Negative dentries can be dropped without further checks */
1578 if (!dentry->d_inode)
1579 return;
1580
1581 shrink_dcache_parent(dentry);
1582 for (;;) {
1583 struct dentry *victim = NULL;
1584 d_walk(dentry, &victim, find_submount);
1585 if (!victim) {
1586 if (had_submounts)
1587 shrink_dcache_parent(dentry);
1588 return;
1589 }
1590 had_submounts = true;
1591 detach_mounts(victim);
1592 dput(victim);
1593 }
1594 }
1595 EXPORT_SYMBOL(d_invalidate);
1596
1597 /**
1598 * __d_alloc - allocate a dcache entry
1599 * @sb: filesystem it will belong to
1600 * @name: qstr of the name
1601 *
1602 * Allocates a dentry. It returns %NULL if there is insufficient memory
1603 * available. On a success the dentry is returned. The name passed in is
1604 * copied and the copy passed in may be reused after this call.
1605 */
1606
1607 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1608 {
1609 struct external_name *ext = NULL;
1610 struct dentry *dentry;
1611 char *dname;
1612 int err;
1613
1614 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1615 if (!dentry)
1616 return NULL;
1617
1618 /*
1619 * We guarantee that the inline name is always NUL-terminated.
1620 * This way the memcpy() done by the name switching in rename
1621 * will still always have a NUL at the end, even if we might
1622 * be overwriting an internal NUL character
1623 */
1624 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
1625 if (unlikely(!name)) {
1626 name = &slash_name;
1627 dname = dentry->d_iname;
1628 } else if (name->len > DNAME_INLINE_LEN-1) {
1629 size_t size = offsetof(struct external_name, name[1]);
1630
1631 ext = kmalloc(size + name->len, GFP_KERNEL_ACCOUNT);
1632 if (!ext) {
1633 kmem_cache_free(dentry_cache, dentry);
1634 return NULL;
1635 }
1636 atomic_set(&ext->u.count, 1);
1637 dname = ext->name;
1638 } else {
1639 dname = dentry->d_iname;
1640 }
1641
1642 dentry->d_name.len = name->len;
1643 dentry->d_name.hash = name->hash;
1644 memcpy(dname, name->name, name->len);
1645 dname[name->len] = 0;
1646
1647 /* Make sure we always see the terminating NUL character */
1648 smp_store_release(&dentry->d_name.name, dname); /* ^^^ */
1649
1650 dentry->d_lockref.count = 1;
1651 dentry->d_flags = 0;
1652 spin_lock_init(&dentry->d_lock);
1653 seqcount_init(&dentry->d_seq);
1654 dentry->d_inode = NULL;
1655 dentry->d_parent = dentry;
1656 dentry->d_sb = sb;
1657 dentry->d_op = NULL;
1658 dentry->d_fsdata = NULL;
1659 INIT_HLIST_BL_NODE(&dentry->d_hash);
1660 INIT_LIST_HEAD(&dentry->d_lru);
1661 INIT_LIST_HEAD(&dentry->d_subdirs);
1662 INIT_HLIST_NODE(&dentry->d_u.d_alias);
1663 INIT_LIST_HEAD(&dentry->d_child);
1664 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1665
1666 if (dentry->d_op && dentry->d_op->d_init) {
1667 err = dentry->d_op->d_init(dentry);
1668 if (err) {
1669 if (dname_external(dentry))
1670 kfree(external_name(dentry));
1671 kmem_cache_free(dentry_cache, dentry);
1672 return NULL;
1673 }
1674 }
1675
1676 if (unlikely(ext)) {
1677 pg_data_t *pgdat = page_pgdat(virt_to_page(ext));
1678 mod_node_page_state(pgdat, NR_INDIRECTLY_RECLAIMABLE_BYTES,
1679 ksize(ext));
1680 }
1681
1682 this_cpu_inc(nr_dentry);
1683
1684 return dentry;
1685 }
1686
1687 /**
1688 * d_alloc - allocate a dcache entry
1689 * @parent: parent of entry to allocate
1690 * @name: qstr of the name
1691 *
1692 * Allocates a dentry. It returns %NULL if there is insufficient memory
1693 * available. On a success the dentry is returned. The name passed in is
1694 * copied and the copy passed in may be reused after this call.
1695 */
1696 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1697 {
1698 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1699 if (!dentry)
1700 return NULL;
1701 dentry->d_flags |= DCACHE_RCUACCESS;
1702 spin_lock(&parent->d_lock);
1703 /*
1704 * don't need child lock because it is not subject
1705 * to concurrency here
1706 */
1707 __dget_dlock(parent);
1708 dentry->d_parent = parent;
1709 list_add(&dentry->d_child, &parent->d_subdirs);
1710 spin_unlock(&parent->d_lock);
1711
1712 return dentry;
1713 }
1714 EXPORT_SYMBOL(d_alloc);
1715
1716 struct dentry *d_alloc_anon(struct super_block *sb)
1717 {
1718 return __d_alloc(sb, NULL);
1719 }
1720 EXPORT_SYMBOL(d_alloc_anon);
1721
1722 struct dentry *d_alloc_cursor(struct dentry * parent)
1723 {
1724 struct dentry *dentry = d_alloc_anon(parent->d_sb);
1725 if (dentry) {
1726 dentry->d_flags |= DCACHE_RCUACCESS | DCACHE_DENTRY_CURSOR;
1727 dentry->d_parent = dget(parent);
1728 }
1729 return dentry;
1730 }
1731
1732 /**
1733 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1734 * @sb: the superblock
1735 * @name: qstr of the name
1736 *
1737 * For a filesystem that just pins its dentries in memory and never
1738 * performs lookups at all, return an unhashed IS_ROOT dentry.
1739 */
1740 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1741 {
1742 return __d_alloc(sb, name);
1743 }
1744 EXPORT_SYMBOL(d_alloc_pseudo);
1745
1746 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1747 {
1748 struct qstr q;
1749
1750 q.name = name;
1751 q.hash_len = hashlen_string(parent, name);
1752 return d_alloc(parent, &q);
1753 }
1754 EXPORT_SYMBOL(d_alloc_name);
1755
1756 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1757 {
1758 WARN_ON_ONCE(dentry->d_op);
1759 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1760 DCACHE_OP_COMPARE |
1761 DCACHE_OP_REVALIDATE |
1762 DCACHE_OP_WEAK_REVALIDATE |
1763 DCACHE_OP_DELETE |
1764 DCACHE_OP_REAL));
1765 dentry->d_op = op;
1766 if (!op)
1767 return;
1768 if (op->d_hash)
1769 dentry->d_flags |= DCACHE_OP_HASH;
1770 if (op->d_compare)
1771 dentry->d_flags |= DCACHE_OP_COMPARE;
1772 if (op->d_revalidate)
1773 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1774 if (op->d_weak_revalidate)
1775 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE;
1776 if (op->d_delete)
1777 dentry->d_flags |= DCACHE_OP_DELETE;
1778 if (op->d_prune)
1779 dentry->d_flags |= DCACHE_OP_PRUNE;
1780 if (op->d_real)
1781 dentry->d_flags |= DCACHE_OP_REAL;
1782
1783 }
1784 EXPORT_SYMBOL(d_set_d_op);
1785
1786
1787 /*
1788 * d_set_fallthru - Mark a dentry as falling through to a lower layer
1789 * @dentry - The dentry to mark
1790 *
1791 * Mark a dentry as falling through to the lower layer (as set with
1792 * d_pin_lower()). This flag may be recorded on the medium.
1793 */
1794 void d_set_fallthru(struct dentry *dentry)
1795 {
1796 spin_lock(&dentry->d_lock);
1797 dentry->d_flags |= DCACHE_FALLTHRU;
1798 spin_unlock(&dentry->d_lock);
1799 }
1800 EXPORT_SYMBOL(d_set_fallthru);
1801
1802 static unsigned d_flags_for_inode(struct inode *inode)
1803 {
1804 unsigned add_flags = DCACHE_REGULAR_TYPE;
1805
1806 if (!inode)
1807 return DCACHE_MISS_TYPE;
1808
1809 if (S_ISDIR(inode->i_mode)) {
1810 add_flags = DCACHE_DIRECTORY_TYPE;
1811 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) {
1812 if (unlikely(!inode->i_op->lookup))
1813 add_flags = DCACHE_AUTODIR_TYPE;
1814 else
1815 inode->i_opflags |= IOP_LOOKUP;
1816 }
1817 goto type_determined;
1818 }
1819
1820 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
1821 if (unlikely(inode->i_op->get_link)) {
1822 add_flags = DCACHE_SYMLINK_TYPE;
1823 goto type_determined;
1824 }
1825 inode->i_opflags |= IOP_NOFOLLOW;
1826 }
1827
1828 if (unlikely(!S_ISREG(inode->i_mode)))
1829 add_flags = DCACHE_SPECIAL_TYPE;
1830
1831 type_determined:
1832 if (unlikely(IS_AUTOMOUNT(inode)))
1833 add_flags |= DCACHE_NEED_AUTOMOUNT;
1834 return add_flags;
1835 }
1836
1837 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1838 {
1839 unsigned add_flags = d_flags_for_inode(inode);
1840 WARN_ON(d_in_lookup(dentry));
1841
1842 spin_lock(&dentry->d_lock);
1843 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1844 raw_write_seqcount_begin(&dentry->d_seq);
1845 __d_set_inode_and_type(dentry, inode, add_flags);
1846 raw_write_seqcount_end(&dentry->d_seq);
1847 fsnotify_update_flags(dentry);
1848 spin_unlock(&dentry->d_lock);
1849 }
1850
1851 /**
1852 * d_instantiate - fill in inode information for a dentry
1853 * @entry: dentry to complete
1854 * @inode: inode to attach to this dentry
1855 *
1856 * Fill in inode information in the entry.
1857 *
1858 * This turns negative dentries into productive full members
1859 * of society.
1860 *
1861 * NOTE! This assumes that the inode count has been incremented
1862 * (or otherwise set) by the caller to indicate that it is now
1863 * in use by the dcache.
1864 */
1865
1866 void d_instantiate(struct dentry *entry, struct inode * inode)
1867 {
1868 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1869 if (inode) {
1870 security_d_instantiate(entry, inode);
1871 spin_lock(&inode->i_lock);
1872 __d_instantiate(entry, inode);
1873 spin_unlock(&inode->i_lock);
1874 }
1875 }
1876 EXPORT_SYMBOL(d_instantiate);
1877
1878 /*
1879 * This should be equivalent to d_instantiate() + unlock_new_inode(),
1880 * with lockdep-related part of unlock_new_inode() done before
1881 * anything else. Use that instead of open-coding d_instantiate()/
1882 * unlock_new_inode() combinations.
1883 */
1884 void d_instantiate_new(struct dentry *entry, struct inode *inode)
1885 {
1886 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias));
1887 BUG_ON(!inode);
1888 lockdep_annotate_inode_mutex_key(inode);
1889 security_d_instantiate(entry, inode);
1890 spin_lock(&inode->i_lock);
1891 __d_instantiate(entry, inode);
1892 WARN_ON(!(inode->i_state & I_NEW));
1893 inode->i_state &= ~I_NEW & ~I_CREATING;
1894 smp_mb();
1895 wake_up_bit(&inode->i_state, __I_NEW);
1896 spin_unlock(&inode->i_lock);
1897 }
1898 EXPORT_SYMBOL(d_instantiate_new);
1899
1900 struct dentry *d_make_root(struct inode *root_inode)
1901 {
1902 struct dentry *res = NULL;
1903
1904 if (root_inode) {
1905 res = d_alloc_anon(root_inode->i_sb);
1906 if (res) {
1907 res->d_flags |= DCACHE_RCUACCESS;
1908 d_instantiate(res, root_inode);
1909 } else {
1910 iput(root_inode);
1911 }
1912 }
1913 return res;
1914 }
1915 EXPORT_SYMBOL(d_make_root);
1916
1917 static struct dentry *__d_instantiate_anon(struct dentry *dentry,
1918 struct inode *inode,
1919 bool disconnected)
1920 {
1921 struct dentry *res;
1922 unsigned add_flags;
1923
1924 security_d_instantiate(dentry, inode);
1925 spin_lock(&inode->i_lock);
1926 res = __d_find_any_alias(inode);
1927 if (res) {
1928 spin_unlock(&inode->i_lock);
1929 dput(dentry);
1930 goto out_iput;
1931 }
1932
1933 /* attach a disconnected dentry */
1934 add_flags = d_flags_for_inode(inode);
1935
1936 if (disconnected)
1937 add_flags |= DCACHE_DISCONNECTED;
1938
1939 spin_lock(&dentry->d_lock);
1940 __d_set_inode_and_type(dentry, inode, add_flags);
1941 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
1942 if (!disconnected) {
1943 hlist_bl_lock(&dentry->d_sb->s_roots);
1944 hlist_bl_add_head(&dentry->d_hash, &dentry->d_sb->s_roots);
1945 hlist_bl_unlock(&dentry->d_sb->s_roots);
1946 }
1947 spin_unlock(&dentry->d_lock);
1948 spin_unlock(&inode->i_lock);
1949
1950 return dentry;
1951
1952 out_iput:
1953 iput(inode);
1954 return res;
1955 }
1956
1957 struct dentry *d_instantiate_anon(struct dentry *dentry, struct inode *inode)
1958 {
1959 return __d_instantiate_anon(dentry, inode, true);
1960 }
1961 EXPORT_SYMBOL(d_instantiate_anon);
1962
1963 static struct dentry *__d_obtain_alias(struct inode *inode, bool disconnected)
1964 {
1965 struct dentry *tmp;
1966 struct dentry *res;
1967
1968 if (!inode)
1969 return ERR_PTR(-ESTALE);
1970 if (IS_ERR(inode))
1971 return ERR_CAST(inode);
1972
1973 res = d_find_any_alias(inode);
1974 if (res)
1975 goto out_iput;
1976
1977 tmp = d_alloc_anon(inode->i_sb);
1978 if (!tmp) {
1979 res = ERR_PTR(-ENOMEM);
1980 goto out_iput;
1981 }
1982
1983 return __d_instantiate_anon(tmp, inode, disconnected);
1984
1985 out_iput:
1986 iput(inode);
1987 return res;
1988 }
1989
1990 /**
1991 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1992 * @inode: inode to allocate the dentry for
1993 *
1994 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1995 * similar open by handle operations. The returned dentry may be anonymous,
1996 * or may have a full name (if the inode was already in the cache).
1997 *
1998 * When called on a directory inode, we must ensure that the inode only ever
1999 * has one dentry. If a dentry is found, that is returned instead of
2000 * allocating a new one.
2001 *
2002 * On successful return, the reference to the inode has been transferred
2003 * to the dentry. In case of an error the reference on the inode is released.
2004 * To make it easier to use in export operations a %NULL or IS_ERR inode may
2005 * be passed in and the error will be propagated to the return value,
2006 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2007 */
2008 struct dentry *d_obtain_alias(struct inode *inode)
2009 {
2010 return __d_obtain_alias(inode, true);
2011 }
2012 EXPORT_SYMBOL(d_obtain_alias);
2013
2014 /**
2015 * d_obtain_root - find or allocate a dentry for a given inode
2016 * @inode: inode to allocate the dentry for
2017 *
2018 * Obtain an IS_ROOT dentry for the root of a filesystem.
2019 *
2020 * We must ensure that directory inodes only ever have one dentry. If a
2021 * dentry is found, that is returned instead of allocating a new one.
2022 *
2023 * On successful return, the reference to the inode has been transferred
2024 * to the dentry. In case of an error the reference on the inode is
2025 * released. A %NULL or IS_ERR inode may be passed in and will be the
2026 * error will be propagate to the return value, with a %NULL @inode
2027 * replaced by ERR_PTR(-ESTALE).
2028 */
2029 struct dentry *d_obtain_root(struct inode *inode)
2030 {
2031 return __d_obtain_alias(inode, false);
2032 }
2033 EXPORT_SYMBOL(d_obtain_root);
2034
2035 /**
2036 * d_add_ci - lookup or allocate new dentry with case-exact name
2037 * @inode: the inode case-insensitive lookup has found
2038 * @dentry: the negative dentry that was passed to the parent's lookup func
2039 * @name: the case-exact name to be associated with the returned dentry
2040 *
2041 * This is to avoid filling the dcache with case-insensitive names to the
2042 * same inode, only the actual correct case is stored in the dcache for
2043 * case-insensitive filesystems.
2044 *
2045 * For a case-insensitive lookup match and if the the case-exact dentry
2046 * already exists in in the dcache, use it and return it.
2047 *
2048 * If no entry exists with the exact case name, allocate new dentry with
2049 * the exact case, and return the spliced entry.
2050 */
2051 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
2052 struct qstr *name)
2053 {
2054 struct dentry *found, *res;
2055
2056 /*
2057 * First check if a dentry matching the name already exists,
2058 * if not go ahead and create it now.
2059 */
2060 found = d_hash_and_lookup(dentry->d_parent, name);
2061 if (found) {
2062 iput(inode);
2063 return found;
2064 }
2065 if (d_in_lookup(dentry)) {
2066 found = d_alloc_parallel(dentry->d_parent, name,
2067 dentry->d_wait);
2068 if (IS_ERR(found) || !d_in_lookup(found)) {
2069 iput(inode);
2070 return found;
2071 }
2072 } else {
2073 found = d_alloc(dentry->d_parent, name);
2074 if (!found) {
2075 iput(inode);
2076 return ERR_PTR(-ENOMEM);
2077 }
2078 }
2079 res = d_splice_alias(inode, found);
2080 if (res) {
2081 dput(found);
2082 return res;
2083 }
2084 return found;
2085 }
2086 EXPORT_SYMBOL(d_add_ci);
2087
2088
2089 static inline bool d_same_name(const struct dentry *dentry,
2090 const struct dentry *parent,
2091 const struct qstr *name)
2092 {
2093 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) {
2094 if (dentry->d_name.len != name->len)
2095 return false;
2096 return dentry_cmp(dentry, name->name, name->len) == 0;
2097 }
2098 return parent->d_op->d_compare(dentry,
2099 dentry->d_name.len, dentry->d_name.name,
2100 name) == 0;
2101 }
2102
2103 /**
2104 * __d_lookup_rcu - search for a dentry (racy, store-free)
2105 * @parent: parent dentry
2106 * @name: qstr of name we wish to find
2107 * @seqp: returns d_seq value at the point where the dentry was found
2108 * Returns: dentry, or NULL
2109 *
2110 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2111 * resolution (store-free path walking) design described in
2112 * Documentation/filesystems/path-lookup.txt.
2113 *
2114 * This is not to be used outside core vfs.
2115 *
2116 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2117 * held, and rcu_read_lock held. The returned dentry must not be stored into
2118 * without taking d_lock and checking d_seq sequence count against @seq
2119 * returned here.
2120 *
2121 * A refcount may be taken on the found dentry with the d_rcu_to_refcount
2122 * function.
2123 *
2124 * Alternatively, __d_lookup_rcu may be called again to look up the child of
2125 * the returned dentry, so long as its parent's seqlock is checked after the
2126 * child is looked up. Thus, an interlocking stepping of sequence lock checks
2127 * is formed, giving integrity down the path walk.
2128 *
2129 * NOTE! The caller *has* to check the resulting dentry against the sequence
2130 * number we've returned before using any of the resulting dentry state!
2131 */
2132 struct dentry *__d_lookup_rcu(const struct dentry *parent,
2133 const struct qstr *name,
2134 unsigned *seqp)
2135 {
2136 u64 hashlen = name->hash_len;
2137 const unsigned char *str = name->name;
2138 struct hlist_bl_head *b = d_hash(hashlen_hash(hashlen));
2139 struct hlist_bl_node *node;
2140 struct dentry *dentry;
2141
2142 /*
2143 * Note: There is significant duplication with __d_lookup_rcu which is
2144 * required to prevent single threaded performance regressions
2145 * especially on architectures where smp_rmb (in seqcounts) are costly.
2146 * Keep the two functions in sync.
2147 */
2148
2149 /*
2150 * The hash list is protected using RCU.
2151 *
2152 * Carefully use d_seq when comparing a candidate dentry, to avoid
2153 * races with d_move().
2154 *
2155 * It is possible that concurrent renames can mess up our list
2156 * walk here and result in missing our dentry, resulting in the
2157 * false-negative result. d_lookup() protects against concurrent
2158 * renames using rename_lock seqlock.
2159 *
2160 * See Documentation/filesystems/path-lookup.txt for more details.
2161 */
2162 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2163 unsigned seq;
2164
2165 seqretry:
2166 /*
2167 * The dentry sequence count protects us from concurrent
2168 * renames, and thus protects parent and name fields.
2169 *
2170 * The caller must perform a seqcount check in order
2171 * to do anything useful with the returned dentry.
2172 *
2173 * NOTE! We do a "raw" seqcount_begin here. That means that
2174 * we don't wait for the sequence count to stabilize if it
2175 * is in the middle of a sequence change. If we do the slow
2176 * dentry compare, we will do seqretries until it is stable,
2177 * and if we end up with a successful lookup, we actually
2178 * want to exit RCU lookup anyway.
2179 *
2180 * Note that raw_seqcount_begin still *does* smp_rmb(), so
2181 * we are still guaranteed NUL-termination of ->d_name.name.
2182 */
2183 seq = raw_seqcount_begin(&dentry->d_seq);
2184 if (dentry->d_parent != parent)
2185 continue;
2186 if (d_unhashed(dentry))
2187 continue;
2188
2189 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
2190 int tlen;
2191 const char *tname;
2192 if (dentry->d_name.hash != hashlen_hash(hashlen))
2193 continue;
2194 tlen = dentry->d_name.len;
2195 tname = dentry->d_name.name;
2196 /* we want a consistent (name,len) pair */
2197 if (read_seqcount_retry(&dentry->d_seq, seq)) {
2198 cpu_relax();
2199 goto seqretry;
2200 }
2201 if (parent->d_op->d_compare(dentry,
2202 tlen, tname, name) != 0)
2203 continue;
2204 } else {
2205 if (dentry->d_name.hash_len != hashlen)
2206 continue;
2207 if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
2208 continue;
2209 }
2210 *seqp = seq;
2211 return dentry;
2212 }
2213 return NULL;
2214 }
2215
2216 /**
2217 * d_lookup - search for a dentry
2218 * @parent: parent dentry
2219 * @name: qstr of name we wish to find
2220 * Returns: dentry, or NULL
2221 *
2222 * d_lookup searches the children of the parent dentry for the name in
2223 * question. If the dentry is found its reference count is incremented and the
2224 * dentry is returned. The caller must use dput to free the entry when it has
2225 * finished using it. %NULL is returned if the dentry does not exist.
2226 */
2227 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
2228 {
2229 struct dentry *dentry;
2230 unsigned seq;
2231
2232 do {
2233 seq = read_seqbegin(&rename_lock);
2234 dentry = __d_lookup(parent, name);
2235 if (dentry)
2236 break;
2237 } while (read_seqretry(&rename_lock, seq));
2238 return dentry;
2239 }
2240 EXPORT_SYMBOL(d_lookup);
2241
2242 /**
2243 * __d_lookup - search for a dentry (racy)
2244 * @parent: parent dentry
2245 * @name: qstr of name we wish to find
2246 * Returns: dentry, or NULL
2247 *
2248 * __d_lookup is like d_lookup, however it may (rarely) return a
2249 * false-negative result due to unrelated rename activity.
2250 *
2251 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
2252 * however it must be used carefully, eg. with a following d_lookup in
2253 * the case of failure.
2254 *
2255 * __d_lookup callers must be commented.
2256 */
2257 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name)
2258 {
2259 unsigned int hash = name->hash;
2260 struct hlist_bl_head *b = d_hash(hash);
2261 struct hlist_bl_node *node;
2262 struct dentry *found = NULL;
2263 struct dentry *dentry;
2264
2265 /*
2266 * Note: There is significant duplication with __d_lookup_rcu which is
2267 * required to prevent single threaded performance regressions
2268 * especially on architectures where smp_rmb (in seqcounts) are costly.
2269 * Keep the two functions in sync.
2270 */
2271
2272 /*
2273 * The hash list is protected using RCU.
2274 *
2275 * Take d_lock when comparing a candidate dentry, to avoid races
2276 * with d_move().
2277 *
2278 * It is possible that concurrent renames can mess up our list
2279 * walk here and result in missing our dentry, resulting in the
2280 * false-negative result. d_lookup() protects against concurrent
2281 * renames using rename_lock seqlock.
2282 *
2283 * See Documentation/filesystems/path-lookup.txt for more details.
2284 */
2285 rcu_read_lock();
2286
2287 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
2288
2289 if (dentry->d_name.hash != hash)
2290 continue;
2291
2292 spin_lock(&dentry->d_lock);
2293 if (dentry->d_parent != parent)
2294 goto next;
2295 if (d_unhashed(dentry))
2296 goto next;
2297
2298 if (!d_same_name(dentry, parent, name))
2299 goto next;
2300
2301 dentry->d_lockref.count++;
2302 found = dentry;
2303 spin_unlock(&dentry->d_lock);
2304 break;
2305 next:
2306 spin_unlock(&dentry->d_lock);
2307 }
2308 rcu_read_unlock();
2309
2310 return found;
2311 }
2312
2313 /**
2314 * d_hash_and_lookup - hash the qstr then search for a dentry
2315 * @dir: Directory to search in
2316 * @name: qstr of name we wish to find
2317 *
2318 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2319 */
2320 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
2321 {
2322 /*
2323 * Check for a fs-specific hash function. Note that we must
2324 * calculate the standard hash first, as the d_op->d_hash()
2325 * routine may choose to leave the hash value unchanged.
2326 */
2327 name->hash = full_name_hash(dir, name->name, name->len);
2328 if (dir->d_flags & DCACHE_OP_HASH) {
2329 int err = dir->d_op->d_hash(dir, name);
2330 if (unlikely(err < 0))
2331 return ERR_PTR(err);
2332 }
2333 return d_lookup(dir, name);
2334 }
2335 EXPORT_SYMBOL(d_hash_and_lookup);
2336
2337 /*
2338 * When a file is deleted, we have two options:
2339 * - turn this dentry into a negative dentry
2340 * - unhash this dentry and free it.
2341 *
2342 * Usually, we want to just turn this into
2343 * a negative dentry, but if anybody else is
2344 * currently using the dentry or the inode
2345 * we can't do that and we fall back on removing
2346 * it from the hash queues and waiting for
2347 * it to be deleted later when it has no users
2348 */
2349
2350 /**
2351 * d_delete - delete a dentry
2352 * @dentry: The dentry to delete
2353 *
2354 * Turn the dentry into a negative dentry if possible, otherwise
2355 * remove it from the hash queues so it can be deleted later
2356 */
2357
2358 void d_delete(struct dentry * dentry)
2359 {
2360 struct inode *inode = dentry->d_inode;
2361 int isdir = d_is_dir(dentry);
2362
2363 spin_lock(&inode->i_lock);
2364 spin_lock(&dentry->d_lock);
2365 /*
2366 * Are we the only user?
2367 */
2368 if (dentry->d_lockref.count == 1) {
2369 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
2370 dentry_unlink_inode(dentry);
2371 } else {
2372 __d_drop(dentry);
2373 spin_unlock(&dentry->d_lock);
2374 spin_unlock(&inode->i_lock);
2375 }
2376 fsnotify_nameremove(dentry, isdir);
2377 }
2378 EXPORT_SYMBOL(d_delete);
2379
2380 static void __d_rehash(struct dentry *entry)
2381 {
2382 struct hlist_bl_head *b = d_hash(entry->d_name.hash);
2383
2384 hlist_bl_lock(b);
2385 hlist_bl_add_head_rcu(&entry->d_hash, b);
2386 hlist_bl_unlock(b);
2387 }
2388
2389 /**
2390 * d_rehash - add an entry back to the hash
2391 * @entry: dentry to add to the hash
2392 *
2393 * Adds a dentry to the hash according to its name.
2394 */
2395
2396 void d_rehash(struct dentry * entry)
2397 {
2398 spin_lock(&entry->d_lock);
2399 __d_rehash(entry);
2400 spin_unlock(&entry->d_lock);
2401 }
2402 EXPORT_SYMBOL(d_rehash);
2403
2404 static inline unsigned start_dir_add(struct inode *dir)
2405 {
2406
2407 for (;;) {
2408 unsigned n = dir->i_dir_seq;
2409 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
2410 return n;
2411 cpu_relax();
2412 }
2413 }
2414
2415 static inline void end_dir_add(struct inode *dir, unsigned n)
2416 {
2417 smp_store_release(&dir->i_dir_seq, n + 2);
2418 }
2419
2420 static void d_wait_lookup(struct dentry *dentry)
2421 {
2422 if (d_in_lookup(dentry)) {
2423 DECLARE_WAITQUEUE(wait, current);
2424 add_wait_queue(dentry->d_wait, &wait);
2425 do {
2426 set_current_state(TASK_UNINTERRUPTIBLE);
2427 spin_unlock(&dentry->d_lock);
2428 schedule();
2429 spin_lock(&dentry->d_lock);
2430 } while (d_in_lookup(dentry));
2431 }
2432 }
2433
2434 struct dentry *d_alloc_parallel(struct dentry *parent,
2435 const struct qstr *name,
2436 wait_queue_head_t *wq)
2437 {
2438 unsigned int hash = name->hash;
2439 struct hlist_bl_head *b = in_lookup_hash(parent, hash);
2440 struct hlist_bl_node *node;
2441 struct dentry *new = d_alloc(parent, name);
2442 struct dentry *dentry;
2443 unsigned seq, r_seq, d_seq;
2444
2445 if (unlikely(!new))
2446 return ERR_PTR(-ENOMEM);
2447
2448 retry:
2449 rcu_read_lock();
2450 seq = smp_load_acquire(&parent->d_inode->i_dir_seq);
2451 r_seq = read_seqbegin(&rename_lock);
2452 dentry = __d_lookup_rcu(parent, name, &d_seq);
2453 if (unlikely(dentry)) {
2454 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2455 rcu_read_unlock();
2456 goto retry;
2457 }
2458 if (read_seqcount_retry(&dentry->d_seq, d_seq)) {
2459 rcu_read_unlock();
2460 dput(dentry);
2461 goto retry;
2462 }
2463 rcu_read_unlock();
2464 dput(new);
2465 return dentry;
2466 }
2467 if (unlikely(read_seqretry(&rename_lock, r_seq))) {
2468 rcu_read_unlock();
2469 goto retry;
2470 }
2471
2472 if (unlikely(seq & 1)) {
2473 rcu_read_unlock();
2474 goto retry;
2475 }
2476
2477 hlist_bl_lock(b);
2478 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) {
2479 hlist_bl_unlock(b);
2480 rcu_read_unlock();
2481 goto retry;
2482 }
2483 /*
2484 * No changes for the parent since the beginning of d_lookup().
2485 * Since all removals from the chain happen with hlist_bl_lock(),
2486 * any potential in-lookup matches are going to stay here until
2487 * we unlock the chain. All fields are stable in everything
2488 * we encounter.
2489 */
2490 hlist_bl_for_each_entry(dentry, node, b, d_u.d_in_lookup_hash) {
2491 if (dentry->d_name.hash != hash)
2492 continue;
2493 if (dentry->d_parent != parent)
2494 continue;
2495 if (!d_same_name(dentry, parent, name))
2496 continue;
2497 hlist_bl_unlock(b);
2498 /* now we can try to grab a reference */
2499 if (!lockref_get_not_dead(&dentry->d_lockref)) {
2500 rcu_read_unlock();
2501 goto retry;
2502 }
2503
2504 rcu_read_unlock();
2505 /*
2506 * somebody is likely to be still doing lookup for it;
2507 * wait for them to finish
2508 */
2509 spin_lock(&dentry->d_lock);
2510 d_wait_lookup(dentry);
2511 /*
2512 * it's not in-lookup anymore; in principle we should repeat
2513 * everything from dcache lookup, but it's likely to be what
2514 * d_lookup() would've found anyway. If it is, just return it;
2515 * otherwise we really have to repeat the whole thing.
2516 */
2517 if (unlikely(dentry->d_name.hash != hash))
2518 goto mismatch;
2519 if (unlikely(dentry->d_parent != parent))
2520 goto mismatch;
2521 if (unlikely(d_unhashed(dentry)))
2522 goto mismatch;
2523 if (unlikely(!d_same_name(dentry, parent, name)))
2524 goto mismatch;
2525 /* OK, it *is* a hashed match; return it */
2526 spin_unlock(&dentry->d_lock);
2527 dput(new);
2528 return dentry;
2529 }
2530 rcu_read_unlock();
2531 /* we can't take ->d_lock here; it's OK, though. */
2532 new->d_flags |= DCACHE_PAR_LOOKUP;
2533 new->d_wait = wq;
2534 hlist_bl_add_head_rcu(&new->d_u.d_in_lookup_hash, b);
2535 hlist_bl_unlock(b);
2536 return new;
2537 mismatch:
2538 spin_unlock(&dentry->d_lock);
2539 dput(dentry);
2540 goto retry;
2541 }
2542 EXPORT_SYMBOL(d_alloc_parallel);
2543
2544 void __d_lookup_done(struct dentry *dentry)
2545 {
2546 struct hlist_bl_head *b = in_lookup_hash(dentry->d_parent,
2547 dentry->d_name.hash);
2548 hlist_bl_lock(b);
2549 dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
2550 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
2551 wake_up_all(dentry->d_wait);
2552 dentry->d_wait = NULL;
2553 hlist_bl_unlock(b);
2554 INIT_HLIST_NODE(&dentry->d_u.d_alias);
2555 INIT_LIST_HEAD(&dentry->d_lru);
2556 }
2557 EXPORT_SYMBOL(__d_lookup_done);
2558
2559 /* inode->i_lock held if inode is non-NULL */
2560
2561 static inline void __d_add(struct dentry *dentry, struct inode *inode)
2562 {
2563 struct inode *dir = NULL;
2564 unsigned n;
2565 spin_lock(&dentry->d_lock);
2566 if (unlikely(d_in_lookup(dentry))) {
2567 dir = dentry->d_parent->d_inode;
2568 n = start_dir_add(dir);
2569 __d_lookup_done(dentry);
2570 }
2571 if (inode) {
2572 unsigned add_flags = d_flags_for_inode(inode);
2573 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
2574 raw_write_seqcount_begin(&dentry->d_seq);
2575 __d_set_inode_and_type(dentry, inode, add_flags);
2576 raw_write_seqcount_end(&dentry->d_seq);
2577 fsnotify_update_flags(dentry);
2578 }
2579 __d_rehash(dentry);
2580 if (dir)
2581 end_dir_add(dir, n);
2582 spin_unlock(&dentry->d_lock);
2583 if (inode)
2584 spin_unlock(&inode->i_lock);
2585 }
2586
2587 /**
2588 * d_add - add dentry to hash queues
2589 * @entry: dentry to add
2590 * @inode: The inode to attach to this dentry
2591 *
2592 * This adds the entry to the hash queues and initializes @inode.
2593 * The entry was actually filled in earlier during d_alloc().
2594 */
2595
2596 void d_add(struct dentry *entry, struct inode *inode)
2597 {
2598 if (inode) {
2599 security_d_instantiate(entry, inode);
2600 spin_lock(&inode->i_lock);
2601 }
2602 __d_add(entry, inode);
2603 }
2604 EXPORT_SYMBOL(d_add);
2605
2606 /**
2607 * d_exact_alias - find and hash an exact unhashed alias
2608 * @entry: dentry to add
2609 * @inode: The inode to go with this dentry
2610 *
2611 * If an unhashed dentry with the same name/parent and desired
2612 * inode already exists, hash and return it. Otherwise, return
2613 * NULL.
2614 *
2615 * Parent directory should be locked.
2616 */
2617 struct dentry *d_exact_alias(struct dentry *entry, struct inode *inode)
2618 {
2619 struct dentry *alias;
2620 unsigned int hash = entry->d_name.hash;
2621
2622 spin_lock(&inode->i_lock);
2623 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
2624 /*
2625 * Don't need alias->d_lock here, because aliases with
2626 * d_parent == entry->d_parent are not subject to name or
2627 * parent changes, because the parent inode i_mutex is held.
2628 */
2629 if (alias->d_name.hash != hash)
2630 continue;
2631 if (alias->d_parent != entry->d_parent)
2632 continue;
2633 if (!d_same_name(alias, entry->d_parent, &entry->d_name))
2634 continue;
2635 spin_lock(&alias->d_lock);
2636 if (!d_unhashed(alias)) {
2637 spin_unlock(&alias->d_lock);
2638 alias = NULL;
2639 } else {
2640 __dget_dlock(alias);
2641 __d_rehash(alias);
2642 spin_unlock(&alias->d_lock);
2643 }
2644 spin_unlock(&inode->i_lock);
2645 return alias;
2646 }
2647 spin_unlock(&inode->i_lock);
2648 return NULL;
2649 }
2650 EXPORT_SYMBOL(d_exact_alias);
2651
2652 static void swap_names(struct dentry *dentry, struct dentry *target)
2653 {
2654 if (unlikely(dname_external(target))) {
2655 if (unlikely(dname_external(dentry))) {
2656 /*
2657 * Both external: swap the pointers
2658 */
2659 swap(target->d_name.name, dentry->d_name.name);
2660 } else {
2661 /*
2662 * dentry:internal, target:external. Steal target's
2663 * storage and make target internal.
2664 */
2665 memcpy(target->d_iname, dentry->d_name.name,
2666 dentry->d_name.len + 1);
2667 dentry->d_name.name = target->d_name.name;
2668 target->d_name.name = target->d_iname;
2669 }
2670 } else {
2671 if (unlikely(dname_external(dentry))) {
2672 /*
2673 * dentry:external, target:internal. Give dentry's
2674 * storage to target and make dentry internal
2675 */
2676 memcpy(dentry->d_iname, target->d_name.name,
2677 target->d_name.len + 1);
2678 target->d_name.name = dentry->d_name.name;
2679 dentry->d_name.name = dentry->d_iname;
2680 } else {
2681 /*
2682 * Both are internal.
2683 */
2684 unsigned int i;
2685 BUILD_BUG_ON(!IS_ALIGNED(DNAME_INLINE_LEN, sizeof(long)));
2686 for (i = 0; i < DNAME_INLINE_LEN / sizeof(long); i++) {
2687 swap(((long *) &dentry->d_iname)[i],
2688 ((long *) &target->d_iname)[i]);
2689 }
2690 }
2691 }
2692 swap(dentry->d_name.hash_len, target->d_name.hash_len);
2693 }
2694
2695 static void copy_name(struct dentry *dentry, struct dentry *target)
2696 {
2697 struct external_name *old_name = NULL;
2698 if (unlikely(dname_external(dentry)))
2699 old_name = external_name(dentry);
2700 if (unlikely(dname_external(target))) {
2701 atomic_inc(&external_name(target)->u.count);
2702 dentry->d_name = target->d_name;
2703 } else {
2704 memcpy(dentry->d_iname, target->d_name.name,
2705 target->d_name.len + 1);
2706 dentry->d_name.name = dentry->d_iname;
2707 dentry->d_name.hash_len = target->d_name.hash_len;
2708 }
2709 if (old_name && likely(atomic_dec_and_test(&old_name->u.count)))
2710 call_rcu(&old_name->u.head, __d_free_external_name);
2711 }
2712
2713 /*
2714 * __d_move - move a dentry
2715 * @dentry: entry to move
2716 * @target: new dentry
2717 * @exchange: exchange the two dentries
2718 *
2719 * Update the dcache to reflect the move of a file name. Negative
2720 * dcache entries should not be moved in this way. Caller must hold
2721 * rename_lock, the i_mutex of the source and target directories,
2722 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2723 */
2724 static void __d_move(struct dentry *dentry, struct dentry *target,
2725 bool exchange)
2726 {
2727 struct dentry *old_parent, *p;
2728 struct inode *dir = NULL;
2729 unsigned n;
2730
2731 WARN_ON(!dentry->d_inode);
2732 if (WARN_ON(dentry == target))
2733 return;
2734
2735 BUG_ON(d_ancestor(target, dentry));
2736 old_parent = dentry->d_parent;
2737 p = d_ancestor(old_parent, target);
2738 if (IS_ROOT(dentry)) {
2739 BUG_ON(p);
2740 spin_lock(&target->d_parent->d_lock);
2741 } else if (!p) {
2742 /* target is not a descendent of dentry->d_parent */
2743 spin_lock(&target->d_parent->d_lock);
2744 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED);
2745 } else {
2746 BUG_ON(p == dentry);
2747 spin_lock(&old_parent->d_lock);
2748 if (p != target)
2749 spin_lock_nested(&target->d_parent->d_lock,
2750 DENTRY_D_LOCK_NESTED);
2751 }
2752 spin_lock_nested(&dentry->d_lock, 2);
2753 spin_lock_nested(&target->d_lock, 3);
2754
2755 if (unlikely(d_in_lookup(target))) {
2756 dir = target->d_parent->d_inode;
2757 n = start_dir_add(dir);
2758 __d_lookup_done(target);
2759 }
2760
2761 write_seqcount_begin(&dentry->d_seq);
2762 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED);
2763
2764 /* unhash both */
2765 if (!d_unhashed(dentry))
2766 ___d_drop(dentry);
2767 if (!d_unhashed(target))
2768 ___d_drop(target);
2769
2770 /* ... and switch them in the tree */
2771 dentry->d_parent = target->d_parent;
2772 if (!exchange) {
2773 copy_name(dentry, target);
2774 target->d_hash.pprev = NULL;
2775 dentry->d_parent->d_lockref.count++;
2776 if (dentry == old_parent)
2777 dentry->d_flags |= DCACHE_RCUACCESS;
2778 else
2779 WARN_ON(!--old_parent->d_lockref.count);
2780 } else {
2781 target->d_parent = old_parent;
2782 swap_names(dentry, target);
2783 list_move(&target->d_child, &target->d_parent->d_subdirs);
2784 __d_rehash(target);
2785 fsnotify_update_flags(target);
2786 }
2787 list_move(&dentry->d_child, &dentry->d_parent->d_subdirs);
2788 __d_rehash(dentry);
2789 fsnotify_update_flags(dentry);
2790
2791 write_seqcount_end(&target->d_seq);
2792 write_seqcount_end(&dentry->d_seq);
2793
2794 if (dir)
2795 end_dir_add(dir, n);
2796
2797 if (dentry->d_parent != old_parent)
2798 spin_unlock(&dentry->d_parent->d_lock);
2799 if (dentry != old_parent)
2800 spin_unlock(&old_parent->d_lock);
2801 spin_unlock(&target->d_lock);
2802 spin_unlock(&dentry->d_lock);
2803 }
2804
2805 /*
2806 * d_move - move a dentry
2807 * @dentry: entry to move
2808 * @target: new dentry
2809 *
2810 * Update the dcache to reflect the move of a file name. Negative
2811 * dcache entries should not be moved in this way. See the locking
2812 * requirements for __d_move.
2813 */
2814 void d_move(struct dentry *dentry, struct dentry *target)
2815 {
2816 write_seqlock(&rename_lock);
2817 __d_move(dentry, target, false);
2818 write_sequnlock(&rename_lock);
2819 }
2820 EXPORT_SYMBOL(d_move);
2821
2822 /*
2823 * d_exchange - exchange two dentries
2824 * @dentry1: first dentry
2825 * @dentry2: second dentry
2826 */
2827 void d_exchange(struct dentry *dentry1, struct dentry *dentry2)
2828 {
2829 write_seqlock(&rename_lock);
2830
2831 WARN_ON(!dentry1->d_inode);
2832 WARN_ON(!dentry2->d_inode);
2833 WARN_ON(IS_ROOT(dentry1));
2834 WARN_ON(IS_ROOT(dentry2));
2835
2836 __d_move(dentry1, dentry2, true);
2837
2838 write_sequnlock(&rename_lock);
2839 }
2840
2841 /**
2842 * d_ancestor - search for an ancestor
2843 * @p1: ancestor dentry
2844 * @p2: child dentry
2845 *
2846 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2847 * an ancestor of p2, else NULL.
2848 */
2849 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2850 {
2851 struct dentry *p;
2852
2853 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2854 if (p->d_parent == p1)
2855 return p;
2856 }
2857 return NULL;
2858 }
2859
2860 /*
2861 * This helper attempts to cope with remotely renamed directories
2862 *
2863 * It assumes that the caller is already holding
2864 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2865 *
2866 * Note: If ever the locking in lock_rename() changes, then please
2867 * remember to update this too...
2868 */
2869 static int __d_unalias(struct inode *inode,
2870 struct dentry *dentry, struct dentry *alias)
2871 {
2872 struct mutex *m1 = NULL;
2873 struct rw_semaphore *m2 = NULL;
2874 int ret = -ESTALE;
2875
2876 /* If alias and dentry share a parent, then no extra locks required */
2877 if (alias->d_parent == dentry->d_parent)
2878 goto out_unalias;
2879
2880 /* See lock_rename() */
2881 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2882 goto out_err;
2883 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2884 if (!inode_trylock_shared(alias->d_parent->d_inode))
2885 goto out_err;
2886 m2 = &alias->d_parent->d_inode->i_rwsem;
2887 out_unalias:
2888 __d_move(alias, dentry, false);
2889 ret = 0;
2890 out_err:
2891 if (m2)
2892 up_read(m2);
2893 if (m1)
2894 mutex_unlock(m1);
2895 return ret;
2896 }
2897
2898 /**
2899 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2900 * @inode: the inode which may have a disconnected dentry
2901 * @dentry: a negative dentry which we want to point to the inode.
2902 *
2903 * If inode is a directory and has an IS_ROOT alias, then d_move that in
2904 * place of the given dentry and return it, else simply d_add the inode
2905 * to the dentry and return NULL.
2906 *
2907 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2908 * we should error out: directories can't have multiple aliases.
2909 *
2910 * This is needed in the lookup routine of any filesystem that is exportable
2911 * (via knfsd) so that we can build dcache paths to directories effectively.
2912 *
2913 * If a dentry was found and moved, then it is returned. Otherwise NULL
2914 * is returned. This matches the expected return value of ->lookup.
2915 *
2916 * Cluster filesystems may call this function with a negative, hashed dentry.
2917 * In that case, we know that the inode will be a regular file, and also this
2918 * will only occur during atomic_open. So we need to check for the dentry
2919 * being already hashed only in the final case.
2920 */
2921 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2922 {
2923 if (IS_ERR(inode))
2924 return ERR_CAST(inode);
2925
2926 BUG_ON(!d_unhashed(dentry));
2927
2928 if (!inode)
2929 goto out;
2930
2931 security_d_instantiate(dentry, inode);
2932 spin_lock(&inode->i_lock);
2933 if (S_ISDIR(inode->i_mode)) {
2934 struct dentry *new = __d_find_any_alias(inode);
2935 if (unlikely(new)) {
2936 /* The reference to new ensures it remains an alias */
2937 spin_unlock(&inode->i_lock);
2938 write_seqlock(&rename_lock);
2939 if (unlikely(d_ancestor(new, dentry))) {
2940 write_sequnlock(&rename_lock);
2941 dput(new);
2942 new = ERR_PTR(-ELOOP);
2943 pr_warn_ratelimited(
2944 "VFS: Lookup of '%s' in %s %s"
2945 " would have caused loop\n",
2946 dentry->d_name.name,
2947 inode->i_sb->s_type->name,
2948 inode->i_sb->s_id);
2949 } else if (!IS_ROOT(new)) {
2950 struct dentry *old_parent = dget(new->d_parent);
2951 int err = __d_unalias(inode, dentry, new);
2952 write_sequnlock(&rename_lock);
2953 if (err) {
2954 dput(new);
2955 new = ERR_PTR(err);
2956 }
2957 dput(old_parent);
2958 } else {
2959 __d_move(new, dentry, false);
2960 write_sequnlock(&rename_lock);
2961 }
2962 iput(inode);
2963 return new;
2964 }
2965 }
2966 out:
2967 __d_add(dentry, inode);
2968 return NULL;
2969 }
2970 EXPORT_SYMBOL(d_splice_alias);
2971
2972 /*
2973 * Test whether new_dentry is a subdirectory of old_dentry.
2974 *
2975 * Trivially implemented using the dcache structure
2976 */
2977
2978 /**
2979 * is_subdir - is new dentry a subdirectory of old_dentry
2980 * @new_dentry: new dentry
2981 * @old_dentry: old dentry
2982 *
2983 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
2984 * Returns false otherwise.
2985 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
2986 */
2987
2988 bool is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2989 {
2990 bool result;
2991 unsigned seq;
2992
2993 if (new_dentry == old_dentry)
2994 return true;
2995
2996 do {
2997 /* for restarting inner loop in case of seq retry */
2998 seq = read_seqbegin(&rename_lock);
2999 /*
3000 * Need rcu_readlock to protect against the d_parent trashing
3001 * due to d_move
3002 */
3003 rcu_read_lock();
3004 if (d_ancestor(old_dentry, new_dentry))
3005 result = true;
3006 else
3007 result = false;
3008 rcu_read_unlock();
3009 } while (read_seqretry(&rename_lock, seq));
3010
3011 return result;
3012 }
3013 EXPORT_SYMBOL(is_subdir);
3014
3015 static enum d_walk_ret d_genocide_kill(void *data, struct dentry *dentry)
3016 {
3017 struct dentry *root = data;
3018 if (dentry != root) {
3019 if (d_unhashed(dentry) || !dentry->d_inode)
3020 return D_WALK_SKIP;
3021
3022 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
3023 dentry->d_flags |= DCACHE_GENOCIDE;
3024 dentry->d_lockref.count--;
3025 }
3026 }
3027 return D_WALK_CONTINUE;
3028 }
3029
3030 void d_genocide(struct dentry *parent)
3031 {
3032 d_walk(parent, parent, d_genocide_kill);
3033 }
3034
3035 EXPORT_SYMBOL(d_genocide);
3036
3037 void d_tmpfile(struct dentry *dentry, struct inode *inode)
3038 {
3039 inode_dec_link_count(inode);
3040 BUG_ON(dentry->d_name.name != dentry->d_iname ||
3041 !hlist_unhashed(&dentry->d_u.d_alias) ||
3042 !d_unlinked(dentry));
3043 spin_lock(&dentry->d_parent->d_lock);
3044 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
3045 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu",
3046 (unsigned long long)inode->i_ino);
3047 spin_unlock(&dentry->d_lock);
3048 spin_unlock(&dentry->d_parent->d_lock);
3049 d_instantiate(dentry, inode);
3050 }
3051 EXPORT_SYMBOL(d_tmpfile);
3052
3053 static __initdata unsigned long dhash_entries;
3054 static int __init set_dhash_entries(char *str)
3055 {
3056 if (!str)
3057 return 0;
3058 dhash_entries = simple_strtoul(str, &str, 0);
3059 return 1;
3060 }
3061 __setup("dhash_entries=", set_dhash_entries);
3062
3063 static void __init dcache_init_early(void)
3064 {
3065 /* If hashes are distributed across NUMA nodes, defer
3066 * hash allocation until vmalloc space is available.
3067 */
3068 if (hashdist)
3069 return;
3070
3071 dentry_hashtable =
3072 alloc_large_system_hash("Dentry cache",
3073 sizeof(struct hlist_bl_head),
3074 dhash_entries,
3075 13,
3076 HASH_EARLY | HASH_ZERO,
3077 &d_hash_shift,
3078 NULL,
3079 0,
3080 0);
3081 d_hash_shift = 32 - d_hash_shift;
3082 }
3083
3084 static void __init dcache_init(void)
3085 {
3086 /*
3087 * A constructor could be added for stable state like the lists,
3088 * but it is probably not worth it because of the cache nature
3089 * of the dcache.
3090 */
3091 dentry_cache = KMEM_CACHE_USERCOPY(dentry,
3092 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT,
3093 d_iname);
3094
3095 /* Hash may have been set up in dcache_init_early */
3096 if (!hashdist)
3097 return;
3098
3099 dentry_hashtable =
3100 alloc_large_system_hash("Dentry cache",
3101 sizeof(struct hlist_bl_head),
3102 dhash_entries,
3103 13,
3104 HASH_ZERO,
3105 &d_hash_shift,
3106 NULL,
3107 0,
3108 0);
3109 d_hash_shift = 32 - d_hash_shift;
3110 }
3111
3112 /* SLAB cache for __getname() consumers */
3113 struct kmem_cache *names_cachep __read_mostly;
3114 EXPORT_SYMBOL(names_cachep);
3115
3116 void __init vfs_caches_init_early(void)
3117 {
3118 int i;
3119
3120 for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++)
3121 INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]);
3122
3123 dcache_init_early();
3124 inode_init_early();
3125 }
3126
3127 void __init vfs_caches_init(void)
3128 {
3129 names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0,
3130 SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL);
3131
3132 dcache_init();
3133 inode_init();
3134 files_init();
3135 files_maxfiles_init();
3136 mnt_init();
3137 bdev_cache_init();
3138 chrdev_init();
3139 }