1 /*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9 /*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
17 #include <linux/syscalls.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/fs.h>
21 #include <linux/fsnotify.h>
22 #include <linux/slab.h>
23 #include <linux/init.h>
24 #include <linux/hash.h>
25 #include <linux/cache.h>
26 #include <linux/module.h>
27 #include <linux/mount.h>
28 #include <linux/file.h>
29 #include <asm/uaccess.h>
30 #include <linux/security.h>
31 #include <linux/seqlock.h>
32 #include <linux/swap.h>
33 #include <linux/bootmem.h>
34 #include <linux/fs_struct.h>
35 #include <linux/hardirq.h>
36 #include <linux/bit_spinlock.h>
37 #include <linux/rculist_bl.h>
38 #include <linux/prefetch.h>
39 #include <linux/ratelimit.h>
40 #include "internal.h"
41 #include "mount.h"
42
43 /*
44 * Usage:
45 * dcache->d_inode->i_lock protects:
46 * - i_dentry, d_alias, d_inode of aliases
47 * dcache_hash_bucket lock protects:
48 * - the dcache hash table
49 * s_anon bl list spinlock protects:
50 * - the s_anon list (see __d_drop)
51 * dcache_lru_lock protects:
52 * - the dcache lru lists and counters
53 * d_lock protects:
54 * - d_flags
55 * - d_name
56 * - d_lru
57 * - d_count
58 * - d_unhashed()
59 * - d_parent and d_subdirs
60 * - children's d_child and d_parent
61 * - d_alias, d_inode
62 *
63 * Ordering:
64 * dentry->d_inode->i_lock
65 * dentry->d_lock
66 * dcache_lru_lock
67 * dcache_hash_bucket lock
68 * s_anon lock
69 *
70 * If there is an ancestor relationship:
71 * dentry->d_parent->...->d_parent->d_lock
72 * ...
73 * dentry->d_parent->d_lock
74 * dentry->d_lock
75 *
76 * If no ancestor relationship:
77 * if (dentry1 < dentry2)
78 * dentry1->d_lock
79 * dentry2->d_lock
80 */
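/*
 * A minimal sketch of the "no ancestor relationship" rule above
 * (hypothetical helper, not part of the surrounding code): unrelated
 * dentries are always locked in ascending address order, so two CPUs
 * grabbing the same pair cannot deadlock.
 *
 *	static void lock_two_dentries(struct dentry *d1, struct dentry *d2)
 *	{
 *		if (d1 < d2) {
 *			spin_lock(&d1->d_lock);
 *			spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
 *		} else {
 *			spin_lock(&d2->d_lock);
 *			spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
 *		}
 *	}
 */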
81 int sysctl_vfs_cache_pressure __read_mostly = 100;
82 EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
83
84 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
85 __cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
86
87 EXPORT_SYMBOL(rename_lock);
88
89 static struct kmem_cache *dentry_cache __read_mostly;
90
91 /*
92 * This is the single most critical data structure when it comes
93 * to the dcache: the hashtable for lookups. Somebody should try
94 * to make this good - I've just made it work.
95 *
96 * This hash-function tries to avoid losing too many bits of hash
97 * information, yet avoid using a prime hash-size or similar.
98 */
99 #define D_HASHBITS d_hash_shift
100 #define D_HASHMASK d_hash_mask
101
102 static unsigned int d_hash_mask __read_mostly;
103 static unsigned int d_hash_shift __read_mostly;
104
105 static struct hlist_bl_head *dentry_hashtable __read_mostly;
106
107 static inline struct hlist_bl_head *d_hash(struct dentry *parent,
108 unsigned long hash)
109 {
110 hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
111 hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
112 return dentry_hashtable + (hash & D_HASHMASK);
113 }
114
115 /* Statistics gathering. */
116 struct dentry_stat_t dentry_stat = {
117 .age_limit = 45,
118 };
119
120 static DEFINE_PER_CPU(unsigned int, nr_dentry);
121
122 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
123 static int get_nr_dentry(void)
124 {
125 int i;
126 int sum = 0;
127 for_each_possible_cpu(i)
128 sum += per_cpu(nr_dentry, i);
129 return sum < 0 ? 0 : sum;
130 }
131
132 int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
133 size_t *lenp, loff_t *ppos)
134 {
135 dentry_stat.nr_dentry = get_nr_dentry();
136 return proc_dointvec(table, write, buffer, lenp, ppos);
137 }
138 #endif
139
140 static void __d_free(struct rcu_head *head)
141 {
142 struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
143
144 WARN_ON(!list_empty(&dentry->d_alias));
145 if (dname_external(dentry))
146 kfree(dentry->d_name.name);
147 kmem_cache_free(dentry_cache, dentry);
148 }
149
150 /*
151 * no locks, please.
152 */
153 static void d_free(struct dentry *dentry)
154 {
155 BUG_ON(dentry->d_count);
156 this_cpu_dec(nr_dentry);
157 if (dentry->d_op && dentry->d_op->d_release)
158 dentry->d_op->d_release(dentry);
159
160 /* if dentry was never visible to RCU, immediate free is OK */
161 if (!(dentry->d_flags & DCACHE_RCUACCESS))
162 __d_free(&dentry->d_u.d_rcu);
163 else
164 call_rcu(&dentry->d_u.d_rcu, __d_free);
165 }
166
167 /**
168 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
169 * @dentry: the target dentry
170 * After this call, in-progress rcu-walk path lookup will fail. This
171 * should be called after unhashing, and after changing d_inode (if
172 * the dentry has not already been unhashed).
173 */
174 static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
175 {
176 assert_spin_locked(&dentry->d_lock);
177 /* Go through a barrier */
178 write_seqcount_barrier(&dentry->d_seq);
179 }
180
181 /*
182 * Release the dentry's inode, using the filesystem
183 * d_iput() operation if defined. Dentry has no refcount
184 * and is unhashed.
185 */
186 static void dentry_iput(struct dentry * dentry)
187 __releases(dentry->d_lock)
188 __releases(dentry->d_inode->i_lock)
189 {
190 struct inode *inode = dentry->d_inode;
191 if (inode) {
192 dentry->d_inode = NULL;
193 list_del_init(&dentry->d_alias);
194 spin_unlock(&dentry->d_lock);
195 spin_unlock(&inode->i_lock);
196 if (!inode->i_nlink)
197 fsnotify_inoderemove(inode);
198 if (dentry->d_op && dentry->d_op->d_iput)
199 dentry->d_op->d_iput(dentry, inode);
200 else
201 iput(inode);
202 } else {
203 spin_unlock(&dentry->d_lock);
204 }
205 }
206
207 /*
208 * Release the dentry's inode, using the filesystem
209 * d_iput() operation if defined. dentry remains in-use.
210 */
211 static void dentry_unlink_inode(struct dentry * dentry)
212 __releases(dentry->d_lock)
213 __releases(dentry->d_inode->i_lock)
214 {
215 struct inode *inode = dentry->d_inode;
216 dentry->d_inode = NULL;
217 list_del_init(&dentry->d_alias);
218 dentry_rcuwalk_barrier(dentry);
219 spin_unlock(&dentry->d_lock);
220 spin_unlock(&inode->i_lock);
221 if (!inode->i_nlink)
222 fsnotify_inoderemove(inode);
223 if (dentry->d_op && dentry->d_op->d_iput)
224 dentry->d_op->d_iput(dentry, inode);
225 else
226 iput(inode);
227 }
228
229 /*
230 * dentry_lru_(add|del|prune|move_tail) must be called with d_lock held.
231 */
232 static void dentry_lru_add(struct dentry *dentry)
233 {
234 if (list_empty(&dentry->d_lru)) {
235 spin_lock(&dcache_lru_lock);
236 list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
237 dentry->d_sb->s_nr_dentry_unused++;
238 dentry_stat.nr_unused++;
239 spin_unlock(&dcache_lru_lock);
240 }
241 }
242
243 static void __dentry_lru_del(struct dentry *dentry)
244 {
245 list_del_init(&dentry->d_lru);
246 dentry->d_sb->s_nr_dentry_unused--;
247 dentry_stat.nr_unused--;
248 }
249
250 /*
251 * Remove a dentry with references from the LRU.
252 */
253 static void dentry_lru_del(struct dentry *dentry)
254 {
255 if (!list_empty(&dentry->d_lru)) {
256 spin_lock(&dcache_lru_lock);
257 __dentry_lru_del(dentry);
258 spin_unlock(&dcache_lru_lock);
259 }
260 }
261
262 /*
263 * Remove a dentry that is unreferenced and about to be pruned
264 * (unhashed and destroyed) from the LRU, and inform the file system.
265 * This wrapper should be called _prior_ to unhashing a victim dentry.
266 */
267 static void dentry_lru_prune(struct dentry *dentry)
268 {
269 if (!list_empty(&dentry->d_lru)) {
270 if (dentry->d_flags & DCACHE_OP_PRUNE)
271 dentry->d_op->d_prune(dentry);
272
273 spin_lock(&dcache_lru_lock);
274 __dentry_lru_del(dentry);
275 spin_unlock(&dcache_lru_lock);
276 }
277 }
278
279 static void dentry_lru_move_tail(struct dentry *dentry)
280 {
281 spin_lock(&dcache_lru_lock);
282 if (list_empty(&dentry->d_lru)) {
283 list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
284 dentry->d_sb->s_nr_dentry_unused++;
285 dentry_stat.nr_unused++;
286 } else {
287 list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
288 }
289 spin_unlock(&dcache_lru_lock);
290 }
291
292 /**
293 * d_kill - kill dentry and return parent
294 * @dentry: dentry to kill
295 * @parent: parent dentry
296 *
297 * The dentry must already be unhashed and removed from the LRU.
298 *
299 * If this is the root of the dentry tree, return NULL.
300 *
301 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
302 * d_kill.
303 */
304 static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
305 __releases(dentry->d_lock)
306 __releases(parent->d_lock)
307 __releases(dentry->d_inode->i_lock)
308 {
309 list_del(&dentry->d_u.d_child);
310 /*
311 * Inform try_to_ascend() that we are no longer attached to the
312 * dentry tree
313 */
314 dentry->d_flags |= DCACHE_DISCONNECTED;
315 if (parent)
316 spin_unlock(&parent->d_lock);
317 dentry_iput(dentry);
318 /*
319 * dentry_iput drops the locks, at which point nobody (except
320 * transient RCU lookups) can reach this dentry.
321 */
322 d_free(dentry);
323 return parent;
324 }
325
326 /*
327 * Unhash a dentry without inserting an RCU walk barrier or checking that
328 * dentry->d_lock is locked. The caller must take care of that, if
329 * appropriate.
330 */
331 static void __d_shrink(struct dentry *dentry)
332 {
333 if (!d_unhashed(dentry)) {
334 struct hlist_bl_head *b;
335 if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
336 b = &dentry->d_sb->s_anon;
337 else
338 b = d_hash(dentry->d_parent, dentry->d_name.hash);
339
340 hlist_bl_lock(b);
341 __hlist_bl_del(&dentry->d_hash);
342 dentry->d_hash.pprev = NULL;
343 hlist_bl_unlock(b);
344 }
345 }
346
347 /**
348 * d_drop - drop a dentry
349 * @dentry: dentry to drop
350 *
351 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
352 * be found through a VFS lookup any more. Note that this is different from
353 * deleting the dentry - d_delete will try to mark the dentry negative if
354 * possible, giving a successful _negative_ lookup, while d_drop will
355 * just make the cache lookup fail.
356 *
357 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
358 * reason (NFS timeouts or autofs deletes).
359 *
360 * __d_drop requires dentry->d_lock.
361 */
362 void __d_drop(struct dentry *dentry)
363 {
364 if (!d_unhashed(dentry)) {
365 __d_shrink(dentry);
366 dentry_rcuwalk_barrier(dentry);
367 }
368 }
369 EXPORT_SYMBOL(__d_drop);
370
371 void d_drop(struct dentry *dentry)
372 {
373 spin_lock(&dentry->d_lock);
374 __d_drop(dentry);
375 spin_unlock(&dentry->d_lock);
376 }
377 EXPORT_SYMBOL(d_drop);
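/*
 * A sketch of the kind of caller d_drop() is meant for (hypothetical
 * network-filesystem ->d_revalidate, with foo_dentry_is_stale() as an
 * assumed helper): once a cached name can no longer be trusted, the
 * dentry is unhashed so the next lookup goes back to the filesystem.
 *
 *	static int foo_d_revalidate(struct dentry *dentry, struct nameidata *nd)
 *	{
 *		if (foo_dentry_is_stale(dentry)) {
 *			d_drop(dentry);
 *			return 0;
 *		}
 *		return 1;
 *	}
 */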
378
379 /*
380 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
381 * @dentry: dentry to drop
382 *
383 * This is called when we do a lookup on a placeholder dentry that needed to be
384 * looked up. The dentry should have been hashed in order for it to be found by
385 * the lookup code, but now needs to be unhashed while we do the actual lookup
386 * and clear the DCACHE_NEED_LOOKUP flag.
387 */
388 void d_clear_need_lookup(struct dentry *dentry)
389 {
390 spin_lock(&dentry->d_lock);
391 __d_drop(dentry);
392 dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
393 spin_unlock(&dentry->d_lock);
394 }
395 EXPORT_SYMBOL(d_clear_need_lookup);
396
397 /*
398 * Finish off a dentry we've decided to kill.
399 * dentry->d_lock must be held, returns with it unlocked.
400 * If ref is non-zero, then decrement the refcount too.
401 * Returns dentry requiring refcount drop, or NULL if we're done.
402 */
403 static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
404 __releases(dentry->d_lock)
405 {
406 struct inode *inode;
407 struct dentry *parent;
408
409 inode = dentry->d_inode;
410 if (inode && !spin_trylock(&inode->i_lock)) {
411 relock:
412 spin_unlock(&dentry->d_lock);
413 cpu_relax();
414 return dentry; /* try again with same dentry */
415 }
416 if (IS_ROOT(dentry))
417 parent = NULL;
418 else
419 parent = dentry->d_parent;
420 if (parent && !spin_trylock(&parent->d_lock)) {
421 if (inode)
422 spin_unlock(&inode->i_lock);
423 goto relock;
424 }
425
426 if (ref)
427 dentry->d_count--;
428 /*
429 * if dentry was on the d_lru list delete it from there.
430 * inform the fs via d_prune that this dentry is about to be
431 * unhashed and destroyed.
432 */
433 dentry_lru_prune(dentry);
434 /* if it was on the hash then remove it */
435 __d_drop(dentry);
436 return d_kill(dentry, parent);
437 }
438
439 /*
440 * This is dput
441 *
442 * This is complicated by the fact that we do not want to put
443 * dentries that are no longer on any hash chain on the unused
444 * list: we'd much rather just get rid of them immediately.
445 *
446 * However, that implies that we have to traverse the dentry
447 * tree upwards to the parents which might _also_ now be
448 * scheduled for deletion (it may have been only waiting for
449 * its last child to go away).
450 *
451 * This tail recursion is done by hand as we don't want to depend
452 * on the compiler to always get this right (gcc generally doesn't).
453 * Real recursion would eat up our stack space.
454 */
455
456 /*
457 * dput - release a dentry
458 * @dentry: dentry to release
459 *
460 * Release a dentry. This will drop the usage count and if appropriate
461 * call the dentry unlink method as well as removing it from the queues and
462 * releasing its resources. If the parent dentries were scheduled for release
463 * they too may now get deleted.
464 */
465 void dput(struct dentry *dentry)
466 {
467 if (!dentry)
468 return;
469
470 repeat:
471 if (dentry->d_count == 1)
472 might_sleep();
473 spin_lock(&dentry->d_lock);
474 BUG_ON(!dentry->d_count);
475 if (dentry->d_count > 1) {
476 dentry->d_count--;
477 spin_unlock(&dentry->d_lock);
478 return;
479 }
480
481 if (dentry->d_flags & DCACHE_OP_DELETE) {
482 if (dentry->d_op->d_delete(dentry))
483 goto kill_it;
484 }
485
486 /* Unreachable? Get rid of it */
487 if (d_unhashed(dentry))
488 goto kill_it;
489
490 /*
491 * If this dentry needs lookup, don't set the referenced flag so that it
492 * is more likely to be cleaned up by the dcache shrinker in case of
493 * memory pressure.
494 */
495 if (!d_need_lookup(dentry))
496 dentry->d_flags |= DCACHE_REFERENCED;
497 dentry_lru_add(dentry);
498
499 dentry->d_count--;
500 spin_unlock(&dentry->d_lock);
501 return;
502
503 kill_it:
504 dentry = dentry_kill(dentry, 1);
505 if (dentry)
506 goto repeat;
507 }
508 EXPORT_SYMBOL(dput);
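/*
 * The reference discipline dput() pairs with, sketched with dget()
 * (the inline helper from <linux/dcache.h> that takes an extra
 * reference):
 *
 *	struct dentry *d = dget(dentry);
 *	// d is pinned: it cannot be freed while we use d->d_inode etc.
 *	dput(d);
 *	// d may be freed at any point from here on; dput() may sleep
 */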
509
510 /**
511 * d_invalidate - invalidate a dentry
512 * @dentry: dentry to invalidate
513 *
514 * Try to invalidate the dentry if it turns out to be
515 * possible. If there are other dentries that can be
516 * reached through this one we can't delete it and we
517 * return -EBUSY. On success we return 0.
518 *
519 * no dcache lock.
520 */
521
522 int d_invalidate(struct dentry * dentry)
523 {
524 /*
525 * If it's already been dropped, return OK.
526 */
527 spin_lock(&dentry->d_lock);
528 if (d_unhashed(dentry)) {
529 spin_unlock(&dentry->d_lock);
530 return 0;
531 }
532 /*
533 * Check whether to do a partial shrink_dcache
534 * to get rid of unused child entries.
535 */
536 if (!list_empty(&dentry->d_subdirs)) {
537 spin_unlock(&dentry->d_lock);
538 shrink_dcache_parent(dentry);
539 spin_lock(&dentry->d_lock);
540 }
541
542 /*
543 * Somebody else still using it?
544 *
545 * If it's a directory, we can't drop it
546 * for fear of somebody re-populating it
547 * with children (even though dropping it
548 * would make it unreachable from the root,
549 * we might still populate it if it was a
550 * working directory or similar).
551 * We also need to leave mountpoints alone,
552 * directory or not.
553 */
554 if (dentry->d_count > 1 && dentry->d_inode) {
555 if (S_ISDIR(dentry->d_inode->i_mode) || d_mountpoint(dentry)) {
556 spin_unlock(&dentry->d_lock);
557 return -EBUSY;
558 }
559 }
560
561 __d_drop(dentry);
562 spin_unlock(&dentry->d_lock);
563 return 0;
564 }
565 EXPORT_SYMBOL(d_invalidate);
566
567 /* This must be called with d_lock held */
568 static inline void __dget_dlock(struct dentry *dentry)
569 {
570 dentry->d_count++;
571 }
572
573 static inline void __dget(struct dentry *dentry)
574 {
575 spin_lock(&dentry->d_lock);
576 __dget_dlock(dentry);
577 spin_unlock(&dentry->d_lock);
578 }
579
580 struct dentry *dget_parent(struct dentry *dentry)
581 {
582 struct dentry *ret;
583
584 repeat:
585 /*
586 * Don't need rcu_dereference because we re-check it was correct under
587 * the lock.
588 */
589 rcu_read_lock();
590 ret = dentry->d_parent;
591 spin_lock(&ret->d_lock);
592 if (unlikely(ret != dentry->d_parent)) {
593 spin_unlock(&ret->d_lock);
594 rcu_read_unlock();
595 goto repeat;
596 }
597 rcu_read_unlock();
598 BUG_ON(!ret->d_count);
599 ret->d_count++;
600 spin_unlock(&ret->d_lock);
601 return ret;
602 }
603 EXPORT_SYMBOL(dget_parent);
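/*
 * Why dget_parent() exists, as a short sketch: dentry->d_parent can
 * change under a concurrent rename, so it cannot be pinned by a bare
 * load. dget_parent() hands back a parent that was current at the
 * time of the call and is referenced.
 *
 *	struct dentry *parent = dget_parent(dentry);
 *	// safe to dereference parent here, even against renames
 *	dput(parent);
 */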
604
605 /**
606 * d_find_alias - grab a hashed alias of inode
607 * @inode: inode in question
608 * @want_discon: flag, used by d_splice_alias, to request
609 * that only a DISCONNECTED alias be returned.
610 *
611 * If inode has a hashed alias, or is a directory and has any alias,
612 * acquire the reference to alias and return it. Otherwise return NULL.
613 * Notice that if inode is a directory there can be only one alias and
614 * it can be unhashed only if it has no children, or if it is the root
615 * of a filesystem.
616 *
617 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
618 * any other hashed alias over that one unless @want_discon is set,
619 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
620 */
621 static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
622 {
623 struct dentry *alias, *discon_alias;
624
625 again:
626 discon_alias = NULL;
627 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
628 spin_lock(&alias->d_lock);
629 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
630 if (IS_ROOT(alias) &&
631 (alias->d_flags & DCACHE_DISCONNECTED)) {
632 discon_alias = alias;
633 } else if (!want_discon) {
634 __dget_dlock(alias);
635 spin_unlock(&alias->d_lock);
636 return alias;
637 }
638 }
639 spin_unlock(&alias->d_lock);
640 }
641 if (discon_alias) {
642 alias = discon_alias;
643 spin_lock(&alias->d_lock);
644 if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
645 if (IS_ROOT(alias) &&
646 (alias->d_flags & DCACHE_DISCONNECTED)) {
647 __dget_dlock(alias);
648 spin_unlock(&alias->d_lock);
649 return alias;
650 }
651 }
652 spin_unlock(&alias->d_lock);
653 goto again;
654 }
655 return NULL;
656 }
657
658 struct dentry *d_find_alias(struct inode *inode)
659 {
660 struct dentry *de = NULL;
661
662 if (!list_empty(&inode->i_dentry)) {
663 spin_lock(&inode->i_lock);
664 de = __d_find_alias(inode, 0);
665 spin_unlock(&inode->i_lock);
666 }
667 return de;
668 }
669 EXPORT_SYMBOL(d_find_alias);
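/*
 * Typical d_find_alias() use, sketched: given only an inode (say, in
 * a hypothetical invalidation path), recover a hashed dentry for it
 * and drop the reference the helper took when done.
 *
 *	struct dentry *alias = d_find_alias(inode);
 *	if (alias) {
 *		// act on the name/path of the inode via alias
 *		dput(alias);
 *	}
 */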
670
671 /*
672 * Try to kill dentries associated with this inode.
673 * WARNING: you must own a reference to inode.
674 */
675 void d_prune_aliases(struct inode *inode)
676 {
677 struct dentry *dentry;
678 restart:
679 spin_lock(&inode->i_lock);
680 list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
681 spin_lock(&dentry->d_lock);
682 if (!dentry->d_count) {
683 __dget_dlock(dentry);
684 __d_drop(dentry);
685 spin_unlock(&dentry->d_lock);
686 spin_unlock(&inode->i_lock);
687 dput(dentry);
688 goto restart;
689 }
690 spin_unlock(&dentry->d_lock);
691 }
692 spin_unlock(&inode->i_lock);
693 }
694 EXPORT_SYMBOL(d_prune_aliases);
695
696 /*
697 * Try to throw away a dentry - free the inode, dput the parent.
698 * Requires dentry->d_lock is held, and dentry->d_count == 0.
699 * Releases dentry->d_lock.
700 *
701 * This may fail if locks cannot be acquired; no problem, just try again.
702 */
703 static void try_prune_one_dentry(struct dentry *dentry)
704 __releases(dentry->d_lock)
705 {
706 struct dentry *parent;
707
708 parent = dentry_kill(dentry, 0);
709 /*
710 * If dentry_kill returns NULL, we have nothing more to do.
711 * if it returns the same dentry, trylocks failed. In either
712 * case, just loop again.
713 *
714 * Otherwise, we need to prune ancestors too. This is necessary
715 * to prevent quadratic behavior of shrink_dcache_parent(), but
716 * is also expected to be beneficial in reducing dentry cache
717 * fragmentation.
718 */
719 if (!parent)
720 return;
721 if (parent == dentry)
722 return;
723
724 /* Prune ancestors. */
725 dentry = parent;
726 while (dentry) {
727 spin_lock(&dentry->d_lock);
728 if (dentry->d_count > 1) {
729 dentry->d_count--;
730 spin_unlock(&dentry->d_lock);
731 return;
732 }
733 dentry = dentry_kill(dentry, 1);
734 }
735 }
736
737 static void shrink_dentry_list(struct list_head *list)
738 {
739 struct dentry *dentry;
740
741 rcu_read_lock();
742 for (;;) {
743 dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
744 if (&dentry->d_lru == list)
745 break; /* empty */
746 spin_lock(&dentry->d_lock);
747 if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
748 spin_unlock(&dentry->d_lock);
749 continue;
750 }
751
752 /*
753 * We found an inuse dentry which was not removed from
754 * the LRU because of laziness during lookup. Do not free
755 * it - just keep it off the LRU list.
756 */
757 if (dentry->d_count) {
758 dentry_lru_del(dentry);
759 spin_unlock(&dentry->d_lock);
760 continue;
761 }
762
763 rcu_read_unlock();
764
765 try_prune_one_dentry(dentry);
766
767 rcu_read_lock();
768 }
769 rcu_read_unlock();
770 }
771
772 /**
773 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
774 * @sb: superblock to shrink dentry LRU.
775 * @count: number of entries to prune
776 * @flags: flags to control the dentry processing
777 *
778 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
779 */
780 static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
781 {
782 struct dentry *dentry;
783 LIST_HEAD(referenced);
784 LIST_HEAD(tmp);
785
786 relock:
787 spin_lock(&dcache_lru_lock);
788 while (!list_empty(&sb->s_dentry_lru)) {
789 dentry = list_entry(sb->s_dentry_lru.prev,
790 struct dentry, d_lru);
791 BUG_ON(dentry->d_sb != sb);
792
793 if (!spin_trylock(&dentry->d_lock)) {
794 spin_unlock(&dcache_lru_lock);
795 cpu_relax();
796 goto relock;
797 }
798
799 /*
800 * If we are honouring the DCACHE_REFERENCED flag and the
801 * dentry has this flag set, don't free it. Clear the flag
802 * and put it back on the LRU.
803 */
804 if (flags & DCACHE_REFERENCED &&
805 dentry->d_flags & DCACHE_REFERENCED) {
806 dentry->d_flags &= ~DCACHE_REFERENCED;
807 list_move(&dentry->d_lru, &referenced);
808 spin_unlock(&dentry->d_lock);
809 } else {
810 list_move_tail(&dentry->d_lru, &tmp);
811 spin_unlock(&dentry->d_lock);
812 if (!--count)
813 break;
814 }
815 cond_resched_lock(&dcache_lru_lock);
816 }
817 if (!list_empty(&referenced))
818 list_splice(&referenced, &sb->s_dentry_lru);
819 spin_unlock(&dcache_lru_lock);
820
821 shrink_dentry_list(&tmp);
822 }
823
824 /**
825 * prune_dcache_sb - shrink the dcache
826 * @sb: superblock
827 * @nr_to_scan: number of entries to try to free
828 *
829 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
830 * done when we need more memory and is called from the superblock
831 * shrinker function.
832 *
833 * This function may fail to free any resources if all the dentries are in
834 * use.
835 */
836 void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
837 {
838 __shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
839 }
840
841 /**
842 * shrink_dcache_sb - shrink dcache for a superblock
843 * @sb: superblock
844 *
845 * Shrink the dcache for the specified super block. This is used to free
846 * the dcache before unmounting a file system.
847 */
848 void shrink_dcache_sb(struct super_block *sb)
849 {
850 LIST_HEAD(tmp);
851
852 spin_lock(&dcache_lru_lock);
853 while (!list_empty(&sb->s_dentry_lru)) {
854 list_splice_init(&sb->s_dentry_lru, &tmp);
855 spin_unlock(&dcache_lru_lock);
856 shrink_dentry_list(&tmp);
857 spin_lock(&dcache_lru_lock);
858 }
859 spin_unlock(&dcache_lru_lock);
860 }
861 EXPORT_SYMBOL(shrink_dcache_sb);
862
863 /*
864 * destroy a single subtree of dentries for unmount
865 * - see the comments on shrink_dcache_for_umount() for a description of the
866 * locking
867 */
868 static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
869 {
870 struct dentry *parent;
871
872 BUG_ON(!IS_ROOT(dentry));
873
874 for (;;) {
875 /* descend to the first leaf in the current subtree */
876 while (!list_empty(&dentry->d_subdirs))
877 dentry = list_entry(dentry->d_subdirs.next,
878 struct dentry, d_u.d_child);
879
880 /* consume the dentries from this leaf up through its parents
881 * until we find one with children or run out altogether */
882 do {
883 struct inode *inode;
884
885 /*
886 * remove the dentry from the lru, and inform
887 * the fs that this dentry is about to be
888 * unhashed and destroyed.
889 */
890 dentry_lru_prune(dentry);
891 __d_shrink(dentry);
892
893 if (dentry->d_count != 0) {
894 printk(KERN_ERR
895 "BUG: Dentry %p{i=%lx,n=%s}"
896 " still in use (%d)"
897 " [unmount of %s %s]\n",
898 dentry,
899 dentry->d_inode ?
900 dentry->d_inode->i_ino : 0UL,
901 dentry->d_name.name,
902 dentry->d_count,
903 dentry->d_sb->s_type->name,
904 dentry->d_sb->s_id);
905 BUG();
906 }
907
908 if (IS_ROOT(dentry)) {
909 parent = NULL;
910 list_del(&dentry->d_u.d_child);
911 } else {
912 parent = dentry->d_parent;
913 parent->d_count--;
914 list_del(&dentry->d_u.d_child);
915 }
916
917 inode = dentry->d_inode;
918 if (inode) {
919 dentry->d_inode = NULL;
920 list_del_init(&dentry->d_alias);
921 if (dentry->d_op && dentry->d_op->d_iput)
922 dentry->d_op->d_iput(dentry, inode);
923 else
924 iput(inode);
925 }
926
927 d_free(dentry);
928
929 /* finished when we fall off the top of the tree,
930 * otherwise we ascend to the parent and move to the
931 * next sibling if there is one */
932 if (!parent)
933 return;
934 dentry = parent;
935 } while (list_empty(&dentry->d_subdirs));
936
937 dentry = list_entry(dentry->d_subdirs.next,
938 struct dentry, d_u.d_child);
939 }
940 }
941
942 /*
943 * destroy the dentries attached to a superblock on unmounting
944 * - we don't need to use dentry->d_lock because:
945 * - the superblock is detached from all mountings and open files, so the
946 * dentry trees will not be rearranged by the VFS
947 * - s_umount is write-locked, so the memory pressure shrinker will ignore
948 * any dentries belonging to this superblock that it comes across
949 * - the filesystem itself is no longer permitted to rearrange the dentries
950 * in this superblock
951 */
952 void shrink_dcache_for_umount(struct super_block *sb)
953 {
954 struct dentry *dentry;
955
956 if (down_read_trylock(&sb->s_umount))
957 BUG();
958
959 dentry = sb->s_root;
960 sb->s_root = NULL;
961 dentry->d_count--;
962 shrink_dcache_for_umount_subtree(dentry);
963
964 while (!hlist_bl_empty(&sb->s_anon)) {
965 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
966 shrink_dcache_for_umount_subtree(dentry);
967 }
968 }
969
970 /*
971 * This tries to ascend one level of parenthood, but
972 * we can race with renaming, so we need to re-check
973 * the parenthood after dropping the lock and check
974 * that the sequence number still matches.
975 */
976 static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
977 {
978 struct dentry *new = old->d_parent;
979
980 rcu_read_lock();
981 spin_unlock(&old->d_lock);
982 spin_lock(&new->d_lock);
983
984 /*
985 * might go back up the wrong parent if we have had a rename
986 * or deletion
987 */
988 if (new != old->d_parent ||
989 (old->d_flags & DCACHE_DISCONNECTED) ||
990 (!locked && read_seqretry(&rename_lock, seq))) {
991 spin_unlock(&new->d_lock);
992 new = NULL;
993 }
994 rcu_read_unlock();
995 return new;
996 }
997
998
999 /*
1000 * Search for at least 1 mount point in the dentry's subdirs.
1001 * We descend to the next level whenever the d_subdirs
1002 * list is non-empty and continue searching.
1003 */
1004
1005 /**
1006 * have_submounts - check for mounts over a dentry
1007 * @parent: dentry to check.
1008 *
1009 * Return true if the parent or its subdirectories contain
1010 * a mount point
1011 */
1012 int have_submounts(struct dentry *parent)
1013 {
1014 struct dentry *this_parent;
1015 struct list_head *next;
1016 unsigned seq;
1017 int locked = 0;
1018
1019 seq = read_seqbegin(&rename_lock);
1020 again:
1021 this_parent = parent;
1022
1023 if (d_mountpoint(parent))
1024 goto positive;
1025 spin_lock(&this_parent->d_lock);
1026 repeat:
1027 next = this_parent->d_subdirs.next;
1028 resume:
1029 while (next != &this_parent->d_subdirs) {
1030 struct list_head *tmp = next;
1031 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1032 next = tmp->next;
1033
1034 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1035 /* Have we found a mount point ? */
1036 if (d_mountpoint(dentry)) {
1037 spin_unlock(&dentry->d_lock);
1038 spin_unlock(&this_parent->d_lock);
1039 goto positive;
1040 }
1041 if (!list_empty(&dentry->d_subdirs)) {
1042 spin_unlock(&this_parent->d_lock);
1043 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1044 this_parent = dentry;
1045 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1046 goto repeat;
1047 }
1048 spin_unlock(&dentry->d_lock);
1049 }
1050 /*
1051 * All done at this level ... ascend and resume the search.
1052 */
1053 if (this_parent != parent) {
1054 struct dentry *child = this_parent;
1055 this_parent = try_to_ascend(this_parent, locked, seq);
1056 if (!this_parent)
1057 goto rename_retry;
1058 next = child->d_u.d_child.next;
1059 goto resume;
1060 }
1061 spin_unlock(&this_parent->d_lock);
1062 if (!locked && read_seqretry(&rename_lock, seq))
1063 goto rename_retry;
1064 if (locked)
1065 write_sequnlock(&rename_lock);
1066 return 0; /* No mount points found in tree */
1067 positive:
1068 if (!locked && read_seqretry(&rename_lock, seq))
1069 goto rename_retry;
1070 if (locked)
1071 write_sequnlock(&rename_lock);
1072 return 1;
1073
1074 rename_retry:
1075 locked = 1;
1076 write_seqlock(&rename_lock);
1077 goto again;
1078 }
1079 EXPORT_SYMBOL(have_submounts);
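/*
 * A sketch of the classic have_submounts() caller (an autofs-style
 * expiry check; foo_expire_subtree() is a hypothetical helper): a
 * subtree must not be torn down while anything is mounted inside it.
 *
 *	if (have_submounts(dentry))
 *		return -EBUSY;
 *	foo_expire_subtree(dentry);
 */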
1080
1081 /*
1082 * Search the dentry child list for the specified parent,
1083 * and move any unused dentries to the end of the unused
1084 * list for prune_dcache(). We descend to the next level
1085 * whenever the d_subdirs list is non-empty and continue
1086 * searching.
1087 *
1088 * It returns zero iff there are no unused children,
1089 * otherwise it returns the number of children moved to
1090 * the end of the unused list. This may not be the total
1091 * number of unused children, because select_parent can
1092 * drop the lock and return early due to latency
1093 * constraints.
1094 */
1095 static int select_parent(struct dentry * parent)
1096 {
1097 struct dentry *this_parent;
1098 struct list_head *next;
1099 unsigned seq;
1100 int found = 0;
1101 int locked = 0;
1102
1103 seq = read_seqbegin(&rename_lock);
1104 again:
1105 this_parent = parent;
1106 spin_lock(&this_parent->d_lock);
1107 repeat:
1108 next = this_parent->d_subdirs.next;
1109 resume:
1110 while (next != &this_parent->d_subdirs) {
1111 struct list_head *tmp = next;
1112 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1113 next = tmp->next;
1114
1115 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1116
1117 /*
1118 * move only zero ref count dentries to the end
1119 * of the unused list for prune_dcache
1120 */
1121 if (!dentry->d_count) {
1122 dentry_lru_move_tail(dentry);
1123 found++;
1124 } else {
1125 dentry_lru_del(dentry);
1126 }
1127
1128 /*
1129 * We can return to the caller if we have found some (this
1130 * ensures forward progress). We'll be coming back to find
1131 * the rest.
1132 */
1133 if (found && need_resched()) {
1134 spin_unlock(&dentry->d_lock);
1135 goto out;
1136 }
1137
1138 /*
1139 * Descend a level if the d_subdirs list is non-empty.
1140 */
1141 if (!list_empty(&dentry->d_subdirs)) {
1142 spin_unlock(&this_parent->d_lock);
1143 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1144 this_parent = dentry;
1145 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1146 goto repeat;
1147 }
1148
1149 spin_unlock(&dentry->d_lock);
1150 }
1151 /*
1152 * All done at this level ... ascend and resume the search.
1153 */
1154 if (this_parent != parent) {
1155 struct dentry *child = this_parent;
1156 this_parent = try_to_ascend(this_parent, locked, seq);
1157 if (!this_parent)
1158 goto rename_retry;
1159 next = child->d_u.d_child.next;
1160 goto resume;
1161 }
1162 out:
1163 spin_unlock(&this_parent->d_lock);
1164 if (!locked && read_seqretry(&rename_lock, seq))
1165 goto rename_retry;
1166 if (locked)
1167 write_sequnlock(&rename_lock);
1168 return found;
1169
1170 rename_retry:
1171 if (found)
1172 return found;
1173 locked = 1;
1174 write_seqlock(&rename_lock);
1175 goto again;
1176 }
1177
1178 /**
1179 * shrink_dcache_parent - prune dcache
1180 * @parent: parent of entries to prune
1181 *
1182 * Prune the dcache to remove unused children of the parent dentry.
1183 */
1184
1185 void shrink_dcache_parent(struct dentry * parent)
1186 {
1187 struct super_block *sb = parent->d_sb;
1188 int found;
1189
1190 while ((found = select_parent(parent)) != 0)
1191 __shrink_dcache_sb(sb, found, 0);
1192 }
1193 EXPORT_SYMBOL(shrink_dcache_parent);
1194
1195 /**
1196 * __d_alloc - allocate a dcache entry
1197 * @sb: filesystem it will belong to
1198 * @name: qstr of the name
1199 *
1200 * Allocates a dentry. It returns %NULL if there is insufficient memory
1201 * available. On success the dentry is returned. The name passed in is
1202 * copied, so the caller's copy may be reused after this call.
1203 */
1204
1205 struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
1206 {
1207 struct dentry *dentry;
1208 char *dname;
1209
1210 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1211 if (!dentry)
1212 return NULL;
1213
1214 if (name->len > DNAME_INLINE_LEN-1) {
1215 dname = kmalloc(name->len + 1, GFP_KERNEL);
1216 if (!dname) {
1217 kmem_cache_free(dentry_cache, dentry);
1218 return NULL;
1219 }
1220 } else {
1221 dname = dentry->d_iname;
1222 }
1223 dentry->d_name.name = dname;
1224
1225 dentry->d_name.len = name->len;
1226 dentry->d_name.hash = name->hash;
1227 memcpy(dname, name->name, name->len);
1228 dname[name->len] = 0;
1229
1230 dentry->d_count = 1;
1231 dentry->d_flags = 0;
1232 spin_lock_init(&dentry->d_lock);
1233 seqcount_init(&dentry->d_seq);
1234 dentry->d_inode = NULL;
1235 dentry->d_parent = dentry;
1236 dentry->d_sb = sb;
1237 dentry->d_op = NULL;
1238 dentry->d_fsdata = NULL;
1239 INIT_HLIST_BL_NODE(&dentry->d_hash);
1240 INIT_LIST_HEAD(&dentry->d_lru);
1241 INIT_LIST_HEAD(&dentry->d_subdirs);
1242 INIT_LIST_HEAD(&dentry->d_alias);
1243 INIT_LIST_HEAD(&dentry->d_u.d_child);
1244 d_set_d_op(dentry, dentry->d_sb->s_d_op);
1245
1246 this_cpu_inc(nr_dentry);
1247
1248 return dentry;
1249 }
1250
1251 /**
1252 * d_alloc - allocate a dcache entry
1253 * @parent: parent of entry to allocate
1254 * @name: qstr of the name
1255 *
1256 * Allocates a dentry. It returns %NULL if there is insufficient memory
1257 * available. On success the dentry is returned. The name passed in is
1258 * copied, so the caller's copy may be reused after this call.
1259 */
1260 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1261 {
1262 struct dentry *dentry = __d_alloc(parent->d_sb, name);
1263 if (!dentry)
1264 return NULL;
1265
1266 spin_lock(&parent->d_lock);
1267 /*
1268 * don't need child lock because it is not subject
1269 * to concurrency here
1270 */
1271 __dget_dlock(parent);
1272 dentry->d_parent = parent;
1273 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
1274 spin_unlock(&parent->d_lock);
1275
1276 return dentry;
1277 }
1278 EXPORT_SYMBOL(d_alloc);
1279
1280 struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1281 {
1282 struct dentry *dentry = __d_alloc(sb, name);
1283 if (dentry)
1284 dentry->d_flags |= DCACHE_DISCONNECTED;
1285 return dentry;
1286 }
1287 EXPORT_SYMBOL(d_alloc_pseudo);
1288
1289 struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1290 {
1291 struct qstr q;
1292
1293 q.name = name;
1294 q.len = strlen(name);
1295 q.hash = full_name_hash(q.name, q.len);
1296 return d_alloc(parent, &q);
1297 }
1298 EXPORT_SYMBOL(d_alloc_name);
1299
1300 void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1301 {
1302 WARN_ON_ONCE(dentry->d_op);
1303 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
1304 DCACHE_OP_COMPARE |
1305 DCACHE_OP_REVALIDATE |
1306 DCACHE_OP_DELETE ));
1307 dentry->d_op = op;
1308 if (!op)
1309 return;
1310 if (op->d_hash)
1311 dentry->d_flags |= DCACHE_OP_HASH;
1312 if (op->d_compare)
1313 dentry->d_flags |= DCACHE_OP_COMPARE;
1314 if (op->d_revalidate)
1315 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1316 if (op->d_delete)
1317 dentry->d_flags |= DCACHE_OP_DELETE;
1318 if (op->d_prune)
1319 dentry->d_flags |= DCACHE_OP_PRUNE;
1320
1321 }
1322 EXPORT_SYMBOL(d_set_d_op);
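/*
 * How filesystems normally hook this up, sketched (foo_* names are
 * hypothetical): set sb->s_d_op once at mount time, and __d_alloc()
 * above applies it to every new dentry via d_set_d_op().
 *
 *	static const struct dentry_operations foo_dentry_ops = {
 *		.d_revalidate	= foo_d_revalidate,
 *		.d_delete	= foo_d_delete,
 *	};
 *
 *	static int foo_fill_super(struct super_block *sb, void *data, int silent)
 *	{
 *		sb->s_d_op = &foo_dentry_ops;
 *		// ... allocate root inode, set sb->s_root, etc.
 *		return 0;
 *	}
 */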
1323
1324 static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1325 {
1326 spin_lock(&dentry->d_lock);
1327 if (inode) {
1328 if (unlikely(IS_AUTOMOUNT(inode)))
1329 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
1330 list_add(&dentry->d_alias, &inode->i_dentry);
1331 }
1332 dentry->d_inode = inode;
1333 dentry_rcuwalk_barrier(dentry);
1334 spin_unlock(&dentry->d_lock);
1335 fsnotify_d_instantiate(dentry, inode);
1336 }
1337
1338 /**
1339 * d_instantiate - fill in inode information for a dentry
1340 * @entry: dentry to complete
1341 * @inode: inode to attach to this dentry
1342 *
1343 * Fill in inode information in the entry.
1344 *
1345 * This turns negative dentries into productive full members
1346 * of society.
1347 *
1348 * NOTE! This assumes that the inode count has been incremented
1349 * (or otherwise set) by the caller to indicate that it is now
1350 * in use by the dcache.
1351 */
1352
1353 void d_instantiate(struct dentry *entry, struct inode * inode)
1354 {
1355 BUG_ON(!list_empty(&entry->d_alias));
1356 if (inode)
1357 spin_lock(&inode->i_lock);
1358 __d_instantiate(entry, inode);
1359 if (inode)
1360 spin_unlock(&inode->i_lock);
1361 security_d_instantiate(entry, inode);
1362 }
1363 EXPORT_SYMBOL(d_instantiate);
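/*
 * Sketch of the pairing the NOTE above demands, in a hypothetical
 * ->create method (foo_new_inode() is assumed to return an inode
 * whose count already reflects the dcache's use):
 *
 *	static int foo_create(struct inode *dir, struct dentry *dentry,
 *			      int mode, struct nameidata *nd)
 *	{
 *		struct inode *inode = foo_new_inode(dir->i_sb, mode);
 *		if (!inode)
 *			return -ENOSPC;
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */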
1364
1365 /**
1366 * d_instantiate_unique - instantiate a non-aliased dentry
1367 * @entry: dentry to instantiate
1368 * @inode: inode to attach to this dentry
1369 *
1370 * Fill in inode information in the entry. On success, it returns NULL.
1371 * If an unhashed alias of "entry" already exists, then we return the
1372 * aliased dentry instead and drop one reference to inode.
1373 *
1374 * Note that in order to avoid conflicts with rename() etc, the caller
1375 * had better be holding the parent directory semaphore.
1376 *
1377 * This also assumes that the inode count has been incremented
1378 * (or otherwise set) by the caller to indicate that it is now
1379 * in use by the dcache.
1380 */
1381 static struct dentry *__d_instantiate_unique(struct dentry *entry,
1382 struct inode *inode)
1383 {
1384 struct dentry *alias;
1385 int len = entry->d_name.len;
1386 const char *name = entry->d_name.name;
1387 unsigned int hash = entry->d_name.hash;
1388
1389 if (!inode) {
1390 __d_instantiate(entry, NULL);
1391 return NULL;
1392 }
1393
1394 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1395 struct qstr *qstr = &alias->d_name;
1396
1397 /*
1398 * Don't need alias->d_lock here, because aliases with
1399 * d_parent == entry->d_parent are not subject to name or
1400 * parent changes, because the parent inode i_mutex is held.
1401 */
1402 if (qstr->hash != hash)
1403 continue;
1404 if (alias->d_parent != entry->d_parent)
1405 continue;
1406 if (dentry_cmp(qstr->name, qstr->len, name, len))
1407 continue;
1408 __dget(alias);
1409 return alias;
1410 }
1411
1412 __d_instantiate(entry, inode);
1413 return NULL;
1414 }
1415
1416 struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1417 {
1418 struct dentry *result;
1419
1420 BUG_ON(!list_empty(&entry->d_alias));
1421
1422 if (inode)
1423 spin_lock(&inode->i_lock);
1424 result = __d_instantiate_unique(entry, inode);
1425 if (inode)
1426 spin_unlock(&inode->i_lock);
1427
1428 if (!result) {
1429 security_d_instantiate(entry, inode);
1430 return NULL;
1431 }
1432
1433 BUG_ON(!d_unhashed(result));
1434 iput(inode);
1435 return result;
1436 }
1437
1438 EXPORT_SYMBOL(d_instantiate_unique);
1439
1440 /**
1441 * d_alloc_root - allocate root dentry
1442 * @root_inode: inode to allocate the root for
1443 *
1444 * Allocate a root ("/") dentry for the inode given. The dentry is
1445 * instantiated with the inode and returned. %NULL is returned if there
1446 * is insufficient memory or the inode passed is %NULL.
1447 */
1448
1449 struct dentry * d_alloc_root(struct inode * root_inode)
1450 {
1451 struct dentry *res = NULL;
1452
1453 if (root_inode) {
1454 static const struct qstr name = { .name = "/", .len = 1 };
1455
1456 res = __d_alloc(root_inode->i_sb, &name);
1457 if (res)
1458 d_instantiate(res, root_inode);
1459 }
1460 return res;
1461 }
1462 EXPORT_SYMBOL(d_alloc_root);
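/*
 * Mount-time pairing for d_alloc_root(), sketched (foo_get_root_inode()
 * is hypothetical): on failure the caller still owns the inode
 * reference and must drop it, since no dentry took it over.
 *
 *	struct inode *root = foo_get_root_inode(sb);
 *	sb->s_root = d_alloc_root(root);
 *	if (!sb->s_root) {
 *		iput(root);
 *		return -ENOMEM;
 *	}
 */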
1463
1464 static struct dentry * __d_find_any_alias(struct inode *inode)
1465 {
1466 struct dentry *alias;
1467
1468 if (list_empty(&inode->i_dentry))
1469 return NULL;
1470 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
1471 __dget(alias);
1472 return alias;
1473 }
1474
1475 static struct dentry * d_find_any_alias(struct inode *inode)
1476 {
1477 struct dentry *de;
1478
1479 spin_lock(&inode->i_lock);
1480 de = __d_find_any_alias(inode);
1481 spin_unlock(&inode->i_lock);
1482 return de;
1483 }
1484
1485
1486 /**
1487 * d_obtain_alias - find or allocate a dentry for a given inode
1488 * @inode: inode to allocate the dentry for
1489 *
1490 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1491 * similar open by handle operations. The returned dentry may be anonymous,
1492 * or may have a full name (if the inode was already in the cache).
1493 *
1494 * When called on a directory inode, we must ensure that the inode only ever
1495 * has one dentry. If a dentry is found, that is returned instead of
1496 * allocating a new one.
1497 *
1498 * On successful return, the reference to the inode has been transferred
1499 * to the dentry. In case of an error the reference on the inode is released.
1500 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1501 * be passed in, and the error will be propagated to the return value,
1502 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
1503 */
1504 struct dentry *d_obtain_alias(struct inode *inode)
1505 {
1506 static const struct qstr anonstring = { .name = "" };
1507 struct dentry *tmp;
1508 struct dentry *res;
1509
1510 if (!inode)
1511 return ERR_PTR(-ESTALE);
1512 if (IS_ERR(inode))
1513 return ERR_CAST(inode);
1514
1515 res = d_find_any_alias(inode);
1516 if (res)
1517 goto out_iput;
1518
1519 tmp = __d_alloc(inode->i_sb, &anonstring);
1520 if (!tmp) {
1521 res = ERR_PTR(-ENOMEM);
1522 goto out_iput;
1523 }
1524
1525 spin_lock(&inode->i_lock);
1526 res = __d_find_any_alias(inode);
1527 if (res) {
1528 spin_unlock(&inode->i_lock);
1529 dput(tmp);
1530 goto out_iput;
1531 }
1532
1533 /* attach a disconnected dentry */
1534 spin_lock(&tmp->d_lock);
1535 tmp->d_inode = inode;
1536 tmp->d_flags |= DCACHE_DISCONNECTED;
1537 list_add(&tmp->d_alias, &inode->i_dentry);
1538 hlist_bl_lock(&tmp->d_sb->s_anon);
1539 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1540 hlist_bl_unlock(&tmp->d_sb->s_anon);
1541 spin_unlock(&tmp->d_lock);
1542 spin_unlock(&inode->i_lock);
1543 security_d_instantiate(tmp, inode);
1544
1545 return tmp;
1546
1547 out_iput:
1548 if (res && !IS_ERR(res))
1549 security_d_instantiate(res, inode);
1550 iput(inode);
1551 return res;
1552 }
1553 EXPORT_SYMBOL(d_obtain_alias);
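/*
 * The canonical caller is an export_operations ->fh_to_dentry method;
 * a sketch with a hypothetical foo_iget_from_fh(). No inode error
 * check is needed, because a %NULL or IS_ERR inode is mapped to the
 * matching ERR_PTR by d_obtain_alias() itself.
 *
 *	static struct dentry *foo_fh_to_dentry(struct super_block *sb,
 *			struct fid *fid, int fh_len, int fh_type)
 *	{
 *		return d_obtain_alias(foo_iget_from_fh(sb, fid));
 *	}
 */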
1554
1555 /**
1556 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1557 * @inode: the inode which may have a disconnected dentry
1558 * @dentry: a negative dentry which we want to point to the inode.
1559 *
1560 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1561 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1562 * and return it, else simply d_add the inode to the dentry and return NULL.
1563 *
1564 * This is needed in the lookup routine of any filesystem that is exportable
1565 * (via knfsd) so that we can build dcache paths to directories effectively.
1566 *
1567 * If a dentry was found and moved, then it is returned. Otherwise NULL
1568 * is returned. This matches the expected return value of ->lookup.
1569 *
1570 */
1571 struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1572 {
1573 struct dentry *new = NULL;
1574
1575 if (IS_ERR(inode))
1576 return ERR_CAST(inode);
1577
1578 if (inode && S_ISDIR(inode->i_mode)) {
1579 spin_lock(&inode->i_lock);
1580 new = __d_find_alias(inode, 1);
1581 if (new) {
1582 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
1583 spin_unlock(&inode->i_lock);
1584 security_d_instantiate(new, inode);
1585 d_move(new, dentry);
1586 iput(inode);
1587 } else {
1588 /* already taking inode->i_lock, so d_add() by hand */
1589 __d_instantiate(dentry, inode);
1590 spin_unlock(&inode->i_lock);
1591 security_d_instantiate(dentry, inode);
1592 d_rehash(dentry);
1593 }
1594 } else
1595 d_add(dentry, inode);
1596 return new;
1597 }
1598 EXPORT_SYMBOL(d_splice_alias);
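/*
 * The ->lookup() shape referred to above, sketched (foo_lookup_by_name()
 * is hypothetical and may return %NULL for a negative entry): returning
 * d_splice_alias()'s result directly hands the VFS either the spliced
 * alias or %NULL, as expected.
 *
 *	static struct dentry *foo_lookup(struct inode *dir,
 *			struct dentry *dentry, struct nameidata *nd)
 *	{
 *		struct inode *inode = foo_lookup_by_name(dir, &dentry->d_name);
 *		return d_splice_alias(inode, dentry);
 *	}
 */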
1599
1600 /**
1601 * d_add_ci - lookup or allocate new dentry with case-exact name
1602 * @inode: the inode case-insensitive lookup has found
1603 * @dentry: the negative dentry that was passed to the parent's lookup func
1604 * @name: the case-exact name to be associated with the returned dentry
1605 *
1606 * This is to avoid filling the dcache with case-insensitive names to the
1607 * same inode; only the actual correct case is stored in the dcache for
1608 * case-insensitive filesystems.
1609 *
1610 * For a case-insensitive lookup match, if the case-exact dentry
1611 * already exists in the dcache, use it and return it.
1612 *
1613 * If no entry exists with the exact case name, allocate new dentry with
1614 * the exact case, and return the spliced entry.
1615 */
1616 struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
1617 struct qstr *name)
1618 {
1619 int error;
1620 struct dentry *found;
1621 struct dentry *new;
1622
1623 /*
1624 * First check if a dentry matching the name already exists,
1625 * if not go ahead and create it now.
1626 */
1627 found = d_hash_and_lookup(dentry->d_parent, name);
1628 if (!found) {
1629 new = d_alloc(dentry->d_parent, name);
1630 if (!new) {
1631 error = -ENOMEM;
1632 goto err_out;
1633 }
1634
1635 found = d_splice_alias(inode, new);
1636 if (found) {
1637 dput(new);
1638 return found;
1639 }
1640 return new;
1641 }
1642
1643 /*
1644 * If a matching dentry exists, and it's not negative use it.
1645 *
1646 * Decrement the reference count to balance the iget() done
1647 * earlier on.
1648 */
1649 if (found->d_inode) {
1650 if (unlikely(found->d_inode != inode)) {
1651 /* This can't happen because bad inodes are unhashed. */
1652 BUG_ON(!is_bad_inode(inode));
1653 BUG_ON(!is_bad_inode(found->d_inode));
1654 }
1655 iput(inode);
1656 return found;
1657 }
1658
1659 /*
1660 * We are going to instantiate this dentry, unhash it and clear the
1661 * lookup flag so we can do that.
1662 */
1663 if (unlikely(d_need_lookup(found)))
1664 d_clear_need_lookup(found);
1665
1666 /*
1667 * Negative dentry: instantiate it unless the inode is a directory and
1668 * already has a dentry.
1669 */
1670 new = d_splice_alias(inode, found);
1671 if (new) {
1672 dput(found);
1673 found = new;
1674 }
1675 return found;
1676
1677 err_out:
1678 iput(inode);
1679 return ERR_PTR(error);
1680 }
1681 EXPORT_SYMBOL(d_add_ci);
1682
1683 /**
1684 * __d_lookup_rcu - search for a dentry (racy, store-free)
1685 * @parent: parent dentry
1686 * @name: qstr of name we wish to find
1687 * @seq: returns d_seq value at the point where the dentry was found
1688 * @inode: returns dentry->d_inode when the inode was found valid.
1689 * Returns: dentry, or NULL
1690 *
1691 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
1692 * resolution (store-free path walking) design described in
1693 * Documentation/filesystems/path-lookup.txt.
1694 *
1695 * This is not to be used outside core vfs.
1696 *
1697 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
1698 * held, and rcu_read_lock held. The returned dentry must not be stored into
1699 * without taking d_lock and checking d_seq sequence count against @seq
1700 * returned here.
1701 *
1702 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
1703 * function.
1704 *
1705 * Alternatively, __d_lookup_rcu may be called again to look up the child of
1706 * the returned dentry, so long as its parent's seqlock is checked after the
1707 * child is looked up. Thus, an interlocking stepping of sequence lock checks
1708 * is formed, giving integrity down the path walk.
1709 */
1710 struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1711 unsigned *seq, struct inode **inode)
1712 {
1713 unsigned int len = name->len;
1714 unsigned int hash = name->hash;
1715 const unsigned char *str = name->name;
1716 struct hlist_bl_head *b = d_hash(parent, hash);
1717 struct hlist_bl_node *node;
1718 struct dentry *dentry;
1719
1720 /*
1721 * Note: There is significant duplication with __d_lookup which is
1722 * required to prevent single threaded performance regressions
1723 * especially on architectures where smp_rmb (in seqcounts) are costly.
1724 * Keep the two functions in sync.
1725 */
1726
1727 /*
1728 * The hash list is protected using RCU.
1729 *
1730 * Carefully use d_seq when comparing a candidate dentry, to avoid
1731 * races with d_move().
1732 *
1733 * It is possible that concurrent renames can mess up our list
1734 * walk here and result in missing our dentry, resulting in the
1735 * false-negative result. d_lookup() protects against concurrent
1736 * renames using rename_lock seqlock.
1737 *
1738 * See Documentation/filesystems/path-lookup.txt for more details.
1739 */
1740 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1741 struct inode *i;
1742 const char *tname;
1743 int tlen;
1744
1745 if (dentry->d_name.hash != hash)
1746 continue;
1747
1748 seqretry:
1749 *seq = read_seqcount_begin(&dentry->d_seq);
1750 if (dentry->d_parent != parent)
1751 continue;
1752 if (d_unhashed(dentry))
1753 continue;
1754 tlen = dentry->d_name.len;
1755 tname = dentry->d_name.name;
1756 i = dentry->d_inode;
1757 prefetch(tname);
1758 /*
1759 * This seqcount check is required to ensure name and
1760 * len are loaded atomically, so as not to walk off the
1761 * edge of memory when walking. If we could load this
1762 * atomically some other way, we could drop this check.
1763 */
1764 if (read_seqcount_retry(&dentry->d_seq, *seq))
1765 goto seqretry;
1766 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) {
1767 if (parent->d_op->d_compare(parent, *inode,
1768 dentry, i,
1769 tlen, tname, name))
1770 continue;
1771 } else {
1772 if (dentry_cmp(tname, tlen, str, len))
1773 continue;
1774 }
1775 /*
1776 * No extra seqcount check is required after the name
1777 * compare. The caller must perform a seqcount check in
1778 * order to do anything useful with the returned dentry
1779 * anyway.
1780 */
1781 *inode = i;
1782 return dentry;
1783 }
1784 return NULL;
1785 }
1786
1787 /**
1788 * d_lookup - search for a dentry
1789 * @parent: parent dentry
1790 * @name: qstr of name we wish to find
1791 * Returns: dentry, or NULL
1792 *
1793 * d_lookup searches the children of the parent dentry for the name in
1794 * question. If the dentry is found its reference count is incremented and the
1795 * dentry is returned. The caller must use dput to free the entry when it has
1796 * finished using it. %NULL is returned if the dentry does not exist.
1797 */
1798 struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
1799 {
1800 struct dentry *dentry;
1801 unsigned seq;
1802
1803 do {
1804 seq = read_seqbegin(&rename_lock);
1805 dentry = __d_lookup(parent, name);
1806 if (dentry)
1807 break;
1808 } while (read_seqretry(&rename_lock, seq));
1809 return dentry;
1810 }
1811 EXPORT_SYMBOL(d_lookup);
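/*
 * Looking up a cached child by name, sketched. The qstr must carry a
 * precomputed hash; d_hash_and_lookup() below wraps this, including
 * any fs-specific ->d_hash().
 *
 *	struct qstr q;
 *	struct dentry *child;
 *
 *	q.name = "config";
 *	q.len = 6;
 *	q.hash = full_name_hash(q.name, q.len);
 *	child = d_lookup(parent, &q);
 *	if (child) {
 *		// use child; d_lookup() took a reference for us
 *		dput(child);
 *	}
 */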
1812
1813 /**
1814 * __d_lookup - search for a dentry (racy)
1815 * @parent: parent dentry
1816 * @name: qstr of name we wish to find
1817 * Returns: dentry, or NULL
1818 *
1819 * __d_lookup is like d_lookup, however it may (rarely) return a
1820 * false-negative result due to unrelated rename activity.
1821 *
1822 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
1823 * however it must be used carefully, eg. with a following d_lookup in
1824 * the case of failure.
1825 *
1826 * __d_lookup callers must be commented.
1827 */
1828 struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1829 {
1830 unsigned int len = name->len;
1831 unsigned int hash = name->hash;
1832 const unsigned char *str = name->name;
1833 struct hlist_bl_head *b = d_hash(parent, hash);
1834 struct hlist_bl_node *node;
1835 struct dentry *found = NULL;
1836 struct dentry *dentry;
1837
1838 /*
1839 * Note: There is significant duplication with __d_lookup_rcu which is
1840 * required to prevent single threaded performance regressions
1841 * especially on architectures where smp_rmb (in seqcounts) are costly.
1842 * Keep the two functions in sync.
1843 */
1844
1845 /*
1846 * The hash list is protected using RCU.
1847 *
1848 * Take d_lock when comparing a candidate dentry, to avoid races
1849 * with d_move().
1850 *
1851 * It is possible that concurrent renames can mess up our list
1852 * walk here and result in missing our dentry, resulting in the
1853 * false-negative result. d_lookup() protects against concurrent
1854 * renames using rename_lock seqlock.
1855 *
1856 * See Documentation/filesystems/path-lookup.txt for more details.
1857 */
1858 rcu_read_lock();
1859
1860 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
1861 const char *tname;
1862 int tlen;
1863
1864 if (dentry->d_name.hash != hash)
1865 continue;
1866
1867 spin_lock(&dentry->d_lock);
1868 if (dentry->d_parent != parent)
1869 goto next;
1870 if (d_unhashed(dentry))
1871 goto next;
1872
1873 /*
1874 * It is safe to compare names since d_move() cannot
1875 * change the qstr (protected by d_lock).
1876 */
1877 tlen = dentry->d_name.len;
1878 tname = dentry->d_name.name;
1879 if (parent->d_flags & DCACHE_OP_COMPARE) {
1880 if (parent->d_op->d_compare(parent, parent->d_inode,
1881 dentry, dentry->d_inode,
1882 tlen, tname, name))
1883 goto next;
1884 } else {
1885 if (dentry_cmp(tname, tlen, str, len))
1886 goto next;
1887 }
1888
1889 dentry->d_count++;
1890 found = dentry;
1891 spin_unlock(&dentry->d_lock);
1892 break;
1893 next:
1894 spin_unlock(&dentry->d_lock);
1895 }
1896 rcu_read_unlock();
1897
1898 return found;
1899 }
1900
1901 /**
1902 * d_hash_and_lookup - hash the qstr then search for a dentry
1903 * @dir: Directory to search in
1904 * @name: qstr of name we wish to find
1905 *
1906 * On hash failure or on lookup failure NULL is returned.
1907 */
1908 struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1909 {
1910 struct dentry *dentry = NULL;
1911
1912 /*
1913 * Check for a fs-specific hash function. Note that we must
1914 * calculate the standard hash first, as the d_op->d_hash()
1915 * routine may choose to leave the hash value unchanged.
1916 */
1917 name->hash = full_name_hash(name->name, name->len);
1918 if (dir->d_flags & DCACHE_OP_HASH) {
1919 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
1920 goto out;
1921 }
1922 dentry = d_lookup(dir, name);
1923 out:
1924 return dentry;
1925 }
1926
1927 /**
1928 * d_validate - verify dentry provided from insecure source (deprecated)
1929 * @dentry: The dentry alleged to be valid child of @dparent
1930 * @dparent: The parent dentry (known to be valid)
1931 *
1932 * An insecure source has sent us a dentry; here we verify it and dget() it.
1933 * This is used by ncpfs in its readdir implementation.
1934 * Zero is returned if the dentry is invalid.
1935 *
1936 * This function is slow for big directories and deprecated; do not use it.
1937 */
1938 int d_validate(struct dentry *dentry, struct dentry *dparent)
1939 {
1940 struct dentry *child;
1941
1942 spin_lock(&dparent->d_lock);
1943 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
1944 if (dentry == child) {
1945 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1946 __dget_dlock(dentry);
1947 spin_unlock(&dentry->d_lock);
1948 spin_unlock(&dparent->d_lock);
1949 return 1;
1950 }
1951 }
1952 spin_unlock(&dparent->d_lock);
1953
1954 return 0;
1955 }
1956 EXPORT_SYMBOL(d_validate);
1957
1958 /*
1959 * When a file is deleted, we have two options:
1960 * - turn this dentry into a negative dentry
1961 * - unhash this dentry and free it.
1962 *
1963 * Usually, we want to just turn this into
1964 * a negative dentry, but if anybody else is
1965 * currently using the dentry or the inode
1966 * we can't do that and we fall back on removing
1967 * it from the hash queues and waiting for
1968 * it to be deleted later when it has no users
1969 */
1970
1971 /**
1972 * d_delete - delete a dentry
1973 * @dentry: The dentry to delete
1974 *
1975 * Turn the dentry into a negative dentry if possible, otherwise
1976 * remove it from the hash queues so it can be deleted later
1977 */
1978
1979 void d_delete(struct dentry * dentry)
1980 {
1981 struct inode *inode;
1982 int isdir = 0;
1983 /*
1984 * Are we the only user?
1985 */
1986 again:
1987 spin_lock(&dentry->d_lock);
1988 inode = dentry->d_inode;
1989 isdir = S_ISDIR(inode->i_mode);
1990 if (dentry->d_count == 1) {
1991 if (inode && !spin_trylock(&inode->i_lock)) {
1992 spin_unlock(&dentry->d_lock);
1993 cpu_relax();
1994 goto again;
1995 }
1996 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
1997 dentry_unlink_inode(dentry);
1998 fsnotify_nameremove(dentry, isdir);
1999 return;
2000 }
2001
2002 if (!d_unhashed(dentry))
2003 __d_drop(dentry);
2004
2005 spin_unlock(&dentry->d_lock);
2006
2007 fsnotify_nameremove(dentry, isdir);
2008 }
2009 EXPORT_SYMBOL(d_delete);
2010
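/*
 * Typical d_delete() caller (a sketch of the vfs_unlink() pattern):
 * once the filesystem's ->unlink() has succeeded, tell the dcache so
 * the entry either goes negative or gets unhashed:
 *
 *	error = dir->i_op->unlink(dir, dentry);
 *	if (!error) {
 *		...
 *		d_delete(dentry);
 *	}
 */
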
2011 static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
2012 {
2013 BUG_ON(!d_unhashed(entry));
2014 hlist_bl_lock(b);
2015 entry->d_flags |= DCACHE_RCUACCESS;
2016 hlist_bl_add_head_rcu(&entry->d_hash, b);
2017 hlist_bl_unlock(b);
2018 }
2019
2020 static void _d_rehash(struct dentry * entry)
2021 {
2022 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
2023 }
2024
2025 /**
2026 * d_rehash - add an entry back to the hash
2027 * @entry: dentry to add to the hash
2028 *
2029 * Adds a dentry to the hash according to its name.
2030 */
2031
2032 void d_rehash(struct dentry * entry)
2033 {
2034 spin_lock(&entry->d_lock);
2035 _d_rehash(entry);
2036 spin_unlock(&entry->d_lock);
2037 }
2038 EXPORT_SYMBOL(d_rehash);
2039
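/*
 * d_rehash() is the counterpart of d_drop(). For illustration, a
 * distributed filesystem that dropped a dentry while revalidating it
 * against its server could put it back once it proves still valid
 * (condition hypothetical):
 *
 *	d_drop(dentry);
 *	... revalidate against the server ...
 *	if (still_valid)
 *		d_rehash(dentry);
 */
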
2040 /**
2041 * dentry_update_name_case - update case insensitive dentry with a new name
2042 * @dentry: dentry to be updated
2043 * @name: new name
2044 *
2045 * Update a case insensitive dentry with new case of name.
2046 *
2047 * dentry must have been returned by d_lookup with name @name. Old and new
2048 * name lengths must match (ie. no d_compare which allows mismatched name
2049 * lengths).
2050 *
2051 * Parent inode i_mutex must be held over d_lookup and into this call (to
2052 * keep renames, concurrent inserts, and readdir(2) away).
2053 */
2054 void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2055 {
2056 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
2057 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2058
2059 spin_lock(&dentry->d_lock);
2060 write_seqcount_begin(&dentry->d_seq);
2061 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
2062 write_seqcount_end(&dentry->d_seq);
2063 spin_unlock(&dentry->d_lock);
2064 }
2065 EXPORT_SYMBOL(dentry_update_name_case);
2066
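/*
 * Illustration (names hypothetical): a case-insensitive filesystem
 * whose ->d_compare() matched a lookup for "FOO" against the cached
 * name "foo" can make the cache show the case actually found on disk:
 *
 *	de = d_lookup(dir, &name);
 *	if (de)
 *		dentry_update_name_case(de, &name_on_disk);
 *
 * with dir's i_mutex held across both calls, as required above.
 */
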
2067 static void switch_names(struct dentry *dentry, struct dentry *target)
2068 {
2069 if (dname_external(target)) {
2070 if (dname_external(dentry)) {
2071 /*
2072 * Both external: swap the pointers
2073 */
2074 swap(target->d_name.name, dentry->d_name.name);
2075 } else {
2076 /*
2077 * dentry:internal, target:external. Steal target's
2078 * storage and make target internal.
2079 */
2080 memcpy(target->d_iname, dentry->d_name.name,
2081 dentry->d_name.len + 1);
2082 dentry->d_name.name = target->d_name.name;
2083 target->d_name.name = target->d_iname;
2084 }
2085 } else {
2086 if (dname_external(dentry)) {
2087 /*
2088 * dentry:external, target:internal. Give dentry's
2089 * storage to target and make dentry internal
2090 */
2091 memcpy(dentry->d_iname, target->d_name.name,
2092 target->d_name.len + 1);
2093 target->d_name.name = dentry->d_name.name;
2094 dentry->d_name.name = dentry->d_iname;
2095 } else {
2096 /*
2097 * Both are internal. Just copy target to dentry
2098 */
2099 memcpy(dentry->d_iname, target->d_name.name,
2100 target->d_name.len + 1);
2101 dentry->d_name.len = target->d_name.len;
2102 return;
2103 }
2104 }
2105 swap(dentry->d_name.len, target->d_name.len);
2106 }
2107
2108 static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2109 {
2110 /*
2111 * XXXX: do we really need to take target->d_lock?
2112 */
2113 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2114 spin_lock(&target->d_parent->d_lock);
2115 else {
2116 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2117 spin_lock(&dentry->d_parent->d_lock);
2118 spin_lock_nested(&target->d_parent->d_lock,
2119 DENTRY_D_LOCK_NESTED);
2120 } else {
2121 spin_lock(&target->d_parent->d_lock);
2122 spin_lock_nested(&dentry->d_parent->d_lock,
2123 DENTRY_D_LOCK_NESTED);
2124 }
2125 }
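/*
 * The parents took lock subclasses 0 and 1 above; give the two
 * dentries themselves subclasses 2 and 3, ordered by address so
 * concurrent movers always lock them in the same order.
 */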
2126 if (target < dentry) {
2127 spin_lock_nested(&target->d_lock, 2);
2128 spin_lock_nested(&dentry->d_lock, 3);
2129 } else {
2130 spin_lock_nested(&dentry->d_lock, 2);
2131 spin_lock_nested(&target->d_lock, 3);
2132 }
2133 }
2134
2135 static void dentry_unlock_parents_for_move(struct dentry *dentry,
2136 struct dentry *target)
2137 {
2138 if (target->d_parent != dentry->d_parent)
2139 spin_unlock(&dentry->d_parent->d_lock);
2140 if (target->d_parent != target)
2141 spin_unlock(&target->d_parent->d_lock);
2142 }
2143
2144 /*
2145 * When switching names, the actual string doesn't strictly have to
2146 * be preserved in the target - because we're dropping the target
2147 * anyway. As such, we can just do a simple memcpy() to copy over
2148 * the new name before we switch.
2149 *
2150 * Note that we have to be a lot more careful about getting the hash
2151 * switched - we have to switch the hash value properly even if it
2152 * then no longer matches the actual (corrupted) string of the target.
2153 * The hash value has to match the hash queue that the dentry is on..
2154 */
2155 /*
2156 * __d_move - move a dentry
2157 * @dentry: entry to move
2158 * @target: new dentry
2159 *
2160 * Update the dcache to reflect the move of a file name. Negative
2161 * dcache entries should not be moved in this way. Caller must hold
2162 * rename_lock, the i_mutex of the source and target directories,
2163 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2164 */
2165 static void __d_move(struct dentry * dentry, struct dentry * target)
2166 {
2167 if (!dentry->d_inode)
2168 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2169
2170 BUG_ON(d_ancestor(dentry, target));
2171 BUG_ON(d_ancestor(target, dentry));
2172
2173 dentry_lock_for_move(dentry, target);
2174
2175 write_seqcount_begin(&dentry->d_seq);
2176 write_seqcount_begin(&target->d_seq);
2177
2178 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2179
2180 /*
2181 * Move the dentry to the target hash queue. Don't bother checking
2182 * for the same hash queue because of how unlikely it is.
2183 */
2184 __d_drop(dentry);
2185 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
2186
2187 /* Unhash the target: dput() will then get rid of it */
2188 __d_drop(target);
2189
2190 list_del(&dentry->d_u.d_child);
2191 list_del(&target->d_u.d_child);
2192
2193 /* Switch the names.. */
2194 switch_names(dentry, target);
2195 swap(dentry->d_name.hash, target->d_name.hash);
2196
2197 /* ... and switch the parents */
2198 if (IS_ROOT(dentry)) {
2199 dentry->d_parent = target->d_parent;
2200 target->d_parent = target;
2201 INIT_LIST_HEAD(&target->d_u.d_child);
2202 } else {
2203 swap(dentry->d_parent, target->d_parent);
2204
2205 /* And add them back to the (new) parent lists */
2206 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
2207 }
2208
2209 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2210
2211 write_seqcount_end(&target->d_seq);
2212 write_seqcount_end(&dentry->d_seq);
2213
2214 dentry_unlock_parents_for_move(dentry, target);
2215 spin_unlock(&target->d_lock);
2216 fsnotify_d_move(dentry);
2217 spin_unlock(&dentry->d_lock);
2218 }
2219
2220 /*
2221 * d_move - move a dentry
2222 * @dentry: entry to move
2223 * @target: new dentry
2224 *
2225 * Update the dcache to reflect the move of a file name. Negative
2226 * dcache entries should not be moved in this way. See the locking
2227 * requirements for __d_move.
2228 */
2229 void d_move(struct dentry *dentry, struct dentry *target)
2230 {
2231 write_seqlock(&rename_lock);
2232 __d_move(dentry, target);
2233 write_sequnlock(&rename_lock);
2234 }
2235 EXPORT_SYMBOL(d_move);
2236
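/*
 * Typical d_move() caller (a sketch): after a filesystem's ->rename()
 * succeeds, the VFS rename path moves the dcache entry while both
 * directories are still held by lock_rename():
 *
 *	trap = lock_rename(new_dir, old_dir);
 *	...
 *	error = old_dir->d_inode->i_op->rename(old_dir->d_inode, old_dentry,
 *					       new_dir->d_inode, new_dentry);
 *	if (!error)
 *		d_move(old_dentry, new_dentry);
 *	...
 *	unlock_rename(new_dir, old_dir);
 */
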
2237 /**
2238 * d_ancestor - search for an ancestor
2239 * @p1: ancestor dentry
2240 * @p2: child dentry
2241 *
2242 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2243 * an ancestor of p2, else NULL.
2244 */
2245 struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
2246 {
2247 struct dentry *p;
2248
2249 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
2250 if (p->d_parent == p1)
2251 return p;
2252 }
2253 return NULL;
2254 }
2255
2256 /*
2257 * This helper attempts to cope with remotely renamed directories
2258 *
2259 * It assumes that the caller is already holding
2260 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
2261 *
2262 * Note: If ever the locking in lock_rename() changes, then please
2263 * remember to update this too...
2264 */
2265 static struct dentry *__d_unalias(struct inode *inode,
2266 struct dentry *dentry, struct dentry *alias)
2267 {
2268 struct mutex *m1 = NULL, *m2 = NULL;
2269 struct dentry *ret;
2270
2271 /* If alias and dentry share a parent, then no extra locks required */
2272 if (alias->d_parent == dentry->d_parent)
2273 goto out_unalias;
2274
2275 /* See lock_rename() */
2276 ret = ERR_PTR(-EBUSY);
2277 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2278 goto out_err;
2279 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2280 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2281 goto out_err;
2282 m2 = &alias->d_parent->d_inode->i_mutex;
2283 out_unalias:
2284 __d_move(alias, dentry);
2285 ret = alias;
2286 out_err:
2287 spin_unlock(&inode->i_lock);
2288 if (m2)
2289 mutex_unlock(m2);
2290 if (m1)
2291 mutex_unlock(m1);
2292 return ret;
2293 }
2294
2295 /*
2296 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2297 * named dentry in place of the dentry to be replaced.
2298 * Returns with anon->d_lock held!
2299 */
2300 static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2301 {
2302 struct dentry *dparent, *aparent;
2303
2304 dentry_lock_for_move(anon, dentry);
2305
2306 write_seqcount_begin(&dentry->d_seq);
2307 write_seqcount_begin(&anon->d_seq);
2308
2309 dparent = dentry->d_parent;
2310 aparent = anon->d_parent;
2311
2312 switch_names(dentry, anon);
2313 swap(dentry->d_name.hash, anon->d_name.hash);
2314
2315 dentry->d_parent = (aparent == anon) ? dentry : aparent;
2316 list_del(&dentry->d_u.d_child);
2317 if (!IS_ROOT(dentry))
2318 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2319 else
2320 INIT_LIST_HEAD(&dentry->d_u.d_child);
2321
2322 anon->d_parent = (dparent == dentry) ? anon : dparent;
2323 list_del(&anon->d_u.d_child);
2324 if (!IS_ROOT(anon))
2325 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
2326 else
2327 INIT_LIST_HEAD(&anon->d_u.d_child);
2328
2329 write_seqcount_end(&dentry->d_seq);
2330 write_seqcount_end(&anon->d_seq);
2331
2332 dentry_unlock_parents_for_move(anon, dentry);
2333 spin_unlock(&dentry->d_lock);
2334
2335 /* anon->d_lock still locked, returns locked */
2336 anon->d_flags &= ~DCACHE_DISCONNECTED;
2337 }
2338
2339 /**
2340 * d_materialise_unique - introduce an inode into the tree
2341 * @dentry: candidate dentry
2342 * @inode: inode to bind to the dentry, to which aliases may be attached
2343 *
2344 * Introduces a dentry into the tree, substituting an extant disconnected
2345 * root directory alias in its place if there is one. Caller must hold the
2346 * i_mutex of the parent directory.
2347 */
2348 struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2349 {
2350 struct dentry *actual;
2351
2352 BUG_ON(!d_unhashed(dentry));
2353
2354 if (!inode) {
2355 actual = dentry;
2356 __d_instantiate(dentry, NULL);
2357 d_rehash(actual);
2358 goto out_nolock;
2359 }
2360
2361 spin_lock(&inode->i_lock);
2362
2363 if (S_ISDIR(inode->i_mode)) {
2364 struct dentry *alias;
2365
2366 /* Does an aliased dentry already exist? */
2367 alias = __d_find_alias(inode, 0);
2368 if (alias) {
2369 actual = alias;
2370 write_seqlock(&rename_lock);
2371
2372 if (d_ancestor(alias, dentry)) {
2373 /* Check for loops */
2374 actual = ERR_PTR(-ELOOP);
2375 } else if (IS_ROOT(alias)) {
2376 /* Is this an anonymous mountpoint that we
2377 * could splice into our tree? */
2378 __d_materialise_dentry(dentry, alias);
2379 write_sequnlock(&rename_lock);
2380 __d_drop(alias);
2381 goto found;
2382 } else {
2383 /* Nope, but we must(!) avoid directory
2384 * aliasing */
2385 actual = __d_unalias(inode, dentry, alias);
2386 }
2387 write_sequnlock(&rename_lock);
2388 if (IS_ERR(actual)) {
2389 if (PTR_ERR(actual) == -ELOOP)
2390 pr_warn_ratelimited(
2391 "VFS: Lookup of '%s' in %s %s"
2392 " would have caused loop\n",
2393 dentry->d_name.name,
2394 inode->i_sb->s_type->name,
2395 inode->i_sb->s_id);
2396 dput(alias);
2397 }
2398 goto out_nolock;
2399 }
2400 }
2401
2402 /* Add a unique reference */
2403 actual = __d_instantiate_unique(dentry, inode);
2404 if (!actual)
2405 actual = dentry;
2406 else
2407 BUG_ON(!d_unhashed(actual));
2408
2409 spin_lock(&actual->d_lock);
2410 found:
2411 _d_rehash(actual);
2412 spin_unlock(&actual->d_lock);
2413 spin_unlock(&inode->i_lock);
2414 out_nolock:
2415 if (actual == dentry) {
2416 security_d_instantiate(dentry, inode);
2417 return NULL;
2418 }
2419
2420 iput(inode);
2421 return actual;
2422 }
2423 EXPORT_SYMBOL_GPL(d_materialise_unique);
2424
2425 static int prepend(char **buffer, int *buflen, const char *str, int namelen)
2426 {
2427 *buflen -= namelen;
2428 if (*buflen < 0)
2429 return -ENAMETOOLONG;
2430 *buffer -= namelen;
2431 memcpy(*buffer, str, namelen);
2432 return 0;
2433 }
2434
2435 static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2436 {
2437 return prepend(buffer, buflen, name->name, name->len);
2438 }
2439
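/*
 * prepend() fills its buffer right to left, which is why the path
 * functions below return a pointer into the buffer rather than the
 * buffer start. Worked example with an 8-byte buffer, p = buf + 8,
 * len = 8 ('0' is the NUL terminator, '.' an untouched byte):
 *
 *	prepend(&p, &len, "\0", 1);	buf = [.......0]  p at offset 7
 *	prepend(&p, &len, "bar", 3);	buf = [....bar0]  p at offset 4
 *	prepend(&p, &len, "/", 1);	buf = [.../bar0]  p at offset 3
 */
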
2440 /**
2441 * prepend_path - Prepend path string to a buffer
2442 * @path: the dentry/vfsmount to report
2443 * @root: root vfsmnt/dentry
2444 * @buffer: pointer to the end of the buffer
2445 * @buflen: pointer to buffer length
2446 *
2447 * Caller holds the rename_lock.
2448 */
2449 static int prepend_path(const struct path *path,
2450 const struct path *root,
2451 char **buffer, int *buflen)
2452 {
2453 struct dentry *dentry = path->dentry;
2454 struct vfsmount *vfsmnt = path->mnt;
2455 struct mount *mnt = real_mount(vfsmnt);
2456 bool slash = false;
2457 int error = 0;
2458
2459 br_read_lock(vfsmount_lock);
2460 while (dentry != root->dentry || vfsmnt != root->mnt) {
2461 struct dentry * parent;
2462
2463 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
2464 /* Global root? */
2465 if (!mnt_has_parent(mnt))
2466 goto global_root;
2467 dentry = mnt->mnt.mnt_mountpoint;
2468 mnt = mnt->mnt_parent;
2469 vfsmnt = &mnt->mnt;
2470 continue;
2471 }
2472 parent = dentry->d_parent;
2473 prefetch(parent);
2474 spin_lock(&dentry->d_lock);
2475 error = prepend_name(buffer, buflen, &dentry->d_name);
2476 spin_unlock(&dentry->d_lock);
2477 if (!error)
2478 error = prepend(buffer, buflen, "/", 1);
2479 if (error)
2480 break;
2481
2482 slash = true;
2483 dentry = parent;
2484 }
2485
2486 if (!error && !slash)
2487 error = prepend(buffer, buflen, "/", 1);
2488
2489 out:
2490 br_read_unlock(vfsmount_lock);
2491 return error;
2492
2493 global_root:
2494 /*
2495 * Filesystems needing to implement special "root names"
2496 * should do so with ->d_dname()
2497 */
2498 if (IS_ROOT(dentry) &&
2499 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2500 WARN(1, "Root dentry has weird name <%.*s>\n",
2501 (int) dentry->d_name.len, dentry->d_name.name);
2502 }
2503 if (!slash)
2504 error = prepend(buffer, buflen, "/", 1);
2505 if (!error)
2506 error = vfsmnt->mnt_ns ? 1 : 2;
2507 goto out;
2508 }
2509
2510 /**
2511 * __d_path - return the path of a dentry
2512 * @path: the dentry/vfsmount to report
2513 * @root: root vfsmnt/dentry
2514 * @buf: buffer to return value in
2515 * @buflen: buffer length
2516 *
2517 * Convert a dentry into an ASCII path name.
2518 *
2519 * Returns a pointer into the buffer or an error code if the
2520 * path was too long.
2521 *
2522 * "buflen" should be positive.
2523 *
2524 * If the path is not reachable from the supplied root, return %NULL.
2525 */
2526 char *__d_path(const struct path *path,
2527 const struct path *root,
2528 char *buf, int buflen)
2529 {
2530 char *res = buf + buflen;
2531 int error;
2532
2533 prepend(&res, &buflen, "\0", 1);
2534 write_seqlock(&rename_lock);
2535 error = prepend_path(path, root, &res, &buflen);
2536 write_sequnlock(&rename_lock);
2537
2538 if (error < 0)
2539 return ERR_PTR(error);
2540 if (error > 0)
2541 return NULL;
2542 return res;
2543 }
2544
2545 char *d_absolute_path(const struct path *path,
2546 char *buf, int buflen)
2547 {
2548 struct path root = {};
2549 char *res = buf + buflen;
2550 int error;
2551
2552 prepend(&res, &buflen, "\0", 1);
2553 write_seqlock(&rename_lock);
2554 error = prepend_path(path, &root, &res, &buflen);
2555 write_sequnlock(&rename_lock);
2556
2557 if (error > 1)
2558 error = -EINVAL;
2559 if (error < 0)
2560 return ERR_PTR(error);
2561 return res;
2562 }
2563
2564 /*
2565 * same as __d_path but appends "(deleted)" for unlinked files.
2566 */
2567 static int path_with_deleted(const struct path *path,
2568 const struct path *root,
2569 char **buf, int *buflen)
2570 {
2571 prepend(buf, buflen, "\0", 1);
2572 if (d_unlinked(path->dentry)) {
2573 int error = prepend(buf, buflen, " (deleted)", 10);
2574 if (error)
2575 return error;
2576 }
2577
2578 return prepend_path(path, root, buf, buflen);
2579 }
2580
2581 static int prepend_unreachable(char **buffer, int *buflen)
2582 {
2583 return prepend(buffer, buflen, "(unreachable)", 13);
2584 }
2585
2586 /**
2587 * d_path - return the path of a dentry
2588 * @path: path to report
2589 * @buf: buffer to return value in
2590 * @buflen: buffer length
2591 *
2592 * Convert a dentry into an ASCII path name. If the entry has been deleted
2593 * the string " (deleted)" is appended. Note that this is ambiguous.
2594 *
2595 * Returns a pointer into the buffer or an error code if the path was
2596 * too long. Note: Callers should use the returned pointer, not the passed
2597 * in buffer, to use the name! The implementation often starts at an offset
2598 * into the buffer, and may leave 0 bytes at the start.
2599 *
2600 * "buflen" should be positive.
2601 */
2602 char *d_path(const struct path *path, char *buf, int buflen)
2603 {
2604 char *res = buf + buflen;
2605 struct path root;
2606 int error;
2607
2608 /*
2609 * We have various synthetic filesystems that never get mounted. On
2610 * these filesystems dentries are never used for lookup purposes, and
2611 * thus don't need to be hashed. They also don't need a name until a
2612 * user wants to identify the object in /proc/pid/fd/. The little hack
2613 * below allows us to generate a name for these objects on demand:
2614 */
2615 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2616 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2617
2618 get_fs_root(current->fs, &root);
2619 write_seqlock(&rename_lock);
2620 error = path_with_deleted(path, &root, &res, &buflen);
2621 if (error < 0)
2622 res = ERR_PTR(error);
2623 write_sequnlock(&rename_lock);
2624 path_put(&root);
2625 return res;
2626 }
2627 EXPORT_SYMBOL(d_path);
2628
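/*
 * Typical d_path() use (a sketch): let it build the name at the end of
 * a whole page and use the pointer it returns:
 *
 *	char *page = (char *)__get_free_page(GFP_KERNEL);
 *	if (page) {
 *		char *p = d_path(&file->f_path, page, PAGE_SIZE);
 *		if (!IS_ERR(p))
 *			pr_info("path: %s\n", p);
 *		free_page((unsigned long)page);
 *	}
 */
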
2629 /**
2630 * d_path_with_unreachable - return the path of a dentry
2631 * @path: path to report
2632 * @buf: buffer to return value in
2633 * @buflen: buffer length
2634 *
2635 * The difference from d_path() is that this prepends "(unreachable)"
2636 * to paths which are unreachable from the current process' root.
2637 */
2638 char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2639 {
2640 char *res = buf + buflen;
2641 struct path root;
2642 int error;
2643
2644 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2645 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2646
2647 get_fs_root(current->fs, &root);
2648 write_seqlock(&rename_lock);
2649 error = path_with_deleted(path, &root, &res, &buflen);
2650 if (error > 0)
2651 error = prepend_unreachable(&res, &buflen);
2652 write_sequnlock(&rename_lock);
2653 path_put(&root);
2654 if (error)
2655 res = ERR_PTR(error);
2656
2657 return res;
2658 }
2659
2660 /*
2661 * Helper function for dentry_operations.d_dname() members
2662 */
2663 char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2664 const char *fmt, ...)
2665 {
2666 va_list args;
2667 char temp[64];
2668 int sz;
2669
2670 va_start(args, fmt);
2671 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2672 va_end(args);
2673
2674 if (sz > sizeof(temp) || sz > buflen)
2675 return ERR_PTR(-ENAMETOOLONG);
2676
2677 buffer += buflen - sz;
2678 return memcpy(buffer, temp, sz);
2679 }
2680
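/*
 * A typical ->d_dname() built on dynamic_dname(), modelled on the
 * pipefs one:
 *
 *	static char *pipefs_dname(struct dentry *dentry, char *buffer,
 *				  int buflen)
 *	{
 *		return dynamic_dname(dentry, buffer, buflen, "pipe:[%lu]",
 *				dentry->d_inode->i_ino);
 *	}
 */
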
2681 /*
2682 * Write full pathname from the root of the filesystem into the buffer.
2683 */
2684 static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
2685 {
2686 char *end = buf + buflen;
2687 char *retval;
2688
2689 prepend(&end, &buflen, "\0", 1);
2690 if (buflen < 1)
2691 goto Elong;
2692 /* Get '/' right */
2693 retval = end-1;
2694 *retval = '/';
2695
2696 while (!IS_ROOT(dentry)) {
2697 struct dentry *parent = dentry->d_parent;
2698 int error;
2699
2700 prefetch(parent);
2701 spin_lock(&dentry->d_lock);
2702 error = prepend_name(&end, &buflen, &dentry->d_name);
2703 spin_unlock(&dentry->d_lock);
2704 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
2705 goto Elong;
2706
2707 retval = end;
2708 dentry = parent;
2709 }
2710 return retval;
2711 Elong:
2712 return ERR_PTR(-ENAMETOOLONG);
2713 }
2714
2715 char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2716 {
2717 char *retval;
2718
2719 write_seqlock(&rename_lock);
2720 retval = __dentry_path(dentry, buf, buflen);
2721 write_sequnlock(&rename_lock);
2722
2723 return retval;
2724 }
2725 EXPORT_SYMBOL(dentry_path_raw);
2726
2727 char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2728 {
2729 char *p = NULL;
2730 char *retval;
2731
2732 write_seqlock(&rename_lock);
2733 if (d_unlinked(dentry)) {
2734 p = buf + buflen;
2735 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2736 goto Elong;
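/*
 * "//deleted" was prepended including its NUL (10 bytes); giving
 * one byte back lets __dentry_path()'s own NUL land on the first
 * '/', which is restored below once the path has been built.
 */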
2737 buflen++;
2738 }
2739 retval = __dentry_path(dentry, buf, buflen);
2740 write_sequnlock(&rename_lock);
2741 if (!IS_ERR(retval) && p)
2742 *p = '/'; /* restore '/' overridden with '\0' */
2743 return retval;
2744 Elong:
2745 return ERR_PTR(-ENAMETOOLONG);
2746 }
2747
2748 /*
2749 * NOTE! The user-level library version returns a
2750 * character pointer. The kernel system call just
2751 * returns the length of the buffer filled (which
2752 * includes the ending '\0' character), or a negative
2753 * error value. So libc would do something like
2754 *
2755 * char *getcwd(char * buf, size_t size)
2756 * {
2757 * int retval;
2758 *
2759 * retval = sys_getcwd(buf, size);
2760 * if (retval >= 0)
2761 * return buf;
2762 * errno = -retval;
2763 * return NULL;
2764 * }
2765 */
2766 SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
2767 {
2768 int error;
2769 struct path pwd, root;
2770 char *page = (char *) __get_free_page(GFP_USER);
2771
2772 if (!page)
2773 return -ENOMEM;
2774
2775 get_fs_root_and_pwd(current->fs, &root, &pwd);
2776
2777 error = -ENOENT;
2778 write_seqlock(&rename_lock);
2779 if (!d_unlinked(pwd.dentry)) {
2780 unsigned long len;
2781 char *cwd = page + PAGE_SIZE;
2782 int buflen = PAGE_SIZE;
2783
2784 prepend(&cwd, &buflen, "\0", 1);
2785 error = prepend_path(&pwd, &root, &cwd, &buflen);
2786 write_sequnlock(&rename_lock);
2787
2788 if (error < 0)
2789 goto out;
2790
2791 /* Unreachable from current root */
2792 if (error > 0) {
2793 error = prepend_unreachable(&cwd, &buflen);
2794 if (error)
2795 goto out;
2796 }
2797
2798 error = -ERANGE;
2799 len = PAGE_SIZE + page - cwd;
2800 if (len <= size) {
2801 error = len;
2802 if (copy_to_user(buf, cwd, len))
2803 error = -EFAULT;
2804 }
2805 } else {
2806 write_sequnlock(&rename_lock);
2807 }
2808
2809 out:
2810 path_put(&pwd);
2811 path_put(&root);
2812 free_page((unsigned long) page);
2813 return error;
2814 }
2815
2816 /*
2817 * Test whether new_dentry is a subdirectory of old_dentry.
2818 *
2819 * Trivially implemented using the dcache structure.
2820 */
2821
2822 /**
2823 * is_subdir - is new dentry a subdirectory of old_dentry
2824 * @new_dentry: new dentry
2825 * @old_dentry: old dentry
2826 *
2827 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth),
2828 * or if the two dentries are the same; returns 0 otherwise.
2829 * Caller must ensure that "new_dentry" is pinned before calling is_subdir().
2830 */
2831
2832 int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
2833 {
2834 int result;
2835 unsigned seq;
2836
2837 if (new_dentry == old_dentry)
2838 return 1;
2839
2840 do {
2841 /* for restarting inner loop in case of seq retry */
2842 seq = read_seqbegin(&rename_lock);
2843 /*
2844 * Need rcu_read_lock() to protect against d_parent being
2845 * trashed by a concurrent d_move()
2846 */
2847 rcu_read_lock();
2848 if (d_ancestor(old_dentry, new_dentry))
2849 result = 1;
2850 else
2851 result = 0;
2852 rcu_read_unlock();
2853 } while (read_seqretry(&rename_lock, seq));
2854
2855 return result;
2856 }
2857
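/*
 * Example is_subdir() use (a sketch, names hypothetical): refuse an
 * operation on anything living inside a protected subtree:
 *
 *	if (is_subdir(victim, protected_root))
 *		return -EPERM;
 */
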
2858 void d_genocide(struct dentry *root)
2859 {
2860 struct dentry *this_parent;
2861 struct list_head *next;
2862 unsigned seq;
2863 int locked = 0;
2864
2865 seq = read_seqbegin(&rename_lock);
2866 again:
2867 this_parent = root;
2868 spin_lock(&this_parent->d_lock);
2869 repeat:
2870 next = this_parent->d_subdirs.next;
2871 resume:
2872 while (next != &this_parent->d_subdirs) {
2873 struct list_head *tmp = next;
2874 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
2875 next = tmp->next;
2876
2877 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2878 if (d_unhashed(dentry) || !dentry->d_inode) {
2879 spin_unlock(&dentry->d_lock);
2880 continue;
2881 }
2882 if (!list_empty(&dentry->d_subdirs)) {
2883 spin_unlock(&this_parent->d_lock);
2884 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
2885 this_parent = dentry;
2886 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
2887 goto repeat;
2888 }
2889 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2890 dentry->d_flags |= DCACHE_GENOCIDE;
2891 dentry->d_count--;
2892 }
2893 spin_unlock(&dentry->d_lock);
2894 }
2895 if (this_parent != root) {
2896 struct dentry *child = this_parent;
2897 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2898 this_parent->d_flags |= DCACHE_GENOCIDE;
2899 this_parent->d_count--;
2900 }
2901 this_parent = try_to_ascend(this_parent, locked, seq);
2902 if (!this_parent)
2903 goto rename_retry;
2904 next = child->d_u.d_child.next;
2905 goto resume;
2906 }
2907 spin_unlock(&this_parent->d_lock);
2908 if (!locked && read_seqretry(&rename_lock, seq))
2909 goto rename_retry;
2910 if (locked)
2911 write_sequnlock(&rename_lock);
2912 return;
2913
2914 rename_retry:
2915 locked = 1;
2916 write_seqlock(&rename_lock);
2917 goto again;
2918 }
2919
2920 /**
2921 * find_inode_number - check for dentry with name
2922 * @dir: directory to check
2923 * @name: Name to find.
2924 *
2925 * Check whether a dentry already exists for the given name,
2926 * and return the inode number if it has an inode. Otherwise
2927 * 0 is returned.
2928 *
2929 * This routine is used to post-process directory listings for
2930 * filesystems using synthetic inode numbers, and is necessary
2931 * to keep getcwd() working.
2932 */
2933
2934 ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2935 {
2936 struct dentry * dentry;
2937 ino_t ino = 0;
2938
2939 dentry = d_hash_and_lookup(dir, name);
2940 if (dentry) {
2941 if (dentry->d_inode)
2942 ino = dentry->d_inode->i_ino;
2943 dput(dentry);
2944 }
2945 return ino;
2946 }
2947 EXPORT_SYMBOL(find_inode_number);
2948
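/*
 * Example find_inode_number() use (a sketch, the helper hypothetical):
 * a filesystem with synthetic inode numbers can prefer a number already
 * cached for the name when emitting a readdir entry:
 *
 *	ino = find_inode_number(dir_dentry, &qname);
 *	if (!ino)
 *		ino = make_synthetic_ino(dir, name);
 */
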
2949 static __initdata unsigned long dhash_entries;
2950 static int __init set_dhash_entries(char *str)
2951 {
2952 if (!str)
2953 return 0;
2954 dhash_entries = simple_strtoul(str, &str, 0);
2955 return 1;
2956 }
2957 __setup("dhash_entries=", set_dhash_entries);
2958
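/*
 * The dentry hash table size can be forced from the kernel command
 * line, e.g.:
 *
 *	dhash_entries=65536
 *
 * otherwise alloc_large_system_hash() below sizes it based on available
 * memory.
 */
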
2959 static void __init dcache_init_early(void)
2960 {
2961 int loop;
2962
2963 /* If hashes are distributed across NUMA nodes, defer
2964 * hash allocation until vmalloc space is available.
2965 */
2966 if (hashdist)
2967 return;
2968
2969 dentry_hashtable =
2970 alloc_large_system_hash("Dentry cache",
2971 sizeof(struct hlist_bl_head),
2972 dhash_entries,
2973 13,
2974 HASH_EARLY,
2975 &d_hash_shift,
2976 &d_hash_mask,
2977 0);
2978
2979 for (loop = 0; loop < (1 << d_hash_shift); loop++)
2980 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
2981 }
2982
2983 static void __init dcache_init(void)
2984 {
2985 int loop;
2986
2987 /*
2988 * A constructor could be added for stable state like the lists,
2989 * but it is probably not worth it because of the cache nature
2990 * of the dcache.
2991 */
2992 dentry_cache = KMEM_CACHE(dentry,
2993 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
2994
2995 /* Hash may have been set up in dcache_init_early */
2996 if (!hashdist)
2997 return;
2998
2999 dentry_hashtable =
3000 alloc_large_system_hash("Dentry cache",
3001 sizeof(struct hlist_bl_head),
3002 dhash_entries,
3003 13,
3004 0,
3005 &d_hash_shift,
3006 &d_hash_mask,
3007 0);
3008
3009 for (loop = 0; loop < (1 << d_hash_shift); loop++)
3010 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
3011 }
3012
3013 /* SLAB cache for __getname() consumers */
3014 struct kmem_cache *names_cachep __read_mostly;
3015 EXPORT_SYMBOL(names_cachep);
3016
3017 EXPORT_SYMBOL(d_genocide);
3018
3019 void __init vfs_caches_init_early(void)
3020 {
3021 dcache_init_early();
3022 inode_init_early();
3023 }
3024
3025 void __init vfs_caches_init(unsigned long mempages)
3026 {
3027 unsigned long reserve;
3028
3029 /* Base hash sizes on available memory, with a reserve equal to
3030 150% of current kernel size */
3031
3032 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3033 mempages -= reserve;
3034
3035 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3036 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3037
3038 dcache_init();
3039 inode_init();
3040 files_init(mempages);
3041 mnt_init();
3042 bdev_cache_init();
3043 chrdev_init();
3044 }