]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - fs/dcache.c
fs: add a DCACHE_NEED_LOOKUP flag for d_flags
[mirror_ubuntu-artful-kernel.git] / fs / dcache.c
CommitLineData
1da177e4
LT
1/*
2 * fs/dcache.c
3 *
4 * Complete reimplementation
5 * (C) 1997 Thomas Schoebel-Theuer,
6 * with heavy changes by Linus Torvalds
7 */
8
9/*
10 * Notes on the allocation strategy:
11 *
12 * The dcache is a master of the icache - whenever a dcache entry
13 * exists, the inode will always exist. "iput()" is done either when
14 * the dcache entry is deleted or garbage collected.
15 */
16
1da177e4
LT
17#include <linux/syscalls.h>
18#include <linux/string.h>
19#include <linux/mm.h>
20#include <linux/fs.h>
7a91bf7f 21#include <linux/fsnotify.h>
1da177e4
LT
22#include <linux/slab.h>
23#include <linux/init.h>
1da177e4
LT
24#include <linux/hash.h>
25#include <linux/cache.h>
26#include <linux/module.h>
27#include <linux/mount.h>
28#include <linux/file.h>
29#include <asm/uaccess.h>
30#include <linux/security.h>
31#include <linux/seqlock.h>
32#include <linux/swap.h>
33#include <linux/bootmem.h>
5ad4e53b 34#include <linux/fs_struct.h>
613afbf8 35#include <linux/hardirq.h>
ceb5bdc2
NP
36#include <linux/bit_spinlock.h>
37#include <linux/rculist_bl.h>
268bb0ce 38#include <linux/prefetch.h>
07f3f05c 39#include "internal.h"
1da177e4 40
789680d1
NP
41/*
42 * Usage:
873feea0
NP
43 * dcache->d_inode->i_lock protects:
44 * - i_dentry, d_alias, d_inode of aliases
ceb5bdc2
NP
45 * dcache_hash_bucket lock protects:
46 * - the dcache hash table
47 * s_anon bl list spinlock protects:
48 * - the s_anon list (see __d_drop)
23044507
NP
49 * dcache_lru_lock protects:
50 * - the dcache lru lists and counters
51 * d_lock protects:
52 * - d_flags
53 * - d_name
54 * - d_lru
b7ab39f6 55 * - d_count
da502956 56 * - d_unhashed()
2fd6b7f5
NP
57 * - d_parent and d_subdirs
58 * - children's d_child and d_parent
b23fb0a6 59 * - d_alias, d_inode
789680d1
NP
60 *
61 * Ordering:
873feea0 62 * dentry->d_inode->i_lock
b5c84bf6
NP
63 * dentry->d_lock
64 * dcache_lru_lock
ceb5bdc2
NP
65 * dcache_hash_bucket lock
66 * s_anon lock
789680d1 67 *
da502956
NP
68 * If there is an ancestor relationship:
69 * dentry->d_parent->...->d_parent->d_lock
70 * ...
71 * dentry->d_parent->d_lock
72 * dentry->d_lock
73 *
74 * If no ancestor relationship:
789680d1
NP
75 * if (dentry1 < dentry2)
76 * dentry1->d_lock
77 * dentry2->d_lock
78 */
fa3536cc 79int sysctl_vfs_cache_pressure __read_mostly = 100;
1da177e4
LT
80EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);
81
23044507 82static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
74c3cbe3 83__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);
1da177e4 84
949854d0 85EXPORT_SYMBOL(rename_lock);
1da177e4 86
e18b890b 87static struct kmem_cache *dentry_cache __read_mostly;
1da177e4 88
1da177e4
LT
89/*
90 * This is the single most critical data structure when it comes
91 * to the dcache: the hashtable for lookups. Somebody should try
92 * to make this good - I've just made it work.
93 *
94 * This hash-function tries to avoid losing too many bits of hash
95 * information, yet avoid using a prime hash-size or similar.
96 */
97#define D_HASHBITS d_hash_shift
98#define D_HASHMASK d_hash_mask
99
fa3536cc
ED
100static unsigned int d_hash_mask __read_mostly;
101static unsigned int d_hash_shift __read_mostly;
ceb5bdc2 102
b07ad996 103static struct hlist_bl_head *dentry_hashtable __read_mostly;
ceb5bdc2 104
static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	/*
	 * Mix the parent pointer into the name hash so identical names
	 * under different parents land in different buckets; dividing by
	 * L1_CACHE_BYTES strips the always-zero low pointer bits.
	 */
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}
112
1da177e4
LT
113/* Statistics gathering. */
114struct dentry_stat_t dentry_stat = {
115 .age_limit = 45,
116};
117
3e880fb5 118static DEFINE_PER_CPU(unsigned int, nr_dentry);
312d3ca8
CH
119
120#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
3e880fb5
NP
121static int get_nr_dentry(void)
122{
123 int i;
124 int sum = 0;
125 for_each_possible_cpu(i)
126 sum += per_cpu(nr_dentry, i);
127 return sum < 0 ? 0 : sum;
128}
129
/*
 * sysctl handler for fs.dentry-state: refresh the global count from the
 * per-cpu counters, then hand off to the generic integer-vector handler.
 */
int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
136#endif
137
/*
 * Final free of a dentry, invoked directly or as an RCU callback.
 * The dentry must already be unlinked from its inode's alias list.
 */
static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	/* names too long for the inline array were kmalloc'ed separately */
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}
147
/*
 * no locks, please.
 *
 * Caller guarantees the refcount has reached zero.  If the dentry was
 * ever visible to RCU-walk lookups (DCACHE_RCUACCESS), freeing must be
 * deferred through call_rcu(); otherwise it can be freed immediately.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}
164
31e6b01f
NP
/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 *
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier: bumping d_seq makes concurrent
	 * seqcount readers retry/fall out of rcu-walk. */
	write_seqcount_barrier(&dentry->d_seq);
}
178
1da177e4
LT
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 *
 * Both d_lock and (if an inode is attached) i_lock are dropped here,
 * before the potentially-sleeping iput()/d_iput() calls.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		/* last link gone: tell fsnotify before the inode is put */
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}
204
31e6b01f
NP
/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 *
 * Unlike dentry_iput(), the dentry stays live, so rcu-walk lookups
 * must be invalidated (dentry_rcuwalk_barrier) since d_inode changed.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}
226
/*
 * dentry_lru_(add|del|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	/* idempotent: only insert if not already on an LRU list */
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}
240
23044507
NP
/* Remove from the LRU and fix up counters; dcache_lru_lock held. */
static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}
247
da3bbdd4
KM
/* Remove @dentry from its sb LRU if present; d_lock held by caller. */
static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}
256
/*
 * Put @dentry at the tail of its sb's LRU (the end scanned first by
 * the shrinker), adding it if it was not on the list; d_lock held.
 */
static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}
269
d52b9086
MS
/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}
303
789680d1
NP
/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		/* disconnected (anonymous) dentries live on s_anon,
		 * not in the name-hash table */
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);

		/* unhashed: make rcu-walk lookups in flight retry */
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);
337
/* Locked wrapper around __d_drop(); see the comment there. */
void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);
345
44396f4b
JB
/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);
363
77812a1e
NP
/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 *
 * i_lock and the parent's d_lock are taken with trylock because we
 * hold d_lock, which is the wrong way round for the documented lock
 * order (i_lock > d_lock, parent > child); on contention we back off
 * completely and let the caller retry with the same dentry.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry;	/* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}
401
1da177e4
LT
402/*
403 * This is dput
404 *
405 * This is complicated by the fact that we do not want to put
406 * dentries that are no longer on any hash chain on the unused
407 * list: we'd much rather just get rid of them immediately.
408 *
409 * However, that implies that we have to traverse the dentry
410 * tree upwards to the parents which might _also_ now be
411 * scheduled for deletion (it may have been only waiting for
412 * its last child to go away).
413 *
414 * This tail recursion is done by hand as we don't want to depend
415 * on the compiler to always get this right (gcc generally doesn't).
416 * Real recursion would eat up our stack space.
417 */
418
/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	/* dropping the last ref may call iput() and block */
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	/* filesystem may veto caching via its d_delete op */
	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	/* dentry_kill() returns non-NULL when trylocks failed (same
	 * dentry) or a parent that now needs its ref dropped — loop. */
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
1da177e4
LT
472
/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		/* must drop d_lock: shrink_dcache_parent takes locks */
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);
1da177e4 527
/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}
533
/* Take a reference on @dentry, acquiring d_lock internally. */
static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}
540
b7ab39f6
NP
/*
 * dget_parent - take a reference on @dentry's parent.
 *
 * The parent pointer can change under us (rename), so read it under
 * RCU, lock the candidate, and re-check it is still the parent before
 * bumping its refcount; retry on a lost race.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	if (!ret) {
		rcu_read_unlock();
		goto out;
	}
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	/* a live child implies the parent already holds a reference */
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
out:
	return ret;
}
EXPORT_SYMBOL(dget_parent);
570
1da177e4
LT
/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 *
 * Caller holds inode->i_lock, which protects the i_dentry alias list.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				/* remember, keep scanning for a better one */
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		/* re-validate: the alias was unlocked in between */
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}
623
/* Locked wrapper: see __d_find_alias() for the selection rules. */
struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	/* unlocked emptiness check is an optimization; the real walk
	 * happens under i_lock */
	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);
1da177e4
LT
636
/*
 *	Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 *
 * Each unreferenced alias is unhashed and then released via dput(),
 * which performs the actual kill; the list walk restarts from scratch
 * after every drop because i_lock was released.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);
1da177e4
LT
661
662/*
77812a1e
NP
663 * Try to throw away a dentry - free the inode, dput the parent.
664 * Requires dentry->d_lock is held, and dentry->d_count == 0.
665 * Releases dentry->d_lock.
d702ccb3 666 *
77812a1e 667 * This may fail if locks cannot be acquired no problem, just try again.
1da177e4 668 */
77812a1e 669static void try_prune_one_dentry(struct dentry *dentry)
31f3e0b3 670 __releases(dentry->d_lock)
1da177e4 671{
77812a1e 672 struct dentry *parent;
d52b9086 673
77812a1e 674 parent = dentry_kill(dentry, 0);
d52b9086 675 /*
77812a1e
NP
676 * If dentry_kill returns NULL, we have nothing more to do.
677 * if it returns the same dentry, trylocks failed. In either
678 * case, just loop again.
679 *
680 * Otherwise, we need to prune ancestors too. This is necessary
681 * to prevent quadratic behavior of shrink_dcache_parent(), but
682 * is also expected to be beneficial in reducing dentry cache
683 * fragmentation.
d52b9086 684 */
77812a1e
NP
685 if (!parent)
686 return;
687 if (parent == dentry)
688 return;
689
690 /* Prune ancestors. */
691 dentry = parent;
d52b9086 692 while (dentry) {
b7ab39f6 693 spin_lock(&dentry->d_lock);
89e60548
NP
694 if (dentry->d_count > 1) {
695 dentry->d_count--;
696 spin_unlock(&dentry->d_lock);
697 return;
698 }
77812a1e 699 dentry = dentry_kill(dentry, 1);
d52b9086 700 }
1da177e4
LT
701}
702
/*
 * Kill every unreferenced dentry on a private list.
 *
 * RCU protects the list_entry dereference across the unlocked window;
 * after taking d_lock we re-check the entry is still the list tail
 * before acting on it.  The RCU read lock is dropped around
 * try_prune_one_dentry(), which can block in iput().
 */
static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			/* raced: someone changed the list; retry */
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}
737
/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb:   superblock to shrink dentry LRU.
 * @count: number of entries to prune
 * @flags: flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
{
	/* called from prune_dcache() and shrink_dcache_parent() */
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);
	int cnt = *count;

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		/* trylock: d_lock nests outside dcache_lru_lock, so
		 * back off entirely on contention */
		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it.  Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
		    dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--cnt)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);

	/* report back how many we did NOT manage to scan */
	*count = cnt;
}
793
/**
 * prune_dcache - shrink the dcache
 * @count: number of entries to try to free
 *
 * Shrink the dcache. This is done when we need more memory, or simply when we
 * need to unmount something (at which point we need to unuse all dentries).
 *
 * This function may fail to free any resources if all the dentries are in use.
 */
static void prune_dcache(int count)
{
	struct super_block *sb, *p = NULL;
	int w_count;
	int unused = dentry_stat.nr_unused;
	int prune_ratio;
	int pruned;

	if (unused == 0 || count == 0)
		return;
	if (count >= unused)
		prune_ratio = 1;
	else
		prune_ratio = unused / count;
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (list_empty(&sb->s_instances))
			continue;
		if (sb->s_nr_dentry_unused == 0)
			continue;
		sb->s_count++;
		/* Now, we reclaim unused dentries with fairness.
		 * We reclaim them same percentage from each superblock.
		 * We calculate number of dentries to scan on this sb
		 * as follows, but the implementation is arranged to avoid
		 * overflows:
		 * number of dentries to scan on this sb =
		 * count * (number of dentries on this sb /
		 * number of dentries in the machine)
		 */
		spin_unlock(&sb_lock);
		if (prune_ratio != 1)
			w_count = (sb->s_nr_dentry_unused / prune_ratio) + 1;
		else
			w_count = sb->s_nr_dentry_unused;
		pruned = w_count;
		/*
		 * We need to be sure this filesystem isn't being unmounted,
		 * otherwise we could race with generic_shutdown_super(), and
		 * end up holding a reference to an inode while the filesystem
		 * is unmounted.  So we try to get s_umount, and make sure
		 * s_root isn't NULL.
		 */
		if (down_read_trylock(&sb->s_umount)) {
			if ((sb->s_root != NULL) &&
			    (!list_empty(&sb->s_dentry_lru))) {
				__shrink_dcache_sb(sb, &w_count,
						DCACHE_REFERENCED);
				pruned -= w_count;
			}
			up_read(&sb->s_umount);
		}
		spin_lock(&sb_lock);
		/* drop the previous sb's pinning ref now we're back
		 * under sb_lock */
		if (p)
			__put_super(p);
		count -= pruned;
		p = sb;
		/* more work left to do? */
		if (count <= 0)
			break;
	}
	if (p)
		__put_super(p);
	spin_unlock(&sb_lock);
}
868
1da177e4
LT
869/**
870 * shrink_dcache_sb - shrink dcache for a superblock
871 * @sb: superblock
872 *
3049cfe2
CH
873 * Shrink the dcache for the specified super block. This is used to free
874 * the dcache before unmounting a file system.
1da177e4 875 */
3049cfe2 876void shrink_dcache_sb(struct super_block *sb)
1da177e4 877{
3049cfe2
CH
878 LIST_HEAD(tmp);
879
23044507 880 spin_lock(&dcache_lru_lock);
3049cfe2
CH
881 while (!list_empty(&sb->s_dentry_lru)) {
882 list_splice_init(&sb->s_dentry_lru, &tmp);
ec33679d 883 spin_unlock(&dcache_lru_lock);
3049cfe2 884 shrink_dentry_list(&tmp);
ec33679d 885 spin_lock(&dcache_lru_lock);
3049cfe2 886 }
23044507 887 spin_unlock(&dcache_lru_lock);
1da177e4 888}
ec4f8605 889EXPORT_SYMBOL(shrink_dcache_sb);
1da177e4 890
c636ebdb
DH
891/*
892 * destroy a single subtree of dentries for unmount
893 * - see the comments on shrink_dcache_for_umount() for a description of the
894 * locking
895 */
896static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
897{
898 struct dentry *parent;
f8713576 899 unsigned detached = 0;
c636ebdb
DH
900
901 BUG_ON(!IS_ROOT(dentry));
902
903 /* detach this root from the system */
23044507 904 spin_lock(&dentry->d_lock);
a4633357 905 dentry_lru_del(dentry);
c636ebdb 906 __d_drop(dentry);
da502956 907 spin_unlock(&dentry->d_lock);
c636ebdb
DH
908
909 for (;;) {
910 /* descend to the first leaf in the current subtree */
911 while (!list_empty(&dentry->d_subdirs)) {
912 struct dentry *loop;
913
914 /* this is a branch with children - detach all of them
915 * from the system in one go */
2fd6b7f5 916 spin_lock(&dentry->d_lock);
c636ebdb
DH
917 list_for_each_entry(loop, &dentry->d_subdirs,
918 d_u.d_child) {
2fd6b7f5
NP
919 spin_lock_nested(&loop->d_lock,
920 DENTRY_D_LOCK_NESTED);
a4633357 921 dentry_lru_del(loop);
c636ebdb 922 __d_drop(loop);
da502956 923 spin_unlock(&loop->d_lock);
c636ebdb 924 }
2fd6b7f5 925 spin_unlock(&dentry->d_lock);
c636ebdb
DH
926
927 /* move to the first child */
928 dentry = list_entry(dentry->d_subdirs.next,
929 struct dentry, d_u.d_child);
930 }
931
932 /* consume the dentries from this leaf up through its parents
933 * until we find one with children or run out altogether */
934 do {
935 struct inode *inode;
936
b7ab39f6 937 if (dentry->d_count != 0) {
c636ebdb
DH
938 printk(KERN_ERR
939 "BUG: Dentry %p{i=%lx,n=%s}"
940 " still in use (%d)"
941 " [unmount of %s %s]\n",
942 dentry,
943 dentry->d_inode ?
944 dentry->d_inode->i_ino : 0UL,
945 dentry->d_name.name,
b7ab39f6 946 dentry->d_count,
c636ebdb
DH
947 dentry->d_sb->s_type->name,
948 dentry->d_sb->s_id);
949 BUG();
950 }
951
2fd6b7f5 952 if (IS_ROOT(dentry)) {
c636ebdb 953 parent = NULL;
2fd6b7f5
NP
954 list_del(&dentry->d_u.d_child);
955 } else {
871c0067 956 parent = dentry->d_parent;
b7ab39f6
NP
957 spin_lock(&parent->d_lock);
958 parent->d_count--;
2fd6b7f5 959 list_del(&dentry->d_u.d_child);
b7ab39f6 960 spin_unlock(&parent->d_lock);
871c0067 961 }
c636ebdb 962
f8713576 963 detached++;
c636ebdb
DH
964
965 inode = dentry->d_inode;
966 if (inode) {
967 dentry->d_inode = NULL;
968 list_del_init(&dentry->d_alias);
969 if (dentry->d_op && dentry->d_op->d_iput)
970 dentry->d_op->d_iput(dentry, inode);
971 else
972 iput(inode);
973 }
974
975 d_free(dentry);
976
977 /* finished when we fall off the top of the tree,
978 * otherwise we ascend to the parent and move to the
979 * next sibling if there is one */
980 if (!parent)
312d3ca8 981 return;
c636ebdb 982 dentry = parent;
c636ebdb
DH
983 } while (list_empty(&dentry->d_subdirs));
984
985 dentry = list_entry(dentry->d_subdirs.next,
986 struct dentry, d_u.d_child);
987 }
988}
989
990/*
991 * destroy the dentries attached to a superblock on unmounting
b5c84bf6 992 * - we don't need to use dentry->d_lock because:
c636ebdb
DH
993 * - the superblock is detached from all mountings and open files, so the
994 * dentry trees will not be rearranged by the VFS
995 * - s_umount is write-locked, so the memory pressure shrinker will ignore
996 * any dentries belonging to this superblock that it comes across
997 * - the filesystem itself is no longer permitted to rearrange the dentries
998 * in this superblock
999 */
1000void shrink_dcache_for_umount(struct super_block *sb)
1001{
1002 struct dentry *dentry;
1003
/*
 * The caller must hold s_umount exclusively; if we can take it for
 * reading here, that contract has been violated.
 */
1004 if (down_read_trylock(&sb->s_umount))
1005 BUG();
1006
1007 dentry = sb->s_root;
1008 sb->s_root = NULL;
b7ab39f6
NP
/* drop the reference the superblock held on its root dentry */
1009 spin_lock(&dentry->d_lock);
1010 dentry->d_count--;
1011 spin_unlock(&dentry->d_lock);
c636ebdb
DH
1012 shrink_dcache_for_umount_subtree(dentry);
1013
ceb5bdc2
NP
/* also destroy the anonymous (disconnected) dentry trees on s_anon */
1014 while (!hlist_bl_empty(&sb->s_anon)) {
1015 dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
c636ebdb
DH
1016 shrink_dcache_for_umount_subtree(dentry);
1017 }
1018}
1019
c826cb7d
LT
1020/*
1021 * This tries to ascend one level of parenthood, but
1022 * we can race with renaming, so we need to re-check
1023 * the parenthood after dropping the lock and check
1024 * that the sequence number still matches.
1025 */
1026static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
1027{
1028 struct dentry *new = old->d_parent;
1029
/*
 * RCU pins the parent's memory while we drop old->d_lock and take
 * the parent's lock, so dereferencing 'new' stays safe even if the
 * dentry tree changes under us.
 */
1030 rcu_read_lock();
1031 spin_unlock(&old->d_lock);
1032 spin_lock(&new->d_lock);
1033
1034 /*
1035 * might go back up the wrong parent if we have had a rename
1036 * or deletion
1037 */
1038 if (new != old->d_parent ||
c83ce989 1039 (old->d_flags & DCACHE_DISCONNECTED) ||
c826cb7d
LT
1040 (!locked && read_seqretry(&rename_lock, seq))) {
1041 spin_unlock(&new->d_lock);
1042 new = NULL;
1043 }
1044 rcu_read_unlock();
/* NULL return tells the caller to restart its tree walk */
1045 return new;
1046}
1047
1048
1da177e4
LT
1049/*
1050 * Search for at least 1 mount point in the dentry's subdirs.
1051 * We descend to the next level whenever the d_subdirs
1052 * list is non-empty and continue searching.
1053 */
1054
1055/**
1056 * have_submounts - check for mounts over a dentry
1057 * @parent: dentry to check.
1058 *
1059 * Return true if the parent or its subdirectories contain
1060 * a mount point
1061 */
1da177e4
LT
1062int have_submounts(struct dentry *parent)
1063{
949854d0 1064 struct dentry *this_parent;
1da177e4 1065 struct list_head *next;
949854d0 1066 unsigned seq;
58db63d0 1067 int locked = 0;
949854d0 1068
949854d0 1069 seq = read_seqbegin(&rename_lock);
58db63d0
NP
/*
 * First pass runs under the rename_lock read seqlock; if a rename
 * races with us we retry with the write lock held (see rename_retry).
 */
1070again:
1071 this_parent = parent;
1da177e4 1072
1da177e4
LT
1073 if (d_mountpoint(parent))
1074 goto positive;
2fd6b7f5 1075 spin_lock(&this_parent->d_lock);
1da177e4
LT
1076repeat:
1077 next = this_parent->d_subdirs.next;
1078resume:
1079 while (next != &this_parent->d_subdirs) {
1080 struct list_head *tmp = next;
5160ee6f 1081 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1da177e4 1082 next = tmp->next;
2fd6b7f5
NP
1083
1084 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
1da177e4 1085 /* Have we found a mount point ? */
2fd6b7f5
NP
1086 if (d_mountpoint(dentry)) {
1087 spin_unlock(&dentry->d_lock);
1088 spin_unlock(&this_parent->d_lock);
1da177e4 1089 goto positive;
2fd6b7f5 1090 }
1da177e4 1091 if (!list_empty(&dentry->d_subdirs)) {
2fd6b7f5
NP
/*
 * Descend: hand the child's lock over as the new parent lock.
 * The spin_release/spin_acquire pair keeps lockdep's view of the
 * nesting consistent without actually dropping/retaking the lock.
 */
1092 spin_unlock(&this_parent->d_lock);
1093 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1da177e4 1094 this_parent = dentry;
2fd6b7f5 1095 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1da177e4
LT
1096 goto repeat;
1097 }
2fd6b7f5 1098 spin_unlock(&dentry->d_lock);
1da177e4
LT
1099 }
1100 /*
1101 * All done at this level ... ascend and resume the search.
1102 */
1103 if (this_parent != parent) {
c826cb7d
LT
1104 struct dentry *child = this_parent;
1105 this_parent = try_to_ascend(this_parent, locked, seq);
1106 if (!this_parent)
949854d0 1107 goto rename_retry;
949854d0 1108 next = child->d_u.d_child.next;
1da177e4
LT
1109 goto resume;
1110 }
2fd6b7f5 1111 spin_unlock(&this_parent->d_lock);
58db63d0 1112 if (!locked && read_seqretry(&rename_lock, seq))
949854d0 1113 goto rename_retry;
58db63d0
NP
1114 if (locked)
1115 write_sequnlock(&rename_lock);
1da177e4
LT
1116 return 0; /* No mount points found in tree */
1117positive:
58db63d0 1118 if (!locked && read_seqretry(&rename_lock, seq))
949854d0 1119 goto rename_retry;
58db63d0
NP
1120 if (locked)
1121 write_sequnlock(&rename_lock);
1da177e4 1122 return 1;
58db63d0
NP
1123
/* raced with a rename: redo the whole walk excluding further renames */
1124rename_retry:
1125 locked = 1;
1126 write_seqlock(&rename_lock);
1127 goto again;
1da177e4 1128}
ec4f8605 1129EXPORT_SYMBOL(have_submounts);
1da177e4
LT
1130
1131/*
1132 * Search the dentry child list for the specified parent,
1133 * and move any unused dentries to the end of the unused
1134 * list for prune_dcache(). We descend to the next level
1135 * whenever the d_subdirs list is non-empty and continue
1136 * searching.
1137 *
1138 * It returns zero iff there are no unused children,
1139 * otherwise it returns the number of children moved to
1140 * the end of the unused list. This may not be the total
1141 * number of unused children, because select_parent can
1142 * drop the lock and return early due to latency
1143 * constraints.
1144 */
1145static int select_parent(struct dentry * parent)
1146{
949854d0 1147 struct dentry *this_parent;
1da177e4 1148 struct list_head *next;
949854d0 1149 unsigned seq;
1da177e4 1150 int found = 0;
58db63d0 1151 int locked = 0;
1da177e4 1152
949854d0 1153 seq = read_seqbegin(&rename_lock);
58db63d0
NP
/*
 * Walk under the rename_lock read seqlock first; fall back to the
 * write lock only if a rename races with us (see rename_retry).
 */
1154again:
1155 this_parent = parent;
2fd6b7f5 1156 spin_lock(&this_parent->d_lock);
1da177e4
LT
1157repeat:
1158 next = this_parent->d_subdirs.next;
1159resume:
1160 while (next != &this_parent->d_subdirs) {
1161 struct list_head *tmp = next;
5160ee6f 1162 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1da177e4
LT
1163 next = tmp->next;
1164
2fd6b7f5 1165 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
23044507 1166
1da177e4
LT
1167 /*
1168 * move only zero ref count dentries to the end
1169 * of the unused list for prune_dcache
1170 */
b7ab39f6 1171 if (!dentry->d_count) {
a4633357 1172 dentry_lru_move_tail(dentry);
1da177e4 1173 found++;
a4633357
CH
1174 } else {
1175 dentry_lru_del(dentry);
1da177e4
LT
1176 }
1177
1178 /*
1179 * We can return to the caller if we have found some (this
1180 * ensures forward progress). We'll be coming back to find
1181 * the rest.
1182 */
2fd6b7f5
NP
1183 if (found && need_resched()) {
1184 spin_unlock(&dentry->d_lock);
1da177e4 1185 goto out;
2fd6b7f5 1186 }
1da177e4
LT
1187
1188 /*
1189 * Descend a level if the d_subdirs list is non-empty.
1190 */
1191 if (!list_empty(&dentry->d_subdirs)) {
2fd6b7f5
NP
/* lock handover to the child; lockdep bookkeeping only, lock stays held */
1192 spin_unlock(&this_parent->d_lock);
1193 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1da177e4 1194 this_parent = dentry;
2fd6b7f5 1195 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1da177e4
LT
1196 goto repeat;
1197 }
2fd6b7f5
NP
1198
1199 spin_unlock(&dentry->d_lock);
1da177e4
LT
1200 }
1201 /*
1202 * All done at this level ... ascend and resume the search.
1203 */
1204 if (this_parent != parent) {
c826cb7d
LT
1205 struct dentry *child = this_parent;
1206 this_parent = try_to_ascend(this_parent, locked, seq);
1207 if (!this_parent)
949854d0 1208 goto rename_retry;
949854d0 1209 next = child->d_u.d_child.next;
1da177e4
LT
1210 goto resume;
1211 }
1212out:
2fd6b7f5 1213 spin_unlock(&this_parent->d_lock);
58db63d0 1214 if (!locked && read_seqretry(&rename_lock, seq))
949854d0 1215 goto rename_retry;
58db63d0
NP
1216 if (locked)
1217 write_sequnlock(&rename_lock);
1da177e4 1218 return found;
58db63d0
NP
1219
/*
 * Raced with a rename. If we already moved some dentries to the LRU
 * tail, let the caller prune those before we bother retrying.
 */
1220rename_retry:
1221 if (found)
1222 return found;
1223 locked = 1;
1224 write_seqlock(&rename_lock);
1225 goto again;
1da177e4
LT
1226}
1227
1228/**
1229 * shrink_dcache_parent - prune dcache
1230 * @parent: parent of entries to prune
1231 *
1232 * Prune the dcache to remove unused children of the parent dentry.
1233 */
1234
1235void shrink_dcache_parent(struct dentry * parent)
1236{
da3bbdd4 1237 struct super_block *sb = parent->d_sb;
1da177e4
LT
1238 int found;
1239
/* keep collecting unused children onto the LRU tail and pruning them
 * until select_parent() finds none left */
1240 while ((found = select_parent(parent)) != 0)
da3bbdd4 1241 __shrink_dcache_sb(sb, &found, 0);
1da177e4 1242}
ec4f8605 1243EXPORT_SYMBOL(shrink_dcache_parent);
1da177e4 1244
1da177e4 1245/*
1495f230 1246 * Scan `sc->nr_to_scan' dentries and return the number which remain.
1da177e4
LT
1247 *
1248 * We need to avoid reentering the filesystem if the caller is performing a
1249 * GFP_NOFS allocation attempt. One example deadlock is:
1250 *
1251 * ext2_new_block->getblk->GFP->shrink_dcache_memory->prune_dcache->
1252 * prune_one_dentry->dput->dentry_iput->iput->inode->i_sb->s_op->put_inode->
1253 * ext2_discard_prealloc->ext2_free_blocks->lock_super->DEADLOCK.
1254 *
1255 * In this case we return -1 to tell the caller that we baled.
1256 */
1495f230
YH
1257static int shrink_dcache_memory(struct shrinker *shrink,
1258 struct shrink_control *sc)
1da177e4 1259{
1495f230
YH
1260 int nr = sc->nr_to_scan;
1261 gfp_t gfp_mask = sc->gfp_mask;
1262
1da177e4
LT
1263 if (nr) {
/* -1 tells the shrinker core we bailed: pruning may re-enter the fs,
 * which is forbidden for a GFP_NOFS allocation (see comment above) */
1264 if (!(gfp_mask & __GFP_FS))
1265 return -1;
da3bbdd4 1266 prune_dcache(nr);
1da177e4 1267 }
312d3ca8 1268
/* report remaining reclaimable dentries, scaled by vfs_cache_pressure */
86c8749e 1269 return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
1da177e4
LT
1270}
1271
8e1f936b
RR
/* registered with the VM so memory pressure can reclaim unused dentries */
1272static struct shrinker dcache_shrinker = {
1273 .shrink = shrink_dcache_memory,
1274 .seeks = DEFAULT_SEEKS,
1275};
1276
1da177e4
LT
1277/**
1278 * d_alloc - allocate a dcache entry
1279 * @parent: parent of entry to allocate
1280 * @name: qstr of the name
1281 *
1282 * Allocates a dentry. It returns %NULL if there is insufficient memory
1283 * available. On a success the dentry is returned. The name passed in is
1284 * copied and the copy passed in may be reused after this call.
1285 */
1286
1287struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
1288{
1289 struct dentry *dentry;
1290 char *dname;
1291
e12ba74d 1292 dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
1da177e4
LT
1293 if (!dentry)
1294 return NULL;
1295
/* short names live in the embedded d_iname array; longer ones get a
 * separate kmalloc'd copy */
1296 if (name->len > DNAME_INLINE_LEN-1) {
1297 dname = kmalloc(name->len + 1, GFP_KERNEL);
1298 if (!dname) {
1299 kmem_cache_free(dentry_cache, dentry);
1300 return NULL;
1301 }
1302 } else {
1303 dname = dentry->d_iname;
1304 }
1305 dentry->d_name.name = dname;
1306
1307 dentry->d_name.len = name->len;
1308 dentry->d_name.hash = name->hash;
1309 memcpy(dname, name->name, name->len);
1310 dname[name->len] = 0;
1311
b7ab39f6 1312 dentry->d_count = 1;
dea3667b 1313 dentry->d_flags = 0;
1da177e4 1314 spin_lock_init(&dentry->d_lock);
31e6b01f 1315 seqcount_init(&dentry->d_seq);
1da177e4
LT
1316 dentry->d_inode = NULL;
1317 dentry->d_parent = NULL;
1318 dentry->d_sb = NULL;
1319 dentry->d_op = NULL;
1320 dentry->d_fsdata = NULL;
ceb5bdc2 1321 INIT_HLIST_BL_NODE(&dentry->d_hash);
1da177e4
LT
1322 INIT_LIST_HEAD(&dentry->d_lru);
1323 INIT_LIST_HEAD(&dentry->d_subdirs);
1324 INIT_LIST_HEAD(&dentry->d_alias);
2fd6b7f5 1325 INIT_LIST_HEAD(&dentry->d_u.d_child);
1da177e4
LT
1326
/* a parentless dentry (parent == NULL) stays detached; callers such as
 * d_alloc_pseudo/d_obtain_alias fill in d_sb/d_parent themselves */
1327 if (parent) {
2fd6b7f5 1328 spin_lock(&parent->d_lock);
89ad485f
NP
1329 /*
1330 * don't need child lock because it is not subject
1331 * to concurrency here
1332 */
dc0474be
NP
1333 __dget_dlock(parent);
1334 dentry->d_parent = parent;
1da177e4 1335 dentry->d_sb = parent->d_sb;
c8aebb0c 1336 d_set_d_op(dentry, dentry->d_sb->s_d_op);
5160ee6f 1337 list_add(&dentry->d_u.d_child, &parent->d_subdirs);
2fd6b7f5 1338 spin_unlock(&parent->d_lock);
2fd6b7f5 1339 }
1da177e4 1340
/* per-cpu accounting of allocated dentries */
3e880fb5 1341 this_cpu_inc(nr_dentry);
312d3ca8 1342
1da177e4
LT
1343 return dentry;
1344}
ec4f8605 1345EXPORT_SYMBOL(d_alloc);
1da177e4 1346
4b936885
NP
/*
 * Allocate a parentless dentry for pseudo filesystems: it is made its
 * own parent and marked DCACHE_DISCONNECTED since it is never hashed
 * into a normal tree.
 */
1347struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
1348{
1349 struct dentry *dentry = d_alloc(NULL, name);
1350 if (dentry) {
1351 dentry->d_sb = sb;
c8aebb0c 1352 d_set_d_op(dentry, dentry->d_sb->s_d_op);
4b936885
NP
1353 dentry->d_parent = dentry;
1354 dentry->d_flags |= DCACHE_DISCONNECTED;
1355 }
1356 return dentry;
1357}
1358EXPORT_SYMBOL(d_alloc_pseudo);
1359
1da177e4
LT
/*
 * Convenience wrapper: build a qstr (with hash) from a NUL-terminated
 * name and hand it to d_alloc().
 */
1360struct dentry *d_alloc_name(struct dentry *parent, const char *name)
1361{
1362 struct qstr q;
1363
1364 q.name = name;
1365 q.len = strlen(name);
1366 q.hash = full_name_hash(q.name, q.len);
1367 return d_alloc(parent, &q);
1368}
ef26ca97 1369EXPORT_SYMBOL(d_alloc_name);
1da177e4 1370
fb045adb
NP
/*
 * Install dentry_operations and mirror which hooks exist as DCACHE_OP_*
 * bits in d_flags, so hot paths (e.g. __d_lookup) can test a flag
 * instead of chasing the d_op pointer. Must only be done once per
 * dentry, hence the WARN_ON_ONCEs.
 */
1371void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
1372{
6f7f7caa
LT
1373 WARN_ON_ONCE(dentry->d_op);
1374 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
fb045adb
NP
1375 DCACHE_OP_COMPARE |
1376 DCACHE_OP_REVALIDATE |
1377 DCACHE_OP_DELETE ));
1378 dentry->d_op = op;
1379 if (!op)
1380 return;
1381 if (op->d_hash)
1382 dentry->d_flags |= DCACHE_OP_HASH;
1383 if (op->d_compare)
1384 dentry->d_flags |= DCACHE_OP_COMPARE;
1385 if (op->d_revalidate)
1386 dentry->d_flags |= DCACHE_OP_REVALIDATE;
1387 if (op->d_delete)
1388 dentry->d_flags |= DCACHE_OP_DELETE;
1389
1390}
1391EXPORT_SYMBOL(d_set_d_op);
1392
360da900
OH
/*
 * Attach @inode to @dentry. Caller holds inode->i_lock when @inode is
 * non-NULL (see d_instantiate/d_splice_alias callers).
 */
1393static void __d_instantiate(struct dentry *dentry, struct inode *inode)
1394{
b23fb0a6 1395 spin_lock(&dentry->d_lock);
9875cf80
DH
1396 if (inode) {
1397 if (unlikely(IS_AUTOMOUNT(inode)))
1398 dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
360da900 1399 list_add(&dentry->d_alias, &inode->i_dentry);
9875cf80 1400 }
360da900 1401 dentry->d_inode = inode;
/* publish the new d_inode to lockless RCU-walk lookups (d_seq) */
31e6b01f 1402 dentry_rcuwalk_barrier(dentry);
b23fb0a6 1403 spin_unlock(&dentry->d_lock);
360da900
OH
1404 fsnotify_d_instantiate(dentry, inode);
1405}
1406
1da177e4
LT
1407/**
1408 * d_instantiate - fill in inode information for a dentry
1409 * @entry: dentry to complete
1410 * @inode: inode to attach to this dentry
1411 *
1412 * Fill in inode information in the entry.
1413 *
1414 * This turns negative dentries into productive full members
1415 * of society.
1416 *
1417 * NOTE! This assumes that the inode count has been incremented
1418 * (or otherwise set) by the caller to indicate that it is now
1419 * in use by the dcache.
1420 */
1421
1422void d_instantiate(struct dentry *entry, struct inode * inode)
1423{
/* the dentry must not already be on an inode's alias list */
28133c7b 1424 BUG_ON(!list_empty(&entry->d_alias));
873feea0
NP
1425 if (inode)
1426 spin_lock(&inode->i_lock);
360da900 1427 __d_instantiate(entry, inode);
873feea0
NP
1428 if (inode)
1429 spin_unlock(&inode->i_lock);
1da177e4
LT
1430 security_d_instantiate(entry, inode);
1431}
ec4f8605 1432EXPORT_SYMBOL(d_instantiate);
1da177e4
LT
1433
1434/**
1435 * d_instantiate_unique - instantiate a non-aliased dentry
1436 * @entry: dentry to instantiate
1437 * @inode: inode to attach to this dentry
1438 *
1439 * Fill in inode information in the entry. On success, it returns NULL.
1440 * If an unhashed alias of "entry" already exists, then we return the
e866cfa9 1441 * aliased dentry instead and drop one reference to inode.
1da177e4
LT
1442 *
1443 * Note that in order to avoid conflicts with rename() etc, the caller
1444 * had better be holding the parent directory semaphore.
e866cfa9
OD
1445 *
1446 * This also assumes that the inode count has been incremented
1447 * (or otherwise set) by the caller to indicate that it is now
1448 * in use by the dcache.
1da177e4 1449 */
770bfad8
DH
/*
 * Core of d_instantiate_unique: if an alias of @inode with the same
 * parent and name as @entry already exists, return it with a reference
 * taken instead of instantiating @entry. Caller holds inode->i_lock.
 */
1450static struct dentry *__d_instantiate_unique(struct dentry *entry,
1451 struct inode *inode)
1da177e4
LT
1452{
1453 struct dentry *alias;
1454 int len = entry->d_name.len;
1455 const char *name = entry->d_name.name;
1456 unsigned int hash = entry->d_name.hash;
1457
770bfad8 1458 if (!inode) {
360da900 1459 __d_instantiate(entry, NULL);
770bfad8
DH
1460 return NULL;
1461 }
1462
1da177e4
LT
1463 list_for_each_entry(alias, &inode->i_dentry, d_alias) {
1464 struct qstr *qstr = &alias->d_name;
1465
9abca360
NP
1466 /*
1467 * Don't need alias->d_lock here, because aliases with
1468 * d_parent == entry->d_parent are not subject to name or
1469 * parent changes, because the parent inode i_mutex is held.
1470 */
1da177e4
LT
1471 if (qstr->hash != hash)
1472 continue;
1473 if (alias->d_parent != entry->d_parent)
1474 continue;
9d55c369 1475 if (dentry_cmp(qstr->name, qstr->len, name, len))
1da177e4 1476 continue;
dc0474be 1477 __dget(alias)
1da177e4
LT
1478 return alias;
1479 }
770bfad8 1480
/* no matching alias: instantiate the caller's dentry normally */
360da900 1481 __d_instantiate(entry, inode);
1da177e4
LT
1482 return NULL;
1483}
770bfad8
DH
1484
1485struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
1486{
1487 struct dentry *result;
1488
1489 BUG_ON(!list_empty(&entry->d_alias));
1490
873feea0
NP
1491 if (inode)
1492 spin_lock(&inode->i_lock);
770bfad8 1493 result = __d_instantiate_unique(entry, inode);
873feea0
NP
1494 if (inode)
1495 spin_unlock(&inode->i_lock);
770bfad8
DH
1496
1497 if (!result) {
1498 security_d_instantiate(entry, inode);
1499 return NULL;
1500 }
1501
/* found an existing alias: drop the caller's inode reference and hand
 * back the (referenced) alias instead */
1502 BUG_ON(!d_unhashed(result));
1503 iput(inode);
1504 return result;
1505}
1506
1da177e4
LT
1507EXPORT_SYMBOL(d_instantiate_unique);
1508
1509/**
1510 * d_alloc_root - allocate root dentry
1511 * @root_inode: inode to allocate the root for
1512 *
1513 * Allocate a root ("/") dentry for the inode given. The inode is
1514 * instantiated and returned. %NULL is returned if there is insufficient
1515 * memory or the inode passed is %NULL.
1516 */
1517
1518struct dentry * d_alloc_root(struct inode * root_inode)
1519{
1520 struct dentry *res = NULL;
1521
1522 if (root_inode) {
1523 static const struct qstr name = { .name = "/", .len = 1 };
1524
1525 res = d_alloc(NULL, &name);
1526 if (res) {
1527 res->d_sb = root_inode->i_sb;
c8aebb0c 1528 d_set_d_op(res, res->d_sb->s_d_op);
1da177e4
LT
/* the root dentry is its own parent */
1529 res->d_parent = res;
1530 d_instantiate(res, root_inode);
1531 }
1532 }
1533 return res;
1534}
ec4f8605 1535EXPORT_SYMBOL(d_alloc_root);
1da177e4 1536
d891eedb
BF
/*
 * Return the first alias of @inode with a reference taken, or NULL if
 * there is none. Caller holds inode->i_lock.
 */
1537static struct dentry * __d_find_any_alias(struct inode *inode)
1538{
1539 struct dentry *alias;
1540
1541 if (list_empty(&inode->i_dentry))
1542 return NULL;
1543 alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
1544 __dget(alias);
1545 return alias;
1546}
1547
/* locked wrapper around __d_find_any_alias() */
1548static struct dentry * d_find_any_alias(struct inode *inode)
1549{
1550 struct dentry *de;
1551
1552 spin_lock(&inode->i_lock);
1553 de = __d_find_any_alias(inode);
1554 spin_unlock(&inode->i_lock);
1555 return de;
1556}
1557
1558
4ea3ada2
CH
1559/**
1560 * d_obtain_alias - find or allocate a dentry for a given inode
1561 * @inode: inode to allocate the dentry for
1562 *
1563 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
1564 * similar open by handle operations. The returned dentry may be anonymous,
1565 * or may have a full name (if the inode was already in the cache).
1566 *
1567 * When called on a directory inode, we must ensure that the inode only ever
1568 * has one dentry. If a dentry is found, that is returned instead of
1569 * allocating a new one.
1570 *
1571 * On successful return, the reference to the inode has been transferred
44003728
CH
1572 * to the dentry. In case of an error the reference on the inode is released.
1573 * To make it easier to use in export operations a %NULL or IS_ERR inode may
1574 * be passed in and the error will be propagated to the return value,
1575 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
4ea3ada2
CH
1576 */
1577struct dentry *d_obtain_alias(struct inode *inode)
1578{
9308a612
CH
1579 static const struct qstr anonstring = { .name = "" };
1580 struct dentry *tmp;
1581 struct dentry *res;
4ea3ada2
CH
1582
1583 if (!inode)
44003728 1584 return ERR_PTR(-ESTALE);
4ea3ada2
CH
1585 if (IS_ERR(inode))
1586 return ERR_CAST(inode);
1587
/* fast path: an alias already exists, use it */
d891eedb 1588 res = d_find_any_alias(inode);
9308a612
CH
1589 if (res)
1590 goto out_iput;
1591
1592 tmp = d_alloc(NULL, &anonstring);
1593 if (!tmp) {
1594 res = ERR_PTR(-ENOMEM);
1595 goto out_iput;
4ea3ada2 1596 }
9308a612
CH
1597 tmp->d_parent = tmp; /* make sure dput doesn't croak */
1598
b5c84bf6 1599
/* re-check under i_lock: another thread may have created an alias
 * since the unlocked probe above */
873feea0 1600 spin_lock(&inode->i_lock);
d891eedb 1601 res = __d_find_any_alias(inode);
9308a612 1602 if (res) {
873feea0 1603 spin_unlock(&inode->i_lock);
9308a612
CH
1604 dput(tmp);
1605 goto out_iput;
1606 }
1607
1608 /* attach a disconnected dentry */
1609 spin_lock(&tmp->d_lock);
1610 tmp->d_sb = inode->i_sb;
c8aebb0c 1611 d_set_d_op(tmp, tmp->d_sb->s_d_op);
9308a612
CH
1612 tmp->d_inode = inode;
1613 tmp->d_flags |= DCACHE_DISCONNECTED;
9308a612 1614 list_add(&tmp->d_alias, &inode->i_dentry);
1879fd6a 1615 hlist_bl_lock(&tmp->d_sb->s_anon);
ceb5bdc2 1616 hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
1879fd6a 1617 hlist_bl_unlock(&tmp->d_sb->s_anon);
9308a612 1618 spin_unlock(&tmp->d_lock);
873feea0 1619 spin_unlock(&inode->i_lock);
24ff6663 1620 security_d_instantiate(tmp, inode);
9308a612 1621
9308a612
CH
1622 return tmp;
1623
1624 out_iput:
24ff6663
JB
1625 if (res && !IS_ERR(res))
1626 security_d_instantiate(res, inode);
9308a612
CH
/* on success the inode reference was transferred to tmp; here we still
 * own it and must drop it */
1627 iput(inode);
1628 return res;
4ea3ada2 1629}
adc48720 1630EXPORT_SYMBOL(d_obtain_alias);
1da177e4
LT
1631
1632/**
1633 * d_splice_alias - splice a disconnected dentry into the tree if one exists
1634 * @inode: the inode which may have a disconnected dentry
1635 * @dentry: a negative dentry which we want to point to the inode.
1636 *
1637 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
1638 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
1639 * and return it, else simply d_add the inode to the dentry and return NULL.
1640 *
1641 * This is needed in the lookup routine of any filesystem that is exportable
1642 * (via knfsd) so that we can build dcache paths to directories effectively.
1643 *
1644 * If a dentry was found and moved, then it is returned. Otherwise NULL
1645 * is returned. This matches the expected return value of ->lookup.
1646 *
1647 */
1648struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
1649{
1650 struct dentry *new = NULL;
1651
/* only directories can have a disconnected alias (e.g. via NFS export);
 * if one exists, move it into place instead of adding a second dentry */
21c0d8fd 1652 if (inode && S_ISDIR(inode->i_mode)) {
873feea0 1653 spin_lock(&inode->i_lock);
1da177e4
LT
1654 new = __d_find_alias(inode, 1);
1655 if (new) {
1656 BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
873feea0 1657 spin_unlock(&inode->i_lock);
1da177e4 1658 security_d_instantiate(new, inode);
1da177e4
LT
1659 d_move(new, dentry);
1660 iput(inode);
1661 } else {
873feea0 1662 /* already taking inode->i_lock, so d_add() by hand */
360da900 1663 __d_instantiate(dentry, inode);
873feea0 1664 spin_unlock(&inode->i_lock);
1da177e4
LT
1665 security_d_instantiate(dentry, inode);
1666 d_rehash(dentry);
1667 }
1668 } else
1669 d_add(dentry, inode);
1670 return new;
1671}
ec4f8605 1672EXPORT_SYMBOL(d_splice_alias);
1da177e4 1673
9403540c
BN
1674/**
1675 * d_add_ci - lookup or allocate new dentry with case-exact name
1676 * @inode: the inode case-insensitive lookup has found
1677 * @dentry: the negative dentry that was passed to the parent's lookup func
1678 * @name: the case-exact name to be associated with the returned dentry
1679 *
1680 * This is to avoid filling the dcache with case-insensitive names to the
1681 * same inode, only the actual correct case is stored in the dcache for
1682 * case-insensitive filesystems.
1683 *
1684 * For a case-insensitive lookup match and if the case-exact dentry
1685 * already exists in the dcache, use it and return it.
1686 *
1687 * If no entry exists with the exact case name, allocate new dentry with
1688 * the exact case, and return the spliced entry.
1689 */
e45b590b 1690struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
9403540c
BN
1691 struct qstr *name)
1692{
1693 int error;
1694 struct dentry *found;
1695 struct dentry *new;
1696
b6520c81
CH
1697 /*
1698 * First check if a dentry matching the name already exists,
1699 * if not go ahead and create it now.
1700 */
9403540c 1701 found = d_hash_and_lookup(dentry->d_parent, name);
9403540c
BN
1702 if (!found) {
1703 new = d_alloc(dentry->d_parent, name);
1704 if (!new) {
1705 error = -ENOMEM;
1706 goto err_out;
1707 }
b6520c81 1708
9403540c
BN
1709 found = d_splice_alias(inode, new);
1710 if (found) {
1711 dput(new);
1712 return found;
1713 }
1714 return new;
1715 }
b6520c81
CH
1716
1717 /*
1718 * If a matching dentry exists, and it's not negative use it.
1719 *
1720 * Decrement the reference count to balance the iget() done
1721 * earlier on.
1722 */
9403540c
BN
1723 if (found->d_inode) {
1724 if (unlikely(found->d_inode != inode)) {
1725 /* This can't happen because bad inodes are unhashed. */
1726 BUG_ON(!is_bad_inode(inode));
1727 BUG_ON(!is_bad_inode(found->d_inode));
1728 }
9403540c
BN
1729 iput(inode);
1730 return found;
1731 }
b6520c81 1732
44396f4b
JB
1733 /*
1734 * We are going to instantiate this dentry, unhash it and clear the
1735 * lookup flag so we can do that.
1736 */
1737 if (unlikely(d_need_lookup(found)))
1738 d_clear_need_lookup(found);
1739
9403540c
BN
1740 /*
1741 * Negative dentry: instantiate it unless the inode is a directory and
b6520c81 1742 * already has a dentry.
9403540c 1743 */
873feea0 1744 spin_lock(&inode->i_lock);
b6520c81 1745 if (!S_ISDIR(inode->i_mode) || list_empty(&inode->i_dentry)) {
360da900 1746 __d_instantiate(found, inode);
873feea0 1747 spin_unlock(&inode->i_lock);
9403540c
BN
1748 security_d_instantiate(found, inode);
1749 return found;
1750 }
b6520c81 1751
9403540c 1752 /*
b6520c81
CH
1753 * In case a directory already has a (disconnected) entry grab a
1754 * reference to it, move it in place and use it.
9403540c
BN
1755 */
1756 new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
dc0474be 1757 __dget(new);
873feea0 1758 spin_unlock(&inode->i_lock);
9403540c 1759 security_d_instantiate(found, inode);
9403540c 1760 d_move(new, found);
9403540c 1761 iput(inode);
9403540c 1762 dput(found);
9403540c
BN
1763 return new;
1764
1765err_out:
1766 iput(inode);
1767 return ERR_PTR(error);
1768}
ec4f8605 1769EXPORT_SYMBOL(d_add_ci);
1da177e4 1770
31e6b01f
NP
1771/**
1772 * __d_lookup_rcu - search for a dentry (racy, store-free)
1773 * @parent: parent dentry
1774 * @name: qstr of name we wish to find
1775 * @seq: returns d_seq value at the point where the dentry was found
1776 * @inode: returns dentry->d_inode when the inode was found valid.
1777 * Returns: dentry, or NULL
1778 *
1779 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
1780 * resolution (store-free path walking) design described in
1781 * Documentation/filesystems/path-lookup.txt.
1782 *
1783 * This is not to be used outside core vfs.
1784 *
1785 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
1786 * held, and rcu_read_lock held. The returned dentry must not be stored into
1787 * without taking d_lock and checking d_seq sequence count against @seq
1788 * returned here.
1789 *
1790 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
1791 * function.
1792 *
1793 * Alternatively, __d_lookup_rcu may be called again to look up the child of
1794 * the returned dentry, so long as its parent's seqlock is checked after the
1795 * child is looked up. Thus, an interlocking stepping of sequence lock checks
1796 * is formed, giving integrity down the path walk.
1797 */
1798struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1799 unsigned *seq, struct inode **inode)
1800{
1801 unsigned int len = name->len;
1802 unsigned int hash = name->hash;
1803 const unsigned char *str = name->name;
b07ad996 1804 struct hlist_bl_head *b = d_hash(parent, hash);
ceb5bdc2 1805 struct hlist_bl_node *node;
31e6b01f
NP
1806 struct dentry *dentry;
1807
1808 /*
1809 * Note: There is significant duplication with __d_lookup which is
1810 * required to prevent single threaded performance regressions
1811 * especially on architectures where smp_rmb (in seqcounts) are costly.
1812 * Keep the two functions in sync.
1813 */
1814
1815 /*
1816 * The hash list is protected using RCU.
1817 *
1818 * Carefully use d_seq when comparing a candidate dentry, to avoid
1819 * races with d_move().
1820 *
1821 * It is possible that concurrent renames can mess up our list
1822 * walk here and result in missing our dentry, resulting in the
1823 * false-negative result. d_lookup() protects against concurrent
1824 * renames using rename_lock seqlock.
1825 *
b0a4bb83 1826 * See Documentation/filesystems/path-lookup.txt for more details.
31e6b01f 1827 */
b07ad996 1828 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
31e6b01f
NP
1829 struct inode *i;
1830 const char *tname;
1831 int tlen;
1832
1833 if (dentry->d_name.hash != hash)
1834 continue;
1835
1836seqretry:
1837 *seq = read_seqcount_begin(&dentry->d_seq);
1838 if (dentry->d_parent != parent)
1839 continue;
1840 if (d_unhashed(dentry))
1841 continue;
1842 tlen = dentry->d_name.len;
1843 tname = dentry->d_name.name;
1844 i = dentry->d_inode;
e1bb5782
NP
1845 prefetch(tname);
1846 if (i)
1847 prefetch(i);
31e6b01f
NP
1848 /*
1849 * This seqcount check is required to ensure name and
1850 * len are loaded atomically, so as not to walk off the
1851 * edge of memory when walking. If we could load this
1852 * atomically some other way, we could drop this check.
1853 */
1854 if (read_seqcount_retry(&dentry->d_seq, *seq))
1855 goto seqretry;
fb045adb 1856 if (parent->d_flags & DCACHE_OP_COMPARE) {
31e6b01f
NP
1857 if (parent->d_op->d_compare(parent, *inode,
1858 dentry, i,
1859 tlen, tname, name))
1860 continue;
1861 } else {
9d55c369 1862 if (dentry_cmp(tname, tlen, str, len))
31e6b01f
NP
1863 continue;
1864 }
1865 /*
1866 * No extra seqcount check is required after the name
1867 * compare. The caller must perform a seqcount check in
1868 * order to do anything useful with the returned dentry
1869 * anyway.
1870 */
1871 *inode = i;
1872 return dentry;
1873 }
1874 return NULL;
1875}
1876
1da177e4
LT
1877/**
1878 * d_lookup - search for a dentry
1879 * @parent: parent dentry
1880 * @name: qstr of name we wish to find
b04f784e 1881 * Returns: dentry, or NULL
1da177e4 1882 *
b04f784e
NP
1883 * d_lookup searches the children of the parent dentry for the name in
1884 * question. If the dentry is found its reference count is incremented and the
1885 * dentry is returned. The caller must use dput to free the entry when it has
1886 * finished using it. %NULL is returned if the dentry does not exist.
1da177e4 1887 */
31e6b01f 1888struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
1da177e4 1889{
31e6b01f 1890 struct dentry *dentry;
949854d0 1891 unsigned seq;
1da177e4
LT
1892
/*
 * A negative result from __d_lookup may be a false negative caused by
 * a concurrent rename, so only trust it if the rename_lock seqcount
 * did not change; a positive hit is always valid.
 */
1893 do {
1894 seq = read_seqbegin(&rename_lock);
1895 dentry = __d_lookup(parent, name);
1896 if (dentry)
1897 break;
1898 } while (read_seqretry(&rename_lock, seq));
1899 return dentry;
1900}
ec4f8605 1901EXPORT_SYMBOL(d_lookup);
1da177e4 1902
31e6b01f 1903/**
b04f784e
NP
1904 * __d_lookup - search for a dentry (racy)
1905 * @parent: parent dentry
1906 * @name: qstr of name we wish to find
1907 * Returns: dentry, or NULL
1908 *
1909 * __d_lookup is like d_lookup, however it may (rarely) return a
1910 * false-negative result due to unrelated rename activity.
1911 *
1912 * __d_lookup is slightly faster by avoiding rename_lock read seqlock,
1913 * however it must be used carefully, eg. with a following d_lookup in
1914 * the case of failure.
1915 *
1916 * __d_lookup callers must be commented.
1917 */
31e6b01f 1918struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1da177e4
LT
1919{
1920 unsigned int len = name->len;
1921 unsigned int hash = name->hash;
1922 const unsigned char *str = name->name;
b07ad996 1923 struct hlist_bl_head *b = d_hash(parent, hash);
ceb5bdc2 1924 struct hlist_bl_node *node;
31e6b01f 1925 struct dentry *found = NULL;
665a7583 1926 struct dentry *dentry;
1da177e4 1927
31e6b01f
NP
1928 /*
1929 * Note: There is significant duplication with __d_lookup_rcu which is
1930 * required to prevent single threaded performance regressions
1931 * especially on architectures where smp_rmb (in seqcounts) are costly.
1932 * Keep the two functions in sync.
1933 */
1934
b04f784e
NP
1935 /*
1936 * The hash list is protected using RCU.
1937 *
1938 * Take d_lock when comparing a candidate dentry, to avoid races
1939 * with d_move().
1940 *
1941 * It is possible that concurrent renames can mess up our list
1942 * walk here and result in missing our dentry, resulting in the
1943 * false-negative result. d_lookup() protects against concurrent
1944 * renames using rename_lock seqlock.
1945 *
b0a4bb83 1946 * See Documentation/filesystems/path-lookup.txt for more details.
b04f784e 1947 */
1da177e4
LT
1948 rcu_read_lock();
1949
b07ad996 1950 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
31e6b01f
NP
1951 const char *tname;
1952 int tlen;
1da177e4 1953
1da177e4
LT
1954 if (dentry->d_name.hash != hash)
1955 continue;
1da177e4
LT
1956
1957 spin_lock(&dentry->d_lock);
1da177e4
LT
1958 if (dentry->d_parent != parent)
1959 goto next;
d0185c08
LT
1960 if (d_unhashed(dentry))
1961 goto next;
1962
1da177e4
LT
1963 /*
1964 * It is safe to compare names since d_move() cannot
1965 * change the qstr (protected by d_lock).
1966 */
31e6b01f
NP
1967 tlen = dentry->d_name.len;
1968 tname = dentry->d_name.name;
fb045adb 1969 if (parent->d_flags & DCACHE_OP_COMPARE) {
621e155a
NP
1970 if (parent->d_op->d_compare(parent, parent->d_inode,
1971 dentry, dentry->d_inode,
31e6b01f 1972 tlen, tname, name))
1da177e4
LT
1973 goto next;
1974 } else {
9d55c369 1975 if (dentry_cmp(tname, tlen, str, len))
1da177e4
LT
1976 goto next;
1977 }
1978
/* take a reference under d_lock before publishing the match */
b7ab39f6 1979 dentry->d_count++;
d0185c08 1980 found = dentry;
1da177e4
LT
1981 spin_unlock(&dentry->d_lock);
1982 break;
1983next:
1984 spin_unlock(&dentry->d_lock);
1985 }
1986 rcu_read_unlock();
1987
1988 return found;
1989}
1990
3e7e241f
EB
1991/**
1992 * d_hash_and_lookup - hash the qstr then search for a dentry
1993 * @dir: Directory to search in
1994 * @name: qstr of name we wish to find
1995 *
1996 * On hash failure or on lookup failure NULL is returned.
1997 */
1998struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1999{
2000 struct dentry *dentry = NULL;
2001
2002 /*
2003 * Check for a fs-specific hash function. Note that we must
2004 * calculate the standard hash first, as the d_op->d_hash()
2005 * routine may choose to leave the hash value unchanged.
2006 */
2007 name->hash = full_name_hash(name->name, name->len);
fb045adb 2008 if (dir->d_flags & DCACHE_OP_HASH) {
b1e6a015 2009 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
3e7e241f
EB
2010 goto out;
2011 }
2012 dentry = d_lookup(dir, name);
2013out:
2014 return dentry;
2015}
2016
/**
 * d_validate - verify dentry provided from insecure source (deprecated)
 * @dentry: The dentry alleged to be valid child of @dparent
 * @dparent: The parent dentry (known to be valid)
 *
 * An insecure source has sent us a dentry, here we verify it and dget() it.
 * This is used by ncpfs in its readdir implementation.
 * Zero is returned in the dentry is invalid.
 *
 * This function is slow for big directories, and deprecated, do not use it.
 */
int d_validate(struct dentry *dentry, struct dentry *dparent)
{
	struct dentry *child;

	/*
	 * Linear scan of dparent's children by pointer identity; the
	 * parent's d_lock keeps d_subdirs stable during the walk.
	 */
	spin_lock(&dparent->d_lock);
	list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
		if (dentry == child) {
			/* Child lock nests under the parent's lock class. */
			spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
			__dget_dlock(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&dparent->d_lock);
			return 1;
		}
	}
	spin_unlock(&dparent->d_lock);

	return 0;
}
EXPORT_SYMBOL(d_validate);
1da177e4
LT
2047
2048/*
2049 * When a file is deleted, we have two options:
2050 * - turn this dentry into a negative dentry
2051 * - unhash this dentry and free it.
2052 *
2053 * Usually, we want to just turn this into
2054 * a negative dentry, but if anybody else is
2055 * currently using the dentry or the inode
2056 * we can't do that and we fall back on removing
2057 * it from the hash queues and waiting for
2058 * it to be deleted later when it has no users
2059 */
2060
2061/**
2062 * d_delete - delete a dentry
2063 * @dentry: The dentry to delete
2064 *
2065 * Turn the dentry into a negative dentry if possible, otherwise
2066 * remove it from the hash queues so it can be deleted later
2067 */
2068
2069void d_delete(struct dentry * dentry)
2070{
873feea0 2071 struct inode *inode;
7a91bf7f 2072 int isdir = 0;
1da177e4
LT
2073 /*
2074 * Are we the only user?
2075 */
357f8e65 2076again:
1da177e4 2077 spin_lock(&dentry->d_lock);
873feea0
NP
2078 inode = dentry->d_inode;
2079 isdir = S_ISDIR(inode->i_mode);
b7ab39f6 2080 if (dentry->d_count == 1) {
873feea0 2081 if (inode && !spin_trylock(&inode->i_lock)) {
357f8e65
NP
2082 spin_unlock(&dentry->d_lock);
2083 cpu_relax();
2084 goto again;
2085 }
13e3c5e5 2086 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
31e6b01f 2087 dentry_unlink_inode(dentry);
7a91bf7f 2088 fsnotify_nameremove(dentry, isdir);
1da177e4
LT
2089 return;
2090 }
2091
2092 if (!d_unhashed(dentry))
2093 __d_drop(dentry);
2094
2095 spin_unlock(&dentry->d_lock);
7a91bf7f
JM
2096
2097 fsnotify_nameremove(dentry, isdir);
1da177e4 2098}
ec4f8605 2099EXPORT_SYMBOL(d_delete);
1da177e4 2100
/*
 * Insert an unhashed dentry into the given hash chain under the
 * chain's bit-spinlock.  DCACHE_RCUACCESS is set before the RCU
 * publish so freeing of this dentry is deferred appropriately.
 */
static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
{
	BUG_ON(!d_unhashed(entry));
	hlist_bl_lock(b);
	entry->d_flags |= DCACHE_RCUACCESS;
	hlist_bl_add_head_rcu(&entry->d_hash, b);
	hlist_bl_unlock(b);
}
2109
770bfad8
DH
/*
 * Rehash @entry on the chain derived from its parent and name hash.
 * Callers hold entry->d_lock (see d_rehash()).
 */
static void _d_rehash(struct dentry * entry)
{
	__d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
}
2114
1da177e4
LT
/**
 * d_rehash - add an entry back to the hash
 * @entry: dentry to add to the hash
 *
 * Adds a dentry to the hash according to its name.  Takes the dentry's
 * d_lock; the bucket is chosen from d_parent and d_name.hash.
 */

void d_rehash(struct dentry * entry)
{
	spin_lock(&entry->d_lock);
	_d_rehash(entry);
	spin_unlock(&entry->d_lock);
}
EXPORT_SYMBOL(d_rehash);
1da177e4 2129
fb2d5b86
NP
/**
 * dentry_update_name_case - update case insensitive dentry with a new name
 * @dentry: dentry to be updated
 * @name: new name
 *
 * Update a case insensitive dentry with new case of name.
 *
 * dentry must have been returned by d_lookup with name @name. Old and new
 * name lengths must match (ie. no d_compare which allows mismatched name
 * lengths).
 *
 * Parent inode i_mutex must be held over d_lookup and into this call (to
 * keep renames and concurrent inserts, and readdir(2) away).
 */
void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
{
	BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
	BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */

	/*
	 * The name bytes change in place, so bump d_seq around the
	 * memcpy: lockless readers of d_name will notice and retry.
	 */
	spin_lock(&dentry->d_lock);
	write_seqcount_begin(&dentry->d_seq);
	memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
	write_seqcount_end(&dentry->d_seq);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(dentry_update_name_case);
2156
1da177e4
LT
/*
 * Exchange the names of @dentry and @target, taking care of whether
 * each name lives in the inline d_iname array ("internal") or in
 * separately allocated storage ("external").  The memcpy()s copy
 * len + 1 bytes so the NUL terminator travels with the name.  In the
 * both-internal case only target's name is copied into dentry and the
 * lengths are not swapped — target is about to be dropped, so its name
 * need not be preserved (see the comment above __d_move()).
 */
static void switch_names(struct dentry *dentry, struct dentry *target)
{
	if (dname_external(target)) {
		if (dname_external(dentry)) {
			/*
			 * Both external: swap the pointers
			 */
			swap(target->d_name.name, dentry->d_name.name);
		} else {
			/*
			 * dentry:internal, target:external. Steal target's
			 * storage and make target internal.
			 */
			memcpy(target->d_iname, dentry->d_name.name,
					dentry->d_name.len + 1);
			dentry->d_name.name = target->d_name.name;
			target->d_name.name = target->d_iname;
		}
	} else {
		if (dname_external(dentry)) {
			/*
			 * dentry:external, target:internal. Give dentry's
			 * storage to target and make dentry internal
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			target->d_name.name = dentry->d_name.name;
			dentry->d_name.name = dentry->d_iname;
		} else {
			/*
			 * Both are internal. Just copy target to dentry
			 */
			memcpy(dentry->d_iname, target->d_name.name,
					target->d_name.len + 1);
			dentry->d_name.len = target->d_name.len;
			return;
		}
	}
	swap(dentry->d_name.len, target->d_name.len);
}
2197
2fd6b7f5
NP
/*
 * Lock the two dentries involved in a move, plus their parents, in a
 * deadlock-free order: parents first (ancestor before descendant when
 * one parent contains the other), then the two dentries themselves in
 * address order.  Distinct lockdep subclasses (DENTRY_D_LOCK_NESTED,
 * 2, 3) tell lockdep these same-class locks nest intentionally.
 */
static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
{
	/*
	 * XXXX: do we really need to take target->d_lock?
	 */
	if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
		spin_lock(&target->d_parent->d_lock);
	else {
		if (d_ancestor(dentry->d_parent, target->d_parent)) {
			spin_lock(&dentry->d_parent->d_lock);
			spin_lock_nested(&target->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		} else {
			spin_lock(&target->d_parent->d_lock);
			spin_lock_nested(&dentry->d_parent->d_lock,
						DENTRY_D_LOCK_NESTED);
		}
	}
	/* The dentries themselves: lower address first. */
	if (target < dentry) {
		spin_lock_nested(&target->d_lock, 2);
		spin_lock_nested(&dentry->d_lock, 3);
	} else {
		spin_lock_nested(&dentry->d_lock, 2);
		spin_lock_nested(&target->d_lock, 3);
	}
}
2224
2225static void dentry_unlock_parents_for_move(struct dentry *dentry,
2226 struct dentry *target)
2227{
2228 if (target->d_parent != dentry->d_parent)
2229 spin_unlock(&dentry->d_parent->d_lock);
2230 if (target->d_parent != target)
2231 spin_unlock(&target->d_parent->d_lock);
2232}
2233
/*
 * When switching names, the actual string doesn't strictly have to
 * be preserved in the target - because we're dropping the target
 * anyway. As such, we can just do a simple memcpy() to copy over
 * the new name before we switch.
 *
 * Note that we have to be a lot more careful about getting the hash
 * switched - we have to switch the hash value properly even if it
 * then no longer matches the actual (corrupted) string of the target.
 * The hash value has to match the hash queue that the dentry is on..
 */
/*
 * __d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way. Caller must hold
 * rename_lock.
 */
static void __d_move(struct dentry * dentry, struct dentry * target)
{
	if (!dentry->d_inode)
		printk(KERN_WARNING "VFS: moving negative dcache entry\n");

	BUG_ON(d_ancestor(dentry, target));
	BUG_ON(d_ancestor(target, dentry));

	dentry_lock_for_move(dentry, target);

	/* d_seq write sections make lockless walkers retry past us. */
	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&target->d_seq);

	/* __d_drop does write_seqcount_barrier, but they're OK to nest. */

	/*
	 * Move the dentry to the target hash queue. Don't bother checking
	 * for the same hash queue because of how unlikely it is.
	 */
	__d_drop(dentry);
	__d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));

	/* Unhash the target: dput() will then get rid of it */
	__d_drop(target);

	list_del(&dentry->d_u.d_child);
	list_del(&target->d_u.d_child);

	/* Switch the names.. */
	switch_names(dentry, target);
	swap(dentry->d_name.hash, target->d_name.hash);

	/* ... and switch the parents */
	if (IS_ROOT(dentry)) {
		dentry->d_parent = target->d_parent;
		target->d_parent = target;
		INIT_LIST_HEAD(&target->d_u.d_child);
	} else {
		swap(dentry->d_parent, target->d_parent);

		/* And add them back to the (new) parent lists */
		list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
	}

	list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);

	write_seqcount_end(&target->d_seq);
	write_seqcount_end(&dentry->d_seq);

	dentry_unlock_parents_for_move(dentry, target);
	spin_unlock(&target->d_lock);
	fsnotify_d_move(dentry);
	spin_unlock(&dentry->d_lock);
}
2308
/*
 * d_move - move a dentry
 * @dentry: entry to move
 * @target: new dentry
 *
 * Update the dcache to reflect the move of a file name. Negative
 * dcache entries should not be moved in this way.
 *
 * The rename_lock write section makes concurrent d_lookup() callers
 * retry rather than observe the move half-done.
 */
void d_move(struct dentry *dentry, struct dentry *target)
{
	write_seqlock(&rename_lock);
	__d_move(dentry, target);
	write_sequnlock(&rename_lock);
}
EXPORT_SYMBOL(d_move);
1da177e4 2324
e2761a11
OH
2325/**
2326 * d_ancestor - search for an ancestor
2327 * @p1: ancestor dentry
2328 * @p2: child dentry
2329 *
2330 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2331 * an ancestor of p2, else NULL.
9eaef27b 2332 */
e2761a11 2333struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
9eaef27b
TM
2334{
2335 struct dentry *p;
2336
871c0067 2337 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
9eaef27b 2338 if (p->d_parent == p1)
e2761a11 2339 return p;
9eaef27b 2340 }
e2761a11 2341 return NULL;
9eaef27b
TM
2342}
2343
/*
 * This helper attempts to cope with remotely renamed directories
 *
 * It assumes that the caller is already holding
 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
 *
 * Note: If ever the locking in lock_rename() changes, then please
 * remember to update this too...
 */
static struct dentry *__d_unalias(struct inode *inode,
		struct dentry *dentry, struct dentry *alias)
{
	struct mutex *m1 = NULL, *m2 = NULL;
	struct dentry *ret;

	/* If alias and dentry share a parent, then no extra locks required */
	if (alias->d_parent == dentry->d_parent)
		goto out_unalias;

	/*
	 * See lock_rename().  Trylock only: we already hold spinlocks
	 * and a seqlock here, so blocking on these mutexes is not an
	 * option — bail out with -EBUSY instead.
	 */
	ret = ERR_PTR(-EBUSY);
	if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
		goto out_err;
	m1 = &dentry->d_sb->s_vfs_rename_mutex;
	if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
		goto out_err;
	m2 = &alias->d_parent->d_inode->i_mutex;
out_unalias:
	__d_move(alias, dentry);
	ret = alias;
out_err:
	/* Caller's i_lock is released here on both success and failure. */
	spin_unlock(&inode->i_lock);
	if (m2)
		mutex_unlock(m2);
	if (m1)
		mutex_unlock(m1);
	return ret;
}
2382
770bfad8
DH
/*
 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
 * named dentry in place of the dentry to be replaced.
 * returns with anon->d_lock held!
 */
static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
{
	struct dentry *dparent, *aparent;

	dentry_lock_for_move(anon, dentry);

	/* d_seq write sections make lockless walkers retry past us. */
	write_seqcount_begin(&dentry->d_seq);
	write_seqcount_begin(&anon->d_seq);

	dparent = dentry->d_parent;
	aparent = anon->d_parent;

	switch_names(dentry, anon);
	swap(dentry->d_name.hash, anon->d_name.hash);

	/*
	 * Exchange parents; a dentry whose old parent was the other
	 * dentry (self-rooted case) becomes its own root.
	 */
	dentry->d_parent = (aparent == anon) ? dentry : aparent;
	list_del(&dentry->d_u.d_child);
	if (!IS_ROOT(dentry))
		list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&dentry->d_u.d_child);

	anon->d_parent = (dparent == dentry) ? anon : dparent;
	list_del(&anon->d_u.d_child);
	if (!IS_ROOT(anon))
		list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
	else
		INIT_LIST_HEAD(&anon->d_u.d_child);

	write_seqcount_end(&dentry->d_seq);
	write_seqcount_end(&anon->d_seq);

	dentry_unlock_parents_for_move(anon, dentry);
	spin_unlock(&dentry->d_lock);

	/* anon->d_lock still locked, returns locked */
	anon->d_flags &= ~DCACHE_DISCONNECTED;
}
2426
2427/**
2428 * d_materialise_unique - introduce an inode into the tree
2429 * @dentry: candidate dentry
2430 * @inode: inode to bind to the dentry, to which aliases may be attached
2431 *
2432 * Introduces an dentry into the tree, substituting an extant disconnected
2433 * root directory alias in its place if there is one
2434 */
2435struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2436{
9eaef27b 2437 struct dentry *actual;
770bfad8
DH
2438
2439 BUG_ON(!d_unhashed(dentry));
2440
770bfad8
DH
2441 if (!inode) {
2442 actual = dentry;
360da900 2443 __d_instantiate(dentry, NULL);
357f8e65
NP
2444 d_rehash(actual);
2445 goto out_nolock;
770bfad8
DH
2446 }
2447
873feea0 2448 spin_lock(&inode->i_lock);
357f8e65 2449
9eaef27b
TM
2450 if (S_ISDIR(inode->i_mode)) {
2451 struct dentry *alias;
2452
2453 /* Does an aliased dentry already exist? */
2454 alias = __d_find_alias(inode, 0);
2455 if (alias) {
2456 actual = alias;
18367501
AV
2457 write_seqlock(&rename_lock);
2458
2459 if (d_ancestor(alias, dentry)) {
2460 /* Check for loops */
2461 actual = ERR_PTR(-ELOOP);
2462 } else if (IS_ROOT(alias)) {
2463 /* Is this an anonymous mountpoint that we
2464 * could splice into our tree? */
9eaef27b 2465 __d_materialise_dentry(dentry, alias);
18367501 2466 write_sequnlock(&rename_lock);
9eaef27b
TM
2467 __d_drop(alias);
2468 goto found;
18367501
AV
2469 } else {
2470 /* Nope, but we must(!) avoid directory
2471 * aliasing */
2472 actual = __d_unalias(inode, dentry, alias);
9eaef27b 2473 }
18367501 2474 write_sequnlock(&rename_lock);
9eaef27b
TM
2475 if (IS_ERR(actual))
2476 dput(alias);
2477 goto out_nolock;
2478 }
770bfad8
DH
2479 }
2480
2481 /* Add a unique reference */
2482 actual = __d_instantiate_unique(dentry, inode);
2483 if (!actual)
2484 actual = dentry;
357f8e65
NP
2485 else
2486 BUG_ON(!d_unhashed(actual));
770bfad8 2487
770bfad8
DH
2488 spin_lock(&actual->d_lock);
2489found:
2490 _d_rehash(actual);
2491 spin_unlock(&actual->d_lock);
873feea0 2492 spin_unlock(&inode->i_lock);
9eaef27b 2493out_nolock:
770bfad8
DH
2494 if (actual == dentry) {
2495 security_d_instantiate(dentry, inode);
2496 return NULL;
2497 }
2498
2499 iput(inode);
2500 return actual;
770bfad8 2501}
ec4f8605 2502EXPORT_SYMBOL_GPL(d_materialise_unique);
770bfad8 2503
/*
 * Copy @namelen bytes of @str immediately before *@buffer, moving the
 * cursor and remaining-space count backwards.  On overflow returns
 * -ENAMETOOLONG; note *@buflen is decremented even then (goes
 * negative), which callers rely on only to detect failure.
 */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	int left = *buflen - namelen;

	*buflen = left;		/* consumed even on failure, as before */
	if (left < 0)
		return -ENAMETOOLONG;
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}
2513
cdd16d02
MS
2514static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2515{
2516 return prepend(buffer, buflen, name->name, name->len);
2517}
2518
/**
 * prepend_path - Prepend path string to a buffer
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buffer: pointer to the end of the buffer
 * @buflen: pointer to buffer length
 *
 * Caller holds the rename_lock.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
static int prepend_path(const struct path *path, struct path *root,
			char **buffer, int *buflen)
{
	struct dentry *dentry = path->dentry;
	struct vfsmount *vfsmnt = path->mnt;
	bool slash = false;
	int error = 0;

	br_read_lock(vfsmount_lock);
	while (dentry != root->dentry || vfsmnt != root->mnt) {
		struct dentry * parent;

		if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
			/* Global root? */
			if (vfsmnt->mnt_parent == vfsmnt) {
				goto global_root;
			}
			/* Cross upwards to the mount's mountpoint. */
			dentry = vfsmnt->mnt_mountpoint;
			vfsmnt = vfsmnt->mnt_parent;
			continue;
		}
		parent = dentry->d_parent;
		prefetch(parent);
		/* d_lock keeps the name stable against d_move(). */
		spin_lock(&dentry->d_lock);
		error = prepend_name(buffer, buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (!error)
			error = prepend(buffer, buflen, "/", 1);
		if (error)
			break;

		slash = true;
		dentry = parent;
	}

out:
	/* Path was empty (already at root): emit the lone "/". */
	if (!error && !slash)
		error = prepend(buffer, buflen, "/", 1);

	br_read_unlock(vfsmount_lock);
	return error;

global_root:
	/*
	 * Filesystems needing to implement special "root names"
	 * should do so with ->d_dname()
	 */
	if (IS_ROOT(dentry) &&
	    (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
		WARN(1, "Root dentry has weird name <%.*s>\n",
		     (int) dentry->d_name.len, dentry->d_name.name);
	}
	root->mnt = vfsmnt;
	root->dentry = dentry;
	goto out;
}
be285c71 2587
f2eb6575
MS
/**
 * __d_path - return the path of a dentry
 * @path: the dentry/vfsmount to report
 * @root: root vfsmnt/dentry (may be modified by this function)
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name.
 *
 * Returns a pointer into the buffer or an error code if the
 * path was too long.  The name is built backwards from the end of
 * @buf, so callers must use the returned pointer, not @buf.
 *
 * "buflen" should be positive.
 *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
 */
char *__d_path(const struct path *path, struct path *root,
	       char *buf, int buflen)
{
	char *res = buf + buflen;
	int error;

	prepend(&res, &buflen, "\0", 1);
	write_seqlock(&rename_lock);
	error = prepend_path(path, root, &res, &buflen);
	write_sequnlock(&rename_lock);

	if (error)
		return ERR_PTR(error);
	return res;
}
2620
ffd1f4ed
MS
2621/*
2622 * same as __d_path but appends "(deleted)" for unlinked files.
2623 */
2624static int path_with_deleted(const struct path *path, struct path *root,
2625 char **buf, int *buflen)
2626{
2627 prepend(buf, buflen, "\0", 1);
2628 if (d_unlinked(path->dentry)) {
2629 int error = prepend(buf, buflen, " (deleted)", 10);
2630 if (error)
2631 return error;
2632 }
2633
2634 return prepend_path(path, root, buf, buflen);
2635}
2636
8df9d1a4
MS
/* Tag a path that cannot be reached from the process root. */
static int prepend_unreachable(char **buffer, int *buflen)
{
	static const char tag[] = "(unreachable)";

	return prepend(buffer, buflen, tag, sizeof(tag) - 1);
}
2641
a03a8a70
JB
/**
 * d_path - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * Convert a dentry into an ASCII path name. If the entry has been deleted
 * the string " (deleted)" is appended. Note that this is ambiguous.
 *
 * Returns a pointer into the buffer or an error code if the path was
 * too long. Note: Callers should use the returned pointer, not the passed
 * in buffer, to use the name! The implementation often starts at an offset
 * into the buffer, and may leave 0 bytes at the start.
 *
 * "buflen" should be positive.
 */
char *d_path(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	/*
	 * We have various synthetic filesystems that never get mounted. On
	 * these filesystems dentries are never used for lookup purposes, and
	 * thus don't need to be hashed. They also don't need a name until a
	 * user wants to identify the object in /proc/pid/fd/. The little hack
	 * below allows us to generate a name for these objects on demand:
	 */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	write_seqlock(&rename_lock);
	/* Work on a copy: prepend_path() may rewrite its root argument. */
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	if (error)
		res = ERR_PTR(error);
	write_sequnlock(&rename_lock);
	path_put(&root);
	return res;
}
EXPORT_SYMBOL(d_path);
1da177e4 2686
8df9d1a4
MS
/**
 * d_path_with_unreachable - return the path of a dentry
 * @path: path to report
 * @buf: buffer to return value in
 * @buflen: buffer length
 *
 * The difference from d_path() is that this prepends "(unreachable)"
 * to paths which are unreachable from the current process' root.
 */
char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
{
	char *res = buf + buflen;
	struct path root;
	struct path tmp;
	int error;

	/* Synthetic filesystems name objects via ->d_dname(), see d_path(). */
	if (path->dentry->d_op && path->dentry->d_op->d_dname)
		return path->dentry->d_op->d_dname(path->dentry, buf, buflen);

	get_fs_root(current->fs, &root);
	write_seqlock(&rename_lock);
	/* Work on a copy: prepend_path() may rewrite its root argument. */
	tmp = root;
	error = path_with_deleted(path, &tmp, &res, &buflen);
	/* tmp was rewritten iff the walk never reached our root. */
	if (!error && !path_equal(&tmp, &root))
		error = prepend_unreachable(&res, &buflen);
	write_sequnlock(&rename_lock);
	path_put(&root);
	if (error)
		res = ERR_PTR(error);

	return res;
}
2719
c23fbb6b
ED
/*
 * Helper function for dentry_operations.d_dname() members: format a
 * synthetic name into the tail of the caller-supplied buffer.
 */
char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
			const char *fmt, ...)
{
	char scratch[64];
	int needed;
	va_list args;

	va_start(args, fmt);
	needed = vsnprintf(scratch, sizeof(scratch), fmt, args) + 1;
	va_end(args);

	if (needed > sizeof(scratch) || needed > buflen)
		return ERR_PTR(-ENAMETOOLONG);

	/* Copy (including NUL) so the name ends exactly at buffer end. */
	return memcpy(buffer + buflen - needed, scratch, needed);
}
2740
6092d048
RP
/*
 * Write full pathname from the root of the filesystem into the buffer.
 * The name is built backwards from the end of @buf; returns a pointer
 * into the buffer, or ERR_PTR(-ENAMETOOLONG).  Caller holds rename_lock
 * (see dentry_path_raw()/dentry_path()).
 */
static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	prepend(&end, &buflen, "\0", 1);
	if (buflen < 1)
		goto Elong;
	/* Get '/' right */
	retval = end-1;
	*retval = '/';

	while (!IS_ROOT(dentry)) {
		struct dentry *parent = dentry->d_parent;
		int error;

		prefetch(parent);
		/* d_lock keeps the name stable against d_move(). */
		spin_lock(&dentry->d_lock);
		error = prepend_name(&end, &buflen, &dentry->d_name);
		spin_unlock(&dentry->d_lock);
		if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
			goto Elong;

		retval = end;
		dentry = parent;
	}
	return retval;
Elong:
	return ERR_PTR(-ENAMETOOLONG);
}
ec2447c2
NP
2774
2775char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2776{
2777 char *retval;
2778
949854d0 2779 write_seqlock(&rename_lock);
ec2447c2 2780 retval = __dentry_path(dentry, buf, buflen);
949854d0 2781 write_sequnlock(&rename_lock);
ec2447c2
NP
2782
2783 return retval;
2784}
2785EXPORT_SYMBOL(dentry_path_raw);
c103135c
AV
2786
2787char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2788{
2789 char *p = NULL;
2790 char *retval;
2791
949854d0 2792 write_seqlock(&rename_lock);
c103135c
AV
2793 if (d_unlinked(dentry)) {
2794 p = buf + buflen;
2795 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2796 goto Elong;
2797 buflen++;
2798 }
2799 retval = __dentry_path(dentry, buf, buflen);
949854d0 2800 write_sequnlock(&rename_lock);
c103135c
AV
2801 if (!IS_ERR(retval) && p)
2802 *p = '/'; /* restore '/' overriden with '\0' */
6092d048
RP
2803 return retval;
2804Elong:
6092d048
RP
2805 return ERR_PTR(-ENAMETOOLONG);
2806}
2807
1da177e4
LT
/*
 * NOTE! The user-level library version returns a
 * character pointer. The kernel system call just
 * returns the length of the buffer filled (which
 * includes the ending '\0' character), or a negative
 * error value. So libc would do something like
 *
 *	char *getcwd(char * buf, size_t size)
 *	{
 *		int retval;
 *
 *		retval = sys_getcwd(buf, size);
 *		if (retval >= 0)
 *			return buf;
 *		errno = -retval;
 *		return NULL;
 *	}
 */
SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
{
	int error;
	struct path pwd, root;
	/* Scratch page: the path is built backwards from its end. */
	char *page = (char *) __get_free_page(GFP_USER);

	if (!page)
		return -ENOMEM;

	get_fs_root_and_pwd(current->fs, &root, &pwd);

	/* An unlinked cwd yields -ENOENT. */
	error = -ENOENT;
	write_seqlock(&rename_lock);
	if (!d_unlinked(pwd.dentry)) {
		unsigned long len;
		/* Copy: prepend_path() may rewrite its root argument. */
		struct path tmp = root;
		char *cwd = page + PAGE_SIZE;
		int buflen = PAGE_SIZE;

		prepend(&cwd, &buflen, "\0", 1);
		error = prepend_path(&pwd, &tmp, &cwd, &buflen);
		write_sequnlock(&rename_lock);

		if (error)
			goto out;

		/* Unreachable from current root */
		if (!path_equal(&tmp, &root)) {
			error = prepend_unreachable(&cwd, &buflen);
			if (error)
				goto out;
		}

		error = -ERANGE;
		len = PAGE_SIZE + page - cwd;
		if (len <= size) {
			error = len;
			if (copy_to_user(buf, cwd, len))
				error = -EFAULT;
		}
	} else {
		write_sequnlock(&rename_lock);
	}

out:
	path_put(&pwd);
	path_put(&root);
	free_page((unsigned long) page);
	return error;
}
2876
/*
 * Test whether new_dentry is a subdirectory of old_dentry.
 *
 * Trivially implemented using the dcache structure
 */

/**
 * is_subdir - is new dentry a subdirectory of old_dentry
 * @new_dentry: new dentry
 * @old_dentry: old dentry
 *
 * Returns 1 if new_dentry is a subdirectory of the parent (at any depth).
 * Returns 0 otherwise.
 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
 */

int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
{
	int result;
	unsigned seq;

	/* A dentry is trivially its own "subdirectory". */
	if (new_dentry == old_dentry)
		return 1;

	do {
		/* for restarting inner loop in case of seq retry */
		seq = read_seqbegin(&rename_lock);
		/*
		 * Need rcu_read_lock() to protect against the d_parent
		 * chain being freed under us due to d_move().
		 */
		rcu_read_lock();
		if (d_ancestor(old_dentry, new_dentry))
			result = 1;
		else
			result = 0;
		rcu_read_unlock();
	} while (read_seqretry(&rename_lock, seq));

	return result;
}
2918
2096f759
AV
/*
 * path_is_under - is path1 beneath path2, taking mounts into account?
 *
 * Climbs path1's mount chain (under vfsmount_lock) until it reaches
 * path2's mount, then checks dentry ancestry with is_subdir().
 */
int path_is_under(struct path *path1, struct path *path2)
{
	struct vfsmount *mnt = path1->mnt;
	struct dentry *dentry = path1->dentry;
	int res;

	br_read_lock(vfsmount_lock);
	if (mnt != path2->mnt) {
		for (;;) {
			/* Hit the global mount root: not under path2. */
			if (mnt->mnt_parent == mnt) {
				br_read_unlock(vfsmount_lock);
				return 0;
			}
			if (mnt->mnt_parent == path2->mnt)
				break;
			mnt = mnt->mnt_parent;
		}
		/* Compare from where path1's mount hangs in path2's mount. */
		dentry = mnt->mnt_mountpoint;
	}
	res = is_subdir(dentry, path2->dentry);
	br_read_unlock(vfsmount_lock);
	return res;
}
EXPORT_SYMBOL(path_is_under);
2943
1da177e4
LT
/*
 * d_genocide - drop one reference on every hashed, positive dentry
 * in the tree rooted at @root.
 *
 * Depth-first walk of d_subdirs.  The walk runs lockless against
 * renames first (seqlock read side); if it races with one, it restarts
 * holding rename_lock for writing (locked = 1) so it is guaranteed to
 * finish.  The DCACHE_GENOCIDE flag marks dentries already decremented
 * so a restart never drops the same count twice.
 */
void d_genocide(struct dentry *root)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = root;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		/* child nests inside the parent's d_lock */
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		if (d_unhashed(dentry) || !dentry->d_inode) {
			/* skip unhashed or negative dentries */
			spin_unlock(&dentry->d_lock);
			continue;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			/*
			 * Descend: drop the parent's lock and keep holding
			 * the child's, re-annotating it for lockdep as the
			 * (non-nested) parent lock of the next level.
			 */
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		/* drop the count at most once, even across restarts */
		if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
			dentry->d_flags |= DCACHE_GENOCIDE;
			dentry->d_count--;
		}
		spin_unlock(&dentry->d_lock);
	}
	if (this_parent != root) {
		/* ascend: handle this directory itself, then its siblings */
		struct dentry *child = this_parent;
		if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
			this_parent->d_flags |= DCACHE_GENOCIDE;
			this_parent->d_count--;
		}
		/* revalidates d_parent against renames; NULL means retry */
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	/* a rename raced with the lockless walk: redo it under the lock */
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
3005
3006/**
3007 * find_inode_number - check for dentry with name
3008 * @dir: directory to check
3009 * @name: Name to find.
3010 *
3011 * Check whether a dentry already exists for the given name,
3012 * and return the inode number if it has an inode. Otherwise
3013 * 0 is returned.
3014 *
3015 * This routine is used to post-process directory listings for
3016 * filesystems using synthetic inode numbers, and is necessary
3017 * to keep getcwd() working.
3018 */
3019
3020ino_t find_inode_number(struct dentry *dir, struct qstr *name)
3021{
3022 struct dentry * dentry;
3023 ino_t ino = 0;
3024
3e7e241f
EB
3025 dentry = d_hash_and_lookup(dir, name);
3026 if (dentry) {
1da177e4
LT
3027 if (dentry->d_inode)
3028 ino = dentry->d_inode->i_ino;
3029 dput(dentry);
3030 }
1da177e4
LT
3031 return ino;
3032}
ec4f8605 3033EXPORT_SYMBOL(find_inode_number);
1da177e4
LT
3034
3035static __initdata unsigned long dhash_entries;
3036static int __init set_dhash_entries(char *str)
3037{
3038 if (!str)
3039 return 0;
3040 dhash_entries = simple_strtoul(str, &str, 0);
3041 return 1;
3042}
3043__setup("dhash_entries=", set_dhash_entries);
3044
3045static void __init dcache_init_early(void)
3046{
3047 int loop;
3048
3049 /* If hashes are distributed across NUMA nodes, defer
3050 * hash allocation until vmalloc space is available.
3051 */
3052 if (hashdist)
3053 return;
3054
3055 dentry_hashtable =
3056 alloc_large_system_hash("Dentry cache",
b07ad996 3057 sizeof(struct hlist_bl_head),
1da177e4
LT
3058 dhash_entries,
3059 13,
3060 HASH_EARLY,
3061 &d_hash_shift,
3062 &d_hash_mask,
3063 0);
3064
3065 for (loop = 0; loop < (1 << d_hash_shift); loop++)
b07ad996 3066 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
1da177e4
LT
3067}
3068
74bf17cf 3069static void __init dcache_init(void)
1da177e4
LT
3070{
3071 int loop;
3072
3073 /*
3074 * A constructor could be added for stable state like the lists,
3075 * but it is probably not worth it because of the cache nature
3076 * of the dcache.
3077 */
0a31bd5f
CL
3078 dentry_cache = KMEM_CACHE(dentry,
3079 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
1da177e4 3080
8e1f936b 3081 register_shrinker(&dcache_shrinker);
1da177e4
LT
3082
3083 /* Hash may have been set up in dcache_init_early */
3084 if (!hashdist)
3085 return;
3086
3087 dentry_hashtable =
3088 alloc_large_system_hash("Dentry cache",
b07ad996 3089 sizeof(struct hlist_bl_head),
1da177e4
LT
3090 dhash_entries,
3091 13,
3092 0,
3093 &d_hash_shift,
3094 &d_hash_mask,
3095 0);
3096
3097 for (loop = 0; loop < (1 << d_hash_shift); loop++)
b07ad996 3098 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
1da177e4
LT
3099}
3100
/* SLAB cache for __getname() consumers */
struct kmem_cache *names_cachep __read_mostly;
EXPORT_SYMBOL(names_cachep);

EXPORT_SYMBOL(d_genocide);
3106
1da177e4
LT
/* Early-boot VFS setup: dcache and inode hash tables (HASH_EARLY path). */
void __init vfs_caches_init_early(void)
{
	dcache_init_early();
	inode_init_early();
}
3112
/*
 * Main VFS initialisation: size the caches from available memory,
 * then bring up each subsystem in dependency order.
 * @mempages: total number of memory pages to base sizing on.
 */
void __init vfs_caches_init(unsigned long mempages)
{
	unsigned long reserve;

	/* Base hash sizes on available memory, with a reserve equal to
	   150% of current kernel size */

	reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
	mempages -= reserve;

	/* PATH_MAX-sized buffers handed out by __getname() */
	names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* order matters: mounts need the dcache/icache in place first */
	dcache_init();
	inode_init();
	files_init(mempages);
	mnt_init();
	bdev_cache_init();
	chrdev_init();
}