/*
 * fs/dcache.c
 *
 * Complete reimplementation
 * (C) 1997 Thomas Schoebel-Theuer,
 * with heavy changes by Linus Torvalds
 */

/*
 * Notes on the allocation strategy:
 *
 * The dcache is a master of the icache - whenever a dcache entry
 * exists, the inode will always exist. "iput()" is done either when
 * the dcache entry is deleted or garbage collected.
 */

#include <linux/syscalls.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <linux/security.h>
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/fs_struct.h>
#include <linux/hardirq.h>
#include <linux/bit_spinlock.h>
#include <linux/rculist_bl.h>
#include <linux/prefetch.h>
#include "internal.h"

/*
 * Usage:
 * dcache->d_inode->i_lock protects:
 *   - i_dentry, d_alias, d_inode of aliases
 * dcache_hash_bucket lock protects:
 *   - the dcache hash table
 * s_anon bl list spinlock protects:
 *   - the s_anon list (see __d_drop)
 * dcache_lru_lock protects:
 *   - the dcache lru lists and counters
 * d_lock protects:
 *   - d_flags
 *   - d_name
 *   - d_lru
 *   - d_count
 *   - d_unhashed()
 *   - d_parent and d_subdirs
 *   - children's d_child and d_parent
 *   - d_alias, d_inode
 *
 * Ordering:
 * dentry->d_inode->i_lock
 *   dentry->d_lock
 *     dcache_lru_lock
 *       dcache_hash_bucket lock
 *       s_anon lock
 *
 * If there is an ancestor relationship:
 * dentry->d_parent->...->d_parent->d_lock
 *   ...
 *     dentry->d_parent->d_lock
 *       dentry->d_lock
 *
 * If no ancestor relationship:
 * if (dentry1 < dentry2)
 *   dentry1->d_lock
 *     dentry2->d_lock
 */
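/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * when two dentries with no ancestor relationship must both be locked,
 * the rule above says to lock the lower-addressed dentry first.
 */
#if 0
static void lock_two_unrelated_dentries(struct dentry *d1, struct dentry *d2)
{
	if (d1 < d2) {
		spin_lock(&d1->d_lock);
		spin_lock_nested(&d2->d_lock, DENTRY_D_LOCK_NESTED);
	} else {
		spin_lock(&d2->d_lock);
		spin_lock_nested(&d1->d_lock, DENTRY_D_LOCK_NESTED);
	}
}
#endif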
int sysctl_vfs_cache_pressure __read_mostly = 100;
EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure);

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lru_lock);
__cacheline_aligned_in_smp DEFINE_SEQLOCK(rename_lock);

EXPORT_SYMBOL(rename_lock);

static struct kmem_cache *dentry_cache __read_mostly;

/*
 * This is the single most critical data structure when it comes
 * to the dcache: the hashtable for lookups. Somebody should try
 * to make this good - I've just made it work.
 *
 * This hash-function tries to avoid losing too many bits of hash
 * information, yet avoid using a prime hash-size or similar.
 */
#define D_HASHBITS	d_hash_shift
#define D_HASHMASK	d_hash_mask

static unsigned int d_hash_mask __read_mostly;
static unsigned int d_hash_shift __read_mostly;

static struct hlist_bl_head *dentry_hashtable __read_mostly;

static inline struct hlist_bl_head *d_hash(struct dentry *parent,
					unsigned long hash)
{
	hash += ((unsigned long) parent ^ GOLDEN_RATIO_PRIME) / L1_CACHE_BYTES;
	hash = hash ^ ((hash ^ GOLDEN_RATIO_PRIME) >> D_HASHBITS);
	return dentry_hashtable + (hash & D_HASHMASK);
}

/* Statistics gathering. */
struct dentry_stat_t dentry_stat = {
	.age_limit = 45,
};

static DEFINE_PER_CPU(unsigned int, nr_dentry);

#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
static int get_nr_dentry(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_dentry, i);
	return sum < 0 ? 0 : sum;
}

int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
		   size_t *lenp, loff_t *ppos)
{
	dentry_stat.nr_dentry = get_nr_dentry();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

static void __d_free(struct rcu_head *head)
{
	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);

	WARN_ON(!list_empty(&dentry->d_alias));
	if (dname_external(dentry))
		kfree(dentry->d_name.name);
	kmem_cache_free(dentry_cache, dentry);
}

/*
 * no locks, please.
 */
static void d_free(struct dentry *dentry)
{
	BUG_ON(dentry->d_count);
	this_cpu_dec(nr_dentry);
	if (dentry->d_op && dentry->d_op->d_release)
		dentry->d_op->d_release(dentry);

	/* if dentry was never visible to RCU, immediate free is OK */
	if (!(dentry->d_flags & DCACHE_RCUACCESS))
		__d_free(&dentry->d_u.d_rcu);
	else
		call_rcu(&dentry->d_u.d_rcu, __d_free);
}

/**
 * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
 * @dentry: the target dentry
 * After this call, in-progress rcu-walk path lookup will fail. This
 * should be called after unhashing, and after changing d_inode (if
 * the dentry has not already been unhashed).
 */
static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
{
	assert_spin_locked(&dentry->d_lock);
	/* Go through a barrier */
	write_seqcount_barrier(&dentry->d_seq);
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. Dentry has no refcount
 * and is unhashed.
 */
static void dentry_iput(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	if (inode) {
		dentry->d_inode = NULL;
		list_del_init(&dentry->d_alias);
		spin_unlock(&dentry->d_lock);
		spin_unlock(&inode->i_lock);
		if (!inode->i_nlink)
			fsnotify_inoderemove(inode);
		if (dentry->d_op && dentry->d_op->d_iput)
			dentry->d_op->d_iput(dentry, inode);
		else
			iput(inode);
	} else {
		spin_unlock(&dentry->d_lock);
	}
}

/*
 * Release the dentry's inode, using the filesystem
 * d_iput() operation if defined. dentry remains in-use.
 */
static void dentry_unlink_inode(struct dentry * dentry)
	__releases(dentry->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	struct inode *inode = dentry->d_inode;
	dentry->d_inode = NULL;
	list_del_init(&dentry->d_alias);
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	spin_unlock(&inode->i_lock);
	if (!inode->i_nlink)
		fsnotify_inoderemove(inode);
	if (dentry->d_op && dentry->d_op->d_iput)
		dentry->d_op->d_iput(dentry, inode);
	else
		iput(inode);
}

/*
 * dentry_lru_(add|del|move_tail) must be called with d_lock held.
 */
static void dentry_lru_add(struct dentry *dentry)
{
	if (list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
		spin_unlock(&dcache_lru_lock);
	}
}

static void __dentry_lru_del(struct dentry *dentry)
{
	list_del_init(&dentry->d_lru);
	dentry->d_sb->s_nr_dentry_unused--;
	dentry_stat.nr_unused--;
}

static void dentry_lru_del(struct dentry *dentry)
{
	if (!list_empty(&dentry->d_lru)) {
		spin_lock(&dcache_lru_lock);
		__dentry_lru_del(dentry);
		spin_unlock(&dcache_lru_lock);
	}
}

static void dentry_lru_move_tail(struct dentry *dentry)
{
	spin_lock(&dcache_lru_lock);
	if (list_empty(&dentry->d_lru)) {
		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
		dentry->d_sb->s_nr_dentry_unused++;
		dentry_stat.nr_unused++;
	} else {
		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
	}
	spin_unlock(&dcache_lru_lock);
}

/**
 * d_kill - kill dentry and return parent
 * @dentry: dentry to kill
 * @parent: parent dentry
 *
 * The dentry must already be unhashed and removed from the LRU.
 *
 * If this is the root of the dentry tree, return NULL.
 *
 * dentry->d_lock and parent->d_lock must be held by caller, and are dropped by
 * d_kill.
 */
static struct dentry *d_kill(struct dentry *dentry, struct dentry *parent)
	__releases(dentry->d_lock)
	__releases(parent->d_lock)
	__releases(dentry->d_inode->i_lock)
{
	list_del(&dentry->d_u.d_child);
	/*
	 * Inform try_to_ascend() that we are no longer attached to the
	 * dentry tree
	 */
	dentry->d_flags |= DCACHE_DISCONNECTED;
	if (parent)
		spin_unlock(&parent->d_lock);
	dentry_iput(dentry);
	/*
	 * dentry_iput drops the locks, at which point nobody (except
	 * transient RCU lookups) can reach this dentry.
	 */
	d_free(dentry);
	return parent;
}

/*
 * Unhash a dentry without inserting an RCU walk barrier or checking that
 * dentry->d_lock is locked. The caller must take care of that, if
 * appropriate.
 */
static void __d_shrink(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		struct hlist_bl_head *b;
		if (unlikely(dentry->d_flags & DCACHE_DISCONNECTED))
			b = &dentry->d_sb->s_anon;
		else
			b = d_hash(dentry->d_parent, dentry->d_name.hash);

		hlist_bl_lock(b);
		__hlist_bl_del(&dentry->d_hash);
		dentry->d_hash.pprev = NULL;
		hlist_bl_unlock(b);
	}
}

/**
 * d_drop - drop a dentry
 * @dentry: dentry to drop
 *
 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
 * be found through a VFS lookup any more. Note that this is different from
 * deleting the dentry - d_delete will try to mark the dentry negative if
 * possible, giving a successful _negative_ lookup, while d_drop will
 * just make the cache lookup fail.
 *
 * d_drop() is used mainly for stuff that wants to invalidate a dentry for some
 * reason (NFS timeouts or autofs deletes).
 *
 * __d_drop requires dentry->d_lock.
 */
void __d_drop(struct dentry *dentry)
{
	if (!d_unhashed(dentry)) {
		__d_shrink(dentry);
		dentry_rcuwalk_barrier(dentry);
	}
}
EXPORT_SYMBOL(__d_drop);

void d_drop(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_drop);

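/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a filesystem that learns a cached name is stale (say, after an NFS-style
 * timeout) can simply unhash it; the next lookup will miss the dcache and
 * fall back to ->lookup().  The helper name is hypothetical.
 */
#if 0
static void my_fs_invalidate_name(struct dentry *dentry)
{
	d_drop(dentry);		/* takes and releases dentry->d_lock itself */
}
#endif
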
/*
 * d_clear_need_lookup - drop a dentry from cache and clear the need lookup flag
 * @dentry: dentry to drop
 *
 * This is called when we do a lookup on a placeholder dentry that needed to be
 * looked up. The dentry should have been hashed in order for it to be found by
 * the lookup code, but now needs to be unhashed while we do the actual lookup
 * and clear the DCACHE_NEED_LOOKUP flag.
 */
void d_clear_need_lookup(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__d_drop(dentry);
	dentry->d_flags &= ~DCACHE_NEED_LOOKUP;
	spin_unlock(&dentry->d_lock);
}
EXPORT_SYMBOL(d_clear_need_lookup);

/*
 * Finish off a dentry we've decided to kill.
 * dentry->d_lock must be held, returns with it unlocked.
 * If ref is non-zero, then decrement the refcount too.
 * Returns dentry requiring refcount drop, or NULL if we're done.
 */
static inline struct dentry *dentry_kill(struct dentry *dentry, int ref)
	__releases(dentry->d_lock)
{
	struct inode *inode;
	struct dentry *parent;

	inode = dentry->d_inode;
	if (inode && !spin_trylock(&inode->i_lock)) {
relock:
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		return dentry; /* try again with same dentry */
	}
	if (IS_ROOT(dentry))
		parent = NULL;
	else
		parent = dentry->d_parent;
	if (parent && !spin_trylock(&parent->d_lock)) {
		if (inode)
			spin_unlock(&inode->i_lock);
		goto relock;
	}

	if (ref)
		dentry->d_count--;
	/* if dentry was on the d_lru list delete it from there */
	dentry_lru_del(dentry);
	/* if it was on the hash then remove it */
	__d_drop(dentry);
	return d_kill(dentry, parent);
}

/*
 * This is dput
 *
 * This is complicated by the fact that we do not want to put
 * dentries that are no longer on any hash chain on the unused
 * list: we'd much rather just get rid of them immediately.
 *
 * However, that implies that we have to traverse the dentry
 * tree upwards to the parents which might _also_ now be
 * scheduled for deletion (it may have been only waiting for
 * its last child to go away).
 *
 * This tail recursion is done by hand as we don't want to depend
 * on the compiler to always get this right (gcc generally doesn't).
 * Real recursion would eat up our stack space.
 */

/*
 * dput - release a dentry
 * @dentry: dentry to release
 *
 * Release a dentry. This will drop the usage count and if appropriate
 * call the dentry unlink method as well as removing it from the queues and
 * releasing its resources. If the parent dentries were scheduled for release
 * they too may now get deleted.
 */
void dput(struct dentry *dentry)
{
	if (!dentry)
		return;

repeat:
	if (dentry->d_count == 1)
		might_sleep();
	spin_lock(&dentry->d_lock);
	BUG_ON(!dentry->d_count);
	if (dentry->d_count > 1) {
		dentry->d_count--;
		spin_unlock(&dentry->d_lock);
		return;
	}

	if (dentry->d_flags & DCACHE_OP_DELETE) {
		if (dentry->d_op->d_delete(dentry))
			goto kill_it;
	}

	/* Unreachable? Get rid of it */
	if (d_unhashed(dentry))
		goto kill_it;

	/*
	 * If this dentry needs lookup, don't set the referenced flag so that it
	 * is more likely to be cleaned up by the dcache shrinker in case of
	 * memory pressure.
	 */
	if (!d_need_lookup(dentry))
		dentry->d_flags |= DCACHE_REFERENCED;
	dentry_lru_add(dentry);

	dentry->d_count--;
	spin_unlock(&dentry->d_lock);
	return;

kill_it:
	dentry = dentry_kill(dentry, 1);
	if (dentry)
		goto repeat;
}
EXPORT_SYMBOL(dput);
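/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * every reference obtained through dget(), dget_parent() and friends must
 * eventually be balanced by exactly one dput().  A typical caller looks
 * roughly like this; my_fs_do_something() is hypothetical.
 */
#if 0
static void my_fs_use_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);	/* takes a reference */

	my_fs_do_something(parent);
	dput(parent);					/* and releases it */
}
#endif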

/**
 * d_invalidate - invalidate a dentry
 * @dentry: dentry to invalidate
 *
 * Try to invalidate the dentry if it turns out to be
 * possible. If there are other dentries that can be
 * reached through this one we can't delete it and we
 * return -EBUSY. On success we return 0.
 *
 * no dcache lock.
 */

int d_invalidate(struct dentry * dentry)
{
	/*
	 * If it's already been dropped, return OK.
	 */
	spin_lock(&dentry->d_lock);
	if (d_unhashed(dentry)) {
		spin_unlock(&dentry->d_lock);
		return 0;
	}
	/*
	 * Check whether to do a partial shrink_dcache
	 * to get rid of unused child entries.
	 */
	if (!list_empty(&dentry->d_subdirs)) {
		spin_unlock(&dentry->d_lock);
		shrink_dcache_parent(dentry);
		spin_lock(&dentry->d_lock);
	}

	/*
	 * Somebody else still using it?
	 *
	 * If it's a directory, we can't drop it
	 * for fear of somebody re-populating it
	 * with children (even though dropping it
	 * would make it unreachable from the root,
	 * we might still populate it if it was a
	 * working directory or similar).
	 */
	if (dentry->d_count > 1) {
		if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
			spin_unlock(&dentry->d_lock);
			return -EBUSY;
		}
	}

	__d_drop(dentry);
	spin_unlock(&dentry->d_lock);
	return 0;
}
EXPORT_SYMBOL(d_invalidate);

/* This must be called with d_lock held */
static inline void __dget_dlock(struct dentry *dentry)
{
	dentry->d_count++;
}

static inline void __dget(struct dentry *dentry)
{
	spin_lock(&dentry->d_lock);
	__dget_dlock(dentry);
	spin_unlock(&dentry->d_lock);
}

struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Don't need rcu_dereference because we re-check it was correct under
	 * the lock.
	 */
	rcu_read_lock();
	ret = dentry->d_parent;
	spin_lock(&ret->d_lock);
	if (unlikely(ret != dentry->d_parent)) {
		spin_unlock(&ret->d_lock);
		rcu_read_unlock();
		goto repeat;
	}
	rcu_read_unlock();
	BUG_ON(!ret->d_count);
	ret->d_count++;
	spin_unlock(&ret->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);

/**
 * d_find_alias - grab a hashed alias of inode
 * @inode: inode in question
 * @want_discon: flag, used by d_splice_alias, to request
 *          that only a DISCONNECTED alias be returned.
 *
 * If inode has a hashed alias, or is a directory and has any alias,
 * acquire the reference to alias and return it. Otherwise return NULL.
 * Notice that if inode is a directory there can be only one alias and
 * it can be unhashed only if it has no children, or if it is the root
 * of a filesystem.
 *
 * If the inode has an IS_ROOT, DCACHE_DISCONNECTED alias, then prefer
 * any other hashed alias over that one unless @want_discon is set,
 * in which case only return an IS_ROOT, DCACHE_DISCONNECTED alias.
 */
static struct dentry *__d_find_alias(struct inode *inode, int want_discon)
{
	struct dentry *alias, *discon_alias;

again:
	discon_alias = NULL;
	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				discon_alias = alias;
			} else if (!want_discon) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
	}
	if (discon_alias) {
		alias = discon_alias;
		spin_lock(&alias->d_lock);
		if (S_ISDIR(inode->i_mode) || !d_unhashed(alias)) {
			if (IS_ROOT(alias) &&
			    (alias->d_flags & DCACHE_DISCONNECTED)) {
				__dget_dlock(alias);
				spin_unlock(&alias->d_lock);
				return alias;
			}
		}
		spin_unlock(&alias->d_lock);
		goto again;
	}
	return NULL;
}

struct dentry *d_find_alias(struct inode *inode)
{
	struct dentry *de = NULL;

	if (!list_empty(&inode->i_dentry)) {
		spin_lock(&inode->i_lock);
		de = __d_find_alias(inode, 0);
		spin_unlock(&inode->i_lock);
	}
	return de;
}
EXPORT_SYMBOL(d_find_alias);

/*
 * Try to kill dentries associated with this inode.
 * WARNING: you must own a reference to inode.
 */
void d_prune_aliases(struct inode *inode)
{
	struct dentry *dentry;
restart:
	spin_lock(&inode->i_lock);
	list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
		spin_lock(&dentry->d_lock);
		if (!dentry->d_count) {
			__dget_dlock(dentry);
			__d_drop(dentry);
			spin_unlock(&dentry->d_lock);
			spin_unlock(&inode->i_lock);
			dput(dentry);
			goto restart;
		}
		spin_unlock(&dentry->d_lock);
	}
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(d_prune_aliases);

/*
 * Try to throw away a dentry - free the inode, dput the parent.
 * Requires dentry->d_lock is held, and dentry->d_count == 0.
 * Releases dentry->d_lock.
 *
 * This may fail if locks cannot be acquired; no problem, just try again.
 */
static void try_prune_one_dentry(struct dentry *dentry)
	__releases(dentry->d_lock)
{
	struct dentry *parent;

	parent = dentry_kill(dentry, 0);
	/*
	 * If dentry_kill returns NULL, we have nothing more to do.
	 * If it returns the same dentry, trylocks failed. In either
	 * case, just loop again.
	 *
	 * Otherwise, we need to prune ancestors too. This is necessary
	 * to prevent quadratic behavior of shrink_dcache_parent(), but
	 * is also expected to be beneficial in reducing dentry cache
	 * fragmentation.
	 */
	if (!parent)
		return;
	if (parent == dentry)
		return;

	/* Prune ancestors. */
	dentry = parent;
	while (dentry) {
		spin_lock(&dentry->d_lock);
		if (dentry->d_count > 1) {
			dentry->d_count--;
			spin_unlock(&dentry->d_lock);
			return;
		}
		dentry = dentry_kill(dentry, 1);
	}
}

static void shrink_dentry_list(struct list_head *list)
{
	struct dentry *dentry;

	rcu_read_lock();
	for (;;) {
		dentry = list_entry_rcu(list->prev, struct dentry, d_lru);
		if (&dentry->d_lru == list)
			break; /* empty */
		spin_lock(&dentry->d_lock);
		if (dentry != list_entry(list->prev, struct dentry, d_lru)) {
			spin_unlock(&dentry->d_lock);
			continue;
		}

		/*
		 * We found an inuse dentry which was not removed from
		 * the LRU because of laziness during lookup. Do not free
		 * it - just keep it off the LRU list.
		 */
		if (dentry->d_count) {
			dentry_lru_del(dentry);
			spin_unlock(&dentry->d_lock);
			continue;
		}

		rcu_read_unlock();

		try_prune_one_dentry(dentry);

		rcu_read_lock();
	}
	rcu_read_unlock();
}

/**
 * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
 * @sb: superblock to shrink dentry LRU.
 * @count: number of entries to prune
 * @flags: flags to control the dentry processing
 *
 * If flags contains DCACHE_REFERENCED, referenced dentries will not be pruned.
 */
static void __shrink_dcache_sb(struct super_block *sb, int count, int flags)
{
	struct dentry *dentry;
	LIST_HEAD(referenced);
	LIST_HEAD(tmp);

relock:
	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		dentry = list_entry(sb->s_dentry_lru.prev,
				struct dentry, d_lru);
		BUG_ON(dentry->d_sb != sb);

		if (!spin_trylock(&dentry->d_lock)) {
			spin_unlock(&dcache_lru_lock);
			cpu_relax();
			goto relock;
		}

		/*
		 * If we are honouring the DCACHE_REFERENCED flag and the
		 * dentry has this flag set, don't free it. Clear the flag
		 * and put it back on the LRU.
		 */
		if (flags & DCACHE_REFERENCED &&
		    dentry->d_flags & DCACHE_REFERENCED) {
			dentry->d_flags &= ~DCACHE_REFERENCED;
			list_move(&dentry->d_lru, &referenced);
			spin_unlock(&dentry->d_lock);
		} else {
			list_move_tail(&dentry->d_lru, &tmp);
			spin_unlock(&dentry->d_lock);
			if (!--count)
				break;
		}
		cond_resched_lock(&dcache_lru_lock);
	}
	if (!list_empty(&referenced))
		list_splice(&referenced, &sb->s_dentry_lru);
	spin_unlock(&dcache_lru_lock);

	shrink_dentry_list(&tmp);
}

/**
 * prune_dcache_sb - shrink the dcache
 * @sb: superblock to shrink
 * @nr_to_scan: number of entries to try to free
 *
 * Attempt to shrink the superblock dcache LRU by @nr_to_scan entries. This is
 * done when we need more memory and is called from the superblock shrinker
 * function.
 *
 * This function may fail to free any resources if all the dentries are in
 * use.
 */
void prune_dcache_sb(struct super_block *sb, int nr_to_scan)
{
	__shrink_dcache_sb(sb, nr_to_scan, DCACHE_REFERENCED);
}

/**
 * shrink_dcache_sb - shrink dcache for a superblock
 * @sb: superblock
 *
 * Shrink the dcache for the specified super block. This is used to free
 * the dcache before unmounting a file system.
 */
void shrink_dcache_sb(struct super_block *sb)
{
	LIST_HEAD(tmp);

	spin_lock(&dcache_lru_lock);
	while (!list_empty(&sb->s_dentry_lru)) {
		list_splice_init(&sb->s_dentry_lru, &tmp);
		spin_unlock(&dcache_lru_lock);
		shrink_dentry_list(&tmp);
		spin_lock(&dcache_lru_lock);
	}
	spin_unlock(&dcache_lru_lock);
}
EXPORT_SYMBOL(shrink_dcache_sb);

/*
 * destroy a single subtree of dentries for unmount
 * - see the comments on shrink_dcache_for_umount() for a description of the
 *   locking
 */
static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
{
	struct dentry *parent;

	BUG_ON(!IS_ROOT(dentry));

	for (;;) {
		/* descend to the first leaf in the current subtree */
		while (!list_empty(&dentry->d_subdirs))
			dentry = list_entry(dentry->d_subdirs.next,
					    struct dentry, d_u.d_child);

		/* consume the dentries from this leaf up through its parents
		 * until we find one with children or run out altogether */
		do {
			struct inode *inode;

			/* detach from the system */
			dentry_lru_del(dentry);
			__d_shrink(dentry);

			if (dentry->d_count != 0) {
				printk(KERN_ERR
				       "BUG: Dentry %p{i=%lx,n=%s}"
				       " still in use (%d)"
				       " [unmount of %s %s]\n",
				       dentry,
				       dentry->d_inode ?
				       dentry->d_inode->i_ino : 0UL,
				       dentry->d_name.name,
				       dentry->d_count,
				       dentry->d_sb->s_type->name,
				       dentry->d_sb->s_id);
				BUG();
			}

			if (IS_ROOT(dentry)) {
				parent = NULL;
				list_del(&dentry->d_u.d_child);
			} else {
				parent = dentry->d_parent;
				parent->d_count--;
				list_del(&dentry->d_u.d_child);
			}

			inode = dentry->d_inode;
			if (inode) {
				dentry->d_inode = NULL;
				list_del_init(&dentry->d_alias);
				if (dentry->d_op && dentry->d_op->d_iput)
					dentry->d_op->d_iput(dentry, inode);
				else
					iput(inode);
			}

			d_free(dentry);

			/* finished when we fall off the top of the tree,
			 * otherwise we ascend to the parent and move to the
			 * next sibling if there is one */
			if (!parent)
				return;
			dentry = parent;
		} while (list_empty(&dentry->d_subdirs));

		dentry = list_entry(dentry->d_subdirs.next,
				    struct dentry, d_u.d_child);
	}
}

/*
 * destroy the dentries attached to a superblock on unmounting
 * - we don't need to use dentry->d_lock because:
 *   - the superblock is detached from all mountings and open files, so the
 *     dentry trees will not be rearranged by the VFS
 *   - s_umount is write-locked, so the memory pressure shrinker will ignore
 *     any dentries belonging to this superblock that it comes across
 *   - the filesystem itself is no longer permitted to rearrange the dentries
 *     in this superblock
 */
void shrink_dcache_for_umount(struct super_block *sb)
{
	struct dentry *dentry;

	if (down_read_trylock(&sb->s_umount))
		BUG();

	dentry = sb->s_root;
	sb->s_root = NULL;
	dentry->d_count--;
	shrink_dcache_for_umount_subtree(dentry);

	while (!hlist_bl_empty(&sb->s_anon)) {
		dentry = hlist_bl_entry(hlist_bl_first(&sb->s_anon), struct dentry, d_hash);
		shrink_dcache_for_umount_subtree(dentry);
	}
}

/*
 * This tries to ascend one level of parenthood, but
 * we can race with renaming, so we need to re-check
 * the parenthood after dropping the lock and check
 * that the sequence number still matches.
 */
static struct dentry *try_to_ascend(struct dentry *old, int locked, unsigned seq)
{
	struct dentry *new = old->d_parent;

	rcu_read_lock();
	spin_unlock(&old->d_lock);
	spin_lock(&new->d_lock);

	/*
	 * might go back up the wrong parent if we have had a rename
	 * or deletion
	 */
	if (new != old->d_parent ||
			(old->d_flags & DCACHE_DISCONNECTED) ||
			(!locked && read_seqretry(&rename_lock, seq))) {
		spin_unlock(&new->d_lock);
		new = NULL;
	}
	rcu_read_unlock();
	return new;
}


/*
 * Search for at least 1 mount point in the dentry's subdirs.
 * We descend to the next level whenever the d_subdirs
 * list is non-empty and continue searching.
 */

/**
 * have_submounts - check for mounts over a dentry
 * @parent: dentry to check.
 *
 * Return true if the parent or its subdirectories contain
 * a mount point
 */
int have_submounts(struct dentry *parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;

	if (d_mountpoint(parent))
		goto positive;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* Have we found a mount point ? */
		if (d_mountpoint(dentry)) {
			spin_unlock(&dentry->d_lock);
			spin_unlock(&this_parent->d_lock);
			goto positive;
		}
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}
		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 0; /* No mount points found in tree */
positive:
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return 1;

rename_retry:
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}
EXPORT_SYMBOL(have_submounts);

/*
 * Search the dentry child list for the specified parent,
 * and move any unused dentries to the end of the unused
 * list for prune_dcache(). We descend to the next level
 * whenever the d_subdirs list is non-empty and continue
 * searching.
 *
 * It returns zero iff there are no unused children,
 * otherwise it returns the number of children moved to
 * the end of the unused list. This may not be the total
 * number of unused children, because select_parent can
 * drop the lock and return early due to latency
 * constraints.
 */
static int select_parent(struct dentry * parent)
{
	struct dentry *this_parent;
	struct list_head *next;
	unsigned seq;
	int found = 0;
	int locked = 0;

	seq = read_seqbegin(&rename_lock);
again:
	this_parent = parent;
	spin_lock(&this_parent->d_lock);
repeat:
	next = this_parent->d_subdirs.next;
resume:
	while (next != &this_parent->d_subdirs) {
		struct list_head *tmp = next;
		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
		next = tmp->next;

		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);

		/*
		 * move only zero ref count dentries to the end
		 * of the unused list for prune_dcache
		 */
		if (!dentry->d_count) {
			dentry_lru_move_tail(dentry);
			found++;
		} else {
			dentry_lru_del(dentry);
		}

		/*
		 * We can return to the caller if we have found some (this
		 * ensures forward progress). We'll be coming back to find
		 * the rest.
		 */
		if (found && need_resched()) {
			spin_unlock(&dentry->d_lock);
			goto out;
		}

		/*
		 * Descend a level if the d_subdirs list is non-empty.
		 */
		if (!list_empty(&dentry->d_subdirs)) {
			spin_unlock(&this_parent->d_lock);
			spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
			this_parent = dentry;
			spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
			goto repeat;
		}

		spin_unlock(&dentry->d_lock);
	}
	/*
	 * All done at this level ... ascend and resume the search.
	 */
	if (this_parent != parent) {
		struct dentry *child = this_parent;
		this_parent = try_to_ascend(this_parent, locked, seq);
		if (!this_parent)
			goto rename_retry;
		next = child->d_u.d_child.next;
		goto resume;
	}
out:
	spin_unlock(&this_parent->d_lock);
	if (!locked && read_seqretry(&rename_lock, seq))
		goto rename_retry;
	if (locked)
		write_sequnlock(&rename_lock);
	return found;

rename_retry:
	if (found)
		return found;
	locked = 1;
	write_seqlock(&rename_lock);
	goto again;
}

/**
 * shrink_dcache_parent - prune dcache
 * @parent: parent of entries to prune
 *
 * Prune the dcache to remove unused children of the parent dentry.
 */
void shrink_dcache_parent(struct dentry * parent)
{
	struct super_block *sb = parent->d_sb;
	int found;

	while ((found = select_parent(parent)) != 0)
		__shrink_dcache_sb(sb, found, 0);
}
EXPORT_SYMBOL(shrink_dcache_parent);

/**
 * __d_alloc - allocate a dcache entry
 * @sb: filesystem it will belong to
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry;
	char *dname;

	dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
	if (!dentry)
		return NULL;

	if (name->len > DNAME_INLINE_LEN-1) {
		dname = kmalloc(name->len + 1, GFP_KERNEL);
		if (!dname) {
			kmem_cache_free(dentry_cache, dentry);
			return NULL;
		}
	} else {
		dname = dentry->d_iname;
	}
	dentry->d_name.name = dname;

	dentry->d_name.len = name->len;
	dentry->d_name.hash = name->hash;
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;

	dentry->d_count = 1;
	dentry->d_flags = 0;
	spin_lock_init(&dentry->d_lock);
	seqcount_init(&dentry->d_seq);
	dentry->d_inode = NULL;
	dentry->d_parent = dentry;
	dentry->d_sb = sb;
	dentry->d_op = NULL;
	dentry->d_fsdata = NULL;
	INIT_HLIST_BL_NODE(&dentry->d_hash);
	INIT_LIST_HEAD(&dentry->d_lru);
	INIT_LIST_HEAD(&dentry->d_subdirs);
	INIT_LIST_HEAD(&dentry->d_alias);
	INIT_LIST_HEAD(&dentry->d_u.d_child);
	d_set_d_op(dentry, dentry->d_sb->s_d_op);

	this_cpu_inc(nr_dentry);

	return dentry;
}

/**
 * d_alloc - allocate a dcache entry
 * @parent: parent of entry to allocate
 * @name: qstr of the name
 *
 * Allocates a dentry. It returns %NULL if there is insufficient memory
 * available. On a success the dentry is returned. The name passed in is
 * copied and the copy passed in may be reused after this call.
 */
struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(parent->d_sb, name);
	if (!dentry)
		return NULL;

	spin_lock(&parent->d_lock);
	/*
	 * don't need child lock because it is not subject
	 * to concurrency here
	 */
	__dget_dlock(parent);
	dentry->d_parent = parent;
	list_add(&dentry->d_u.d_child, &parent->d_subdirs);
	spin_unlock(&parent->d_lock);

	return dentry;
}
EXPORT_SYMBOL(d_alloc);

struct dentry *d_alloc_pseudo(struct super_block *sb, const struct qstr *name)
{
	struct dentry *dentry = __d_alloc(sb, name);
	if (dentry)
		dentry->d_flags |= DCACHE_DISCONNECTED;
	return dentry;
}
EXPORT_SYMBOL(d_alloc_pseudo);

struct dentry *d_alloc_name(struct dentry *parent, const char *name)
{
	struct qstr q;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	return d_alloc(parent, &q);
}
EXPORT_SYMBOL(d_alloc_name);

void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
{
	WARN_ON_ONCE(dentry->d_op);
	WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH |
				DCACHE_OP_COMPARE |
				DCACHE_OP_REVALIDATE |
				DCACHE_OP_DELETE ));
	dentry->d_op = op;
	if (!op)
		return;
	if (op->d_hash)
		dentry->d_flags |= DCACHE_OP_HASH;
	if (op->d_compare)
		dentry->d_flags |= DCACHE_OP_COMPARE;
	if (op->d_revalidate)
		dentry->d_flags |= DCACHE_OP_REVALIDATE;
	if (op->d_delete)
		dentry->d_flags |= DCACHE_OP_DELETE;

}
EXPORT_SYMBOL(d_set_d_op);

static void __d_instantiate(struct dentry *dentry, struct inode *inode)
{
	spin_lock(&dentry->d_lock);
	if (inode) {
		if (unlikely(IS_AUTOMOUNT(inode)))
			dentry->d_flags |= DCACHE_NEED_AUTOMOUNT;
		list_add(&dentry->d_alias, &inode->i_dentry);
	}
	dentry->d_inode = inode;
	dentry_rcuwalk_barrier(dentry);
	spin_unlock(&dentry->d_lock);
	fsnotify_d_instantiate(dentry, inode);
}

/**
 * d_instantiate - fill in inode information for a dentry
 * @entry: dentry to complete
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry.
 *
 * This turns negative dentries into productive full members
 * of society.
 *
 * NOTE! This assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
void d_instantiate(struct dentry *entry, struct inode * inode)
{
	BUG_ON(!list_empty(&entry->d_alias));
	if (inode)
		spin_lock(&inode->i_lock);
	__d_instantiate(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);
	security_d_instantiate(entry, inode);
}
EXPORT_SYMBOL(d_instantiate);
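/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a filesystem's ->create() typically allocates and fills an inode and then
 * binds it to the dentry the VFS handed in via d_instantiate().
 * my_fs_new_inode() is hypothetical.
 */
#if 0
static int my_fs_create(struct inode *dir, struct dentry *dentry, int mode,
			struct nameidata *nd)
{
	struct inode *inode = my_fs_new_inode(dir, mode);

	if (!inode)
		return -ENOSPC;
	d_instantiate(dentry, inode);	/* dentry now holds the inode ref */
	return 0;
}
#endif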

/**
 * d_instantiate_unique - instantiate a non-aliased dentry
 * @entry: dentry to instantiate
 * @inode: inode to attach to this dentry
 *
 * Fill in inode information in the entry. On success, it returns NULL.
 * If an unhashed alias of "entry" already exists, then we return the
 * aliased dentry instead and drop one reference to inode.
 *
 * Note that in order to avoid conflicts with rename() etc, the caller
 * had better be holding the parent directory semaphore.
 *
 * This also assumes that the inode count has been incremented
 * (or otherwise set) by the caller to indicate that it is now
 * in use by the dcache.
 */
static struct dentry *__d_instantiate_unique(struct dentry *entry,
					     struct inode *inode)
{
	struct dentry *alias;
	int len = entry->d_name.len;
	const char *name = entry->d_name.name;
	unsigned int hash = entry->d_name.hash;

	if (!inode) {
		__d_instantiate(entry, NULL);
		return NULL;
	}

	list_for_each_entry(alias, &inode->i_dentry, d_alias) {
		struct qstr *qstr = &alias->d_name;

		/*
		 * Don't need alias->d_lock here, because aliases with
		 * d_parent == entry->d_parent are not subject to name or
		 * parent changes, because the parent inode i_mutex is held.
		 */
		if (qstr->hash != hash)
			continue;
		if (alias->d_parent != entry->d_parent)
			continue;
		if (dentry_cmp(qstr->name, qstr->len, name, len))
			continue;
		__dget(alias);
		return alias;
	}

	__d_instantiate(entry, inode);
	return NULL;
}

struct dentry *d_instantiate_unique(struct dentry *entry, struct inode *inode)
{
	struct dentry *result;

	BUG_ON(!list_empty(&entry->d_alias));

	if (inode)
		spin_lock(&inode->i_lock);
	result = __d_instantiate_unique(entry, inode);
	if (inode)
		spin_unlock(&inode->i_lock);

	if (!result) {
		security_d_instantiate(entry, inode);
		return NULL;
	}

	BUG_ON(!d_unhashed(result));
	iput(inode);
	return result;
}

EXPORT_SYMBOL(d_instantiate_unique);

/**
 * d_alloc_root - allocate root dentry
 * @root_inode: inode to allocate the root for
 *
 * Allocate a root ("/") dentry for the inode given. The inode is
 * instantiated and returned. %NULL is returned if there is insufficient
 * memory or the inode passed is %NULL.
 */
struct dentry * d_alloc_root(struct inode * root_inode)
{
	struct dentry *res = NULL;

	if (root_inode) {
		static const struct qstr name = { .name = "/", .len = 1 };

		res = __d_alloc(root_inode->i_sb, &name);
		if (res)
			d_instantiate(res, root_inode);
	}
	return res;
}
EXPORT_SYMBOL(d_alloc_root);

static struct dentry * __d_find_any_alias(struct inode *inode)
{
	struct dentry *alias;

	if (list_empty(&inode->i_dentry))
		return NULL;
	alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias);
	__dget(alias);
	return alias;
}

static struct dentry * d_find_any_alias(struct inode *inode)
{
	struct dentry *de;

	spin_lock(&inode->i_lock);
	de = __d_find_any_alias(inode);
	spin_unlock(&inode->i_lock);
	return de;
}


/**
 * d_obtain_alias - find or allocate a dentry for a given inode
 * @inode: inode to allocate the dentry for
 *
 * Obtain a dentry for an inode resulting from NFS filehandle conversion or
 * similar open by handle operations. The returned dentry may be anonymous,
 * or may have a full name (if the inode was already in the cache).
 *
 * When called on a directory inode, we must ensure that the inode only ever
 * has one dentry. If a dentry is found, that is returned instead of
 * allocating a new one.
 *
 * On successful return, the reference to the inode has been transferred
 * to the dentry. In case of an error the reference on the inode is released.
 * To make it easier to use in export operations a %NULL or IS_ERR inode may
 * be passed in and the error will be propagated to the return value,
 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
 */
struct dentry *d_obtain_alias(struct inode *inode)
{
	static const struct qstr anonstring = { .name = "" };
	struct dentry *tmp;
	struct dentry *res;

	if (!inode)
		return ERR_PTR(-ESTALE);
	if (IS_ERR(inode))
		return ERR_CAST(inode);

	res = d_find_any_alias(inode);
	if (res)
		goto out_iput;

	tmp = __d_alloc(inode->i_sb, &anonstring);
	if (!tmp) {
		res = ERR_PTR(-ENOMEM);
		goto out_iput;
	}

	spin_lock(&inode->i_lock);
	res = __d_find_any_alias(inode);
	if (res) {
		spin_unlock(&inode->i_lock);
		dput(tmp);
		goto out_iput;
	}

	/* attach a disconnected dentry */
	spin_lock(&tmp->d_lock);
	tmp->d_inode = inode;
	tmp->d_flags |= DCACHE_DISCONNECTED;
	list_add(&tmp->d_alias, &inode->i_dentry);
	hlist_bl_lock(&tmp->d_sb->s_anon);
	hlist_bl_add_head(&tmp->d_hash, &tmp->d_sb->s_anon);
	hlist_bl_unlock(&tmp->d_sb->s_anon);
	spin_unlock(&tmp->d_lock);
	spin_unlock(&inode->i_lock);
	security_d_instantiate(tmp, inode);

	return tmp;

 out_iput:
	if (res && !IS_ERR(res))
		security_d_instantiate(res, inode);
	iput(inode);
	return res;
}
EXPORT_SYMBOL(d_obtain_alias);
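/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an exportable filesystem's ->fh_to_dentry() commonly ends with
 * d_obtain_alias(), letting it absorb NULL and IS_ERR inodes.
 * my_fs_iget_from_fid() is hypothetical.
 */
#if 0
static struct dentry *my_fs_fh_to_dentry(struct super_block *sb,
					  struct fid *fid,
					  int fh_len, int fh_type)
{
	struct inode *inode = my_fs_iget_from_fid(sb, fid, fh_len, fh_type);

	return d_obtain_alias(inode);	/* handles NULL/IS_ERR for us */
}
#endif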

/**
 * d_splice_alias - splice a disconnected dentry into the tree if one exists
 * @inode:  the inode which may have a disconnected dentry
 * @dentry: a negative dentry which we want to point to the inode.
 *
 * If inode is a directory and has a 'disconnected' dentry (i.e. IS_ROOT and
 * DCACHE_DISCONNECTED), then d_move that in place of the given dentry
 * and return it, else simply d_add the inode to the dentry and return NULL.
 *
 * This is needed in the lookup routine of any filesystem that is exportable
 * (via knfsd) so that we can build dcache paths to directories effectively.
 *
 * If a dentry was found and moved, then it is returned. Otherwise NULL
 * is returned. This matches the expected return value of ->lookup.
 *
 */
struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
{
	struct dentry *new = NULL;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	if (inode && S_ISDIR(inode->i_mode)) {
		spin_lock(&inode->i_lock);
		new = __d_find_alias(inode, 1);
		if (new) {
			BUG_ON(!(new->d_flags & DCACHE_DISCONNECTED));
			spin_unlock(&inode->i_lock);
			security_d_instantiate(new, inode);
			d_move(new, dentry);
			iput(inode);
		} else {
			/* already taking inode->i_lock, so d_add() by hand */
			__d_instantiate(dentry, inode);
			spin_unlock(&inode->i_lock);
			security_d_instantiate(dentry, inode);
			d_rehash(dentry);
		}
	} else
		d_add(dentry, inode);
	return new;
}
EXPORT_SYMBOL(d_splice_alias);
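/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * an exportable filesystem's ->lookup() usually funnels its result through
 * d_splice_alias() instead of calling d_add() directly.
 * my_fs_inode_by_name() is hypothetical; a missing name yields NULL, which
 * d_splice_alias() turns into a plain negative dentry via d_add().
 */
#if 0
static struct dentry *my_fs_lookup(struct inode *dir, struct dentry *dentry,
				   struct nameidata *nd)
{
	struct inode *inode = my_fs_inode_by_name(dir, &dentry->d_name);

	return d_splice_alias(inode, dentry);
}
#endif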

/**
 * d_add_ci - lookup or allocate new dentry with case-exact name
 * @inode:  the inode case-insensitive lookup has found
 * @dentry: the negative dentry that was passed to the parent's lookup func
 * @name:   the case-exact name to be associated with the returned dentry
 *
 * This is to avoid filling the dcache with case-insensitive names to the
 * same inode, only the actual correct case is stored in the dcache for
 * case-insensitive filesystems.
 *
 * For a case-insensitive lookup match and if the case-exact dentry
 * already exists in the dcache, use it and return it.
 *
 * If no entry exists with the exact case name, allocate new dentry with
 * the exact case, and return the spliced entry.
 */
struct dentry *d_add_ci(struct dentry *dentry, struct inode *inode,
			struct qstr *name)
{
	int error;
	struct dentry *found;
	struct dentry *new;

	/*
	 * First check if a dentry matching the name already exists,
	 * if not go ahead and create it now.
	 */
	found = d_hash_and_lookup(dentry->d_parent, name);
	if (!found) {
		new = d_alloc(dentry->d_parent, name);
		if (!new) {
			error = -ENOMEM;
			goto err_out;
		}

		found = d_splice_alias(inode, new);
		if (found) {
			dput(new);
			return found;
		}
		return new;
	}

	/*
	 * If a matching dentry exists, and it's not negative use it.
	 *
	 * Decrement the reference count to balance the iget() done
	 * earlier on.
	 */
	if (found->d_inode) {
		if (unlikely(found->d_inode != inode)) {
			/* This can't happen because bad inodes are unhashed. */
			BUG_ON(!is_bad_inode(inode));
			BUG_ON(!is_bad_inode(found->d_inode));
		}
		iput(inode);
		return found;
	}

	/*
	 * We are going to instantiate this dentry, unhash it and clear the
	 * lookup flag so we can do that.
	 */
	if (unlikely(d_need_lookup(found)))
		d_clear_need_lookup(found);

	/*
	 * Negative dentry: instantiate it unless the inode is a directory and
	 * already has a dentry.
	 */
	new = d_splice_alias(inode, found);
	if (new) {
		dput(found);
		found = new;
	}
	return found;

err_out:
	iput(inode);
	return ERR_PTR(error);
}
EXPORT_SYMBOL(d_add_ci);
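/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * a case-insensitive filesystem's ->lookup() that found the on-disk
 * (case-exact) name can route the result through d_add_ci().
 * my_fs_find_entry() and its exact-name handling are hypothetical.
 */
#if 0
static struct dentry *my_fs_ci_lookup(struct inode *dir, struct dentry *dentry,
				      struct nameidata *nd)
{
	struct qstr exact_name;
	struct inode *inode = my_fs_find_entry(dir, &dentry->d_name, &exact_name);

	if (!inode)
		return d_splice_alias(NULL, dentry);	/* negative dentry */
	return d_add_ci(dentry, inode, &exact_name);
}
#endif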
1da177e4 1647
31e6b01f
NP
1648/**
1649 * __d_lookup_rcu - search for a dentry (racy, store-free)
1650 * @parent: parent dentry
1651 * @name: qstr of name we wish to find
1652 * @seq: returns d_seq value at the point where the dentry was found
1653 * @inode: returns dentry->d_inode when the inode was found valid.
1654 * Returns: dentry, or NULL
1655 *
1656 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
1657 * resolution (store-free path walking) design described in
1658 * Documentation/filesystems/path-lookup.txt.
1659 *
1660 * This is not to be used outside core vfs.
1661 *
1662 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
1663 * held, and rcu_read_lock held. The returned dentry must not be stored into
1664 * without taking d_lock and checking d_seq sequence count against @seq
1665 * returned here.
1666 *
1667 * A refcount may be taken on the found dentry with the __d_rcu_to_refcount
1668 * function.
1669 *
1670 * Alternatively, __d_lookup_rcu may be called again to look up the child of
1671 * the returned dentry, so long as its parent's seqlock is checked after the
1672 * child is looked up. Thus, an interlocking stepping of sequence lock checks
1673 * is formed, giving integrity down the path walk.
1674 */
1675struct dentry *__d_lookup_rcu(struct dentry *parent, struct qstr *name,
1676 unsigned *seq, struct inode **inode)
1677{
1678 unsigned int len = name->len;
1679 unsigned int hash = name->hash;
1680 const unsigned char *str = name->name;
b07ad996 1681 struct hlist_bl_head *b = d_hash(parent, hash);
ceb5bdc2 1682 struct hlist_bl_node *node;
31e6b01f
NP
1683 struct dentry *dentry;
1684
1685 /*
1686 * Note: There is significant duplication with __d_lookup_rcu which is
1687 * required to prevent single threaded performance regressions
1688 * especially on architectures where smp_rmb (in seqcounts) are costly.
1689 * Keep the two functions in sync.
1690 */
1691
1692 /*
1693 * The hash list is protected using RCU.
1694 *
1695 * Carefully use d_seq when comparing a candidate dentry, to avoid
1696 * races with d_move().
1697 *
1698 * It is possible that concurrent renames can mess up our list
1699 * walk here and result in missing our dentry, resulting in the
1700 * false-negative result. d_lookup() protects against concurrent
1701 * renames using rename_lock seqlock.
1702 *
b0a4bb83 1703 * See Documentation/filesystems/path-lookup.txt for more details.
31e6b01f 1704 */
b07ad996 1705 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
31e6b01f
NP
1706 struct inode *i;
1707 const char *tname;
1708 int tlen;
1709
1710 if (dentry->d_name.hash != hash)
1711 continue;
1712
1713seqretry:
1714 *seq = read_seqcount_begin(&dentry->d_seq);
1715 if (dentry->d_parent != parent)
1716 continue;
1717 if (d_unhashed(dentry))
1718 continue;
1719 tlen = dentry->d_name.len;
1720 tname = dentry->d_name.name;
1721 i = dentry->d_inode;
e1bb5782 1722 prefetch(tname);
31e6b01f
NP
1723 /*
1724 * This seqcount check is required to ensure name and
1725 * len are loaded atomically, so as not to walk off the
1726 * edge of memory when walking. If we could load this
1727 * atomically some other way, we could drop this check.
1728 */
1729 if (read_seqcount_retry(&dentry->d_seq, *seq))
1730 goto seqretry;
fb045adb 1731 if (parent->d_flags & DCACHE_OP_COMPARE) {
31e6b01f
NP
1732 if (parent->d_op->d_compare(parent, *inode,
1733 dentry, i,
1734 tlen, tname, name))
1735 continue;
1736 } else {
9d55c369 1737 if (dentry_cmp(tname, tlen, str, len))
31e6b01f
NP
1738 continue;
1739 }
1740 /*
1741 * No extra seqcount check is required after the name
1742 * compare. The caller must perform a seqcount check in
1743 * order to do anything useful with the returned dentry
1744 * anyway.
1745 */
1746 *inode = i;
1747 return dentry;
1748 }
1749 return NULL;
1750}
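/*
 * A minimal rcu-walk sketch (hypothetical caller; the real consumers are
 * the path-walk routines in fs/namei.c).  It only illustrates the calling
 * convention described above: rcu_read_lock() is already held, and the
 * result may not be used outside rcu-walk until d_lock is taken and d_seq
 * is re-checked against the returned @seq.
 */
static struct dentry *example_rcu_lookup_step(struct dentry *parent,
					      struct qstr *name)
{
	struct inode *inode;
	unsigned seq;
	struct dentry *dentry;

	dentry = __d_lookup_rcu(parent, name, &seq, &inode);
	if (!dentry)
		return NULL;	/* caller would drop out of rcu-walk here */
	/*
	 * Before storing or dereferencing the result outside rcu-walk, the
	 * caller must validate it, e.g. by taking dentry->d_lock and
	 * checking read_seqcount_retry(&dentry->d_seq, seq).
	 */
	return dentry;
}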
1751
1da177e4
LT
1752/**
1753 * d_lookup - search for a dentry
1754 * @parent: parent dentry
1755 * @name: qstr of name we wish to find
b04f784e 1756 * Returns: dentry, or NULL
1da177e4 1757 *
b04f784e
NP
1758 * d_lookup searches the children of the parent dentry for the name in
1759 * question. If the dentry is found its reference count is incremented and the
1760 * dentry is returned. The caller must use dput to free the entry when it has
1761 * finished using it. %NULL is returned if the dentry does not exist.
1da177e4 1762 */
31e6b01f 1763struct dentry *d_lookup(struct dentry *parent, struct qstr *name)
1da177e4 1764{
31e6b01f 1765 struct dentry *dentry;
949854d0 1766 unsigned seq;
1da177e4
LT
1767
1768 do {
1769 seq = read_seqbegin(&rename_lock);
1770 dentry = __d_lookup(parent, name);
1771 if (dentry)
1772 break;
1773 } while (read_seqretry(&rename_lock, seq));
1774 return dentry;
1775}
ec4f8605 1776EXPORT_SYMBOL(d_lookup);
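/*
 * Illustrative d_lookup() usage (a sketch, not taken from this file): the
 * returned dentry carries a reference that must be dropped with dput().
 */
static int example_name_is_cached(struct dentry *dir, struct qstr *name)
{
	struct dentry *dentry = d_lookup(dir, name);

	if (!dentry)
		return 0;
	dput(dentry);		/* drop the reference d_lookup took */
	return 1;
}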
1da177e4 1777
31e6b01f 1778/**
b04f784e
NP
1779 * __d_lookup - search for a dentry (racy)
1780 * @parent: parent dentry
1781 * @name: qstr of name we wish to find
1782 * Returns: dentry, or NULL
1783 *
1784 * __d_lookup is like d_lookup, however it may (rarely) return a
1785 * false-negative result due to unrelated rename activity.
1786 *
1787 * __d_lookup is slightly faster because it avoids the rename_lock read
1788 * seqlock; however, it must be used carefully, e.g. with a following
1789 * d_lookup in the case of failure.
1790 *
1791 * __d_lookup callers must be commented.
1792 */
31e6b01f 1793struct dentry *__d_lookup(struct dentry *parent, struct qstr *name)
1da177e4
LT
1794{
1795 unsigned int len = name->len;
1796 unsigned int hash = name->hash;
1797 const unsigned char *str = name->name;
b07ad996 1798 struct hlist_bl_head *b = d_hash(parent, hash);
ceb5bdc2 1799 struct hlist_bl_node *node;
31e6b01f 1800 struct dentry *found = NULL;
665a7583 1801 struct dentry *dentry;
1da177e4 1802
31e6b01f
NP
1803 /*
1804 * Note: There is significant duplication with __d_lookup_rcu which is
1805 * required to prevent single threaded performance regressions
1806 * especially on architectures where smp_rmb (in seqcounts) are costly.
1807 * Keep the two functions in sync.
1808 */
1809
b04f784e
NP
1810 /*
1811 * The hash list is protected using RCU.
1812 *
1813 * Take d_lock when comparing a candidate dentry, to avoid races
1814 * with d_move().
1815 *
1816 * It is possible that concurrent renames can mess up our list
1817 * walk here and result in missing our dentry, resulting in the
1818 * false-negative result. d_lookup() protects against concurrent
1819 * renames using rename_lock seqlock.
1820 *
b0a4bb83 1821 * See Documentation/filesystems/path-lookup.txt for more details.
b04f784e 1822 */
1da177e4
LT
1823 rcu_read_lock();
1824
b07ad996 1825 hlist_bl_for_each_entry_rcu(dentry, node, b, d_hash) {
31e6b01f
NP
1826 const char *tname;
1827 int tlen;
1da177e4 1828
1da177e4
LT
1829 if (dentry->d_name.hash != hash)
1830 continue;
1da177e4
LT
1831
1832 spin_lock(&dentry->d_lock);
1da177e4
LT
1833 if (dentry->d_parent != parent)
1834 goto next;
d0185c08
LT
1835 if (d_unhashed(dentry))
1836 goto next;
1837
1da177e4
LT
1838 /*
1839 * It is safe to compare names since d_move() cannot
1840 * change the qstr (protected by d_lock).
1841 */
31e6b01f
NP
1842 tlen = dentry->d_name.len;
1843 tname = dentry->d_name.name;
fb045adb 1844 if (parent->d_flags & DCACHE_OP_COMPARE) {
621e155a
NP
1845 if (parent->d_op->d_compare(parent, parent->d_inode,
1846 dentry, dentry->d_inode,
31e6b01f 1847 tlen, tname, name))
1da177e4
LT
1848 goto next;
1849 } else {
9d55c369 1850 if (dentry_cmp(tname, tlen, str, len))
1da177e4
LT
1851 goto next;
1852 }
1853
b7ab39f6 1854 dentry->d_count++;
d0185c08 1855 found = dentry;
1da177e4
LT
1856 spin_unlock(&dentry->d_lock);
1857 break;
1858next:
1859 spin_unlock(&dentry->d_lock);
1860 }
1861 rcu_read_unlock();
1862
1863 return found;
1864}
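/*
 * Sketch of the careful-caller pattern mentioned above (modelled loosely
 * on the fast-path lookup in fs/namei.c): try the racy __d_lookup()
 * first, and only retry with the rename_lock-protected d_lookup() when
 * the fast path misses, since that miss may be a false negative.
 */
static struct dentry *example_lookup_fast_then_slow(struct dentry *parent,
						    struct qstr *name)
{
	struct dentry *dentry = __d_lookup(parent, name);

	if (!dentry)
		dentry = d_lookup(parent, name);
	return dentry;
}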
1865
3e7e241f
EB
1866/**
1867 * d_hash_and_lookup - hash the qstr then search for a dentry
1868 * @dir: Directory to search in
1869 * @name: qstr of name we wish to find
1870 *
1871 * On hash failure or on lookup failure, NULL is returned.
1872 */
1873struct dentry *d_hash_and_lookup(struct dentry *dir, struct qstr *name)
1874{
1875 struct dentry *dentry = NULL;
1876
1877 /*
1878 * Check for a fs-specific hash function. Note that we must
1879 * calculate the standard hash first, as the d_op->d_hash()
1880 * routine may choose to leave the hash value unchanged.
1881 */
1882 name->hash = full_name_hash(name->name, name->len);
fb045adb 1883 if (dir->d_flags & DCACHE_OP_HASH) {
b1e6a015 1884 if (dir->d_op->d_hash(dir, dir->d_inode, name) < 0)
3e7e241f
EB
1885 goto out;
1886 }
1887 dentry = d_lookup(dir, name);
1888out:
1889 return dentry;
1890}
1891
1da177e4 1892/**
786a5e15 1893 * d_validate - verify dentry provided from insecure source (deprecated)
1da177e4 1894 * @dentry: The dentry alleged to be valid child of @dparent
ff5fdb61 1895 * @dparent: The parent dentry (known to be valid)
1da177e4
LT
1896 *
1897 * An insecure source has sent us a dentry; here we verify it and dget() it.
1898 * This is used by ncpfs in its readdir implementation.
1899 * Zero is returned if the dentry is invalid.
786a5e15
NP
1900 *
1901 * This function is slow for big directories and is deprecated; do not use it.
1da177e4 1902 */
d3a23e16 1903int d_validate(struct dentry *dentry, struct dentry *dparent)
1da177e4 1904{
786a5e15 1905 struct dentry *child;
d3a23e16 1906
2fd6b7f5 1907 spin_lock(&dparent->d_lock);
786a5e15
NP
1908 list_for_each_entry(child, &dparent->d_subdirs, d_u.d_child) {
1909 if (dentry == child) {
2fd6b7f5 1910 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
dc0474be 1911 __dget_dlock(dentry);
2fd6b7f5
NP
1912 spin_unlock(&dentry->d_lock);
1913 spin_unlock(&dparent->d_lock);
1da177e4
LT
1914 return 1;
1915 }
1916 }
2fd6b7f5 1917 spin_unlock(&dparent->d_lock);
786a5e15 1918
1da177e4
LT
1919 return 0;
1920}
ec4f8605 1921EXPORT_SYMBOL(d_validate);
1da177e4
LT
1922
1923/*
1924 * When a file is deleted, we have two options:
1925 * - turn this dentry into a negative dentry
1926 * - unhash this dentry and free it.
1927 *
1928 * Usually, we want to just turn this into
1929 * a negative dentry, but if anybody else is
1930 * currently using the dentry or the inode
1931 * we can't do that; we fall back on removing
1932 * it from the hash queues and waiting for
1933 * it to be deleted later when it has no users.
1934 */
1935
1936/**
1937 * d_delete - delete a dentry
1938 * @dentry: The dentry to delete
1939 *
1940 * Turn the dentry into a negative dentry if possible, otherwise
1941 * remove it from the hash queues so it can be deleted later
1942 */
1943
1944void d_delete(struct dentry * dentry)
1945{
873feea0 1946 struct inode *inode;
7a91bf7f 1947 int isdir = 0;
1da177e4
LT
1948 /*
1949 * Are we the only user?
1950 */
357f8e65 1951again:
1da177e4 1952 spin_lock(&dentry->d_lock);
873feea0
NP
1953 inode = dentry->d_inode;
1954 isdir = S_ISDIR(inode->i_mode);
b7ab39f6 1955 if (dentry->d_count == 1) {
873feea0 1956 if (inode && !spin_trylock(&inode->i_lock)) {
357f8e65
NP
1957 spin_unlock(&dentry->d_lock);
1958 cpu_relax();
1959 goto again;
1960 }
13e3c5e5 1961 dentry->d_flags &= ~DCACHE_CANT_MOUNT;
31e6b01f 1962 dentry_unlink_inode(dentry);
7a91bf7f 1963 fsnotify_nameremove(dentry, isdir);
1da177e4
LT
1964 return;
1965 }
1966
1967 if (!d_unhashed(dentry))
1968 __d_drop(dentry);
1969
1970 spin_unlock(&dentry->d_lock);
7a91bf7f
JM
1971
1972 fsnotify_nameremove(dentry, isdir);
1da177e4 1973}
ec4f8605 1974EXPORT_SYMBOL(d_delete);
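/*
 * A stripped-down sketch of the caller side (loosely modelled on
 * vfs_unlink() in fs/namei.c): once the filesystem's ->unlink() has
 * succeeded, d_delete() either turns the dentry negative or unhashes it.
 */
static int example_unlink(struct inode *dir, struct dentry *dentry)
{
	int error = dir->i_op->unlink(dir, dentry);

	if (!error)
		d_delete(dentry);
	return error;
}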
1da177e4 1975
b07ad996 1976static void __d_rehash(struct dentry * entry, struct hlist_bl_head *b)
1da177e4 1977{
ceb5bdc2 1978 BUG_ON(!d_unhashed(entry));
1879fd6a 1979 hlist_bl_lock(b);
dea3667b 1980 entry->d_flags |= DCACHE_RCUACCESS;
b07ad996 1981 hlist_bl_add_head_rcu(&entry->d_hash, b);
1879fd6a 1982 hlist_bl_unlock(b);
1da177e4
LT
1983}
1984
770bfad8
DH
1985static void _d_rehash(struct dentry * entry)
1986{
1987 __d_rehash(entry, d_hash(entry->d_parent, entry->d_name.hash));
1988}
1989
1da177e4
LT
1990/**
1991 * d_rehash - add an entry back to the hash
1992 * @entry: dentry to add to the hash
1993 *
1994 * Adds a dentry to the hash according to its name.
1995 */
1996
1997void d_rehash(struct dentry * entry)
1998{
1da177e4 1999 spin_lock(&entry->d_lock);
770bfad8 2000 _d_rehash(entry);
1da177e4 2001 spin_unlock(&entry->d_lock);
1da177e4 2002}
ec4f8605 2003EXPORT_SYMBOL(d_rehash);
1da177e4 2004
fb2d5b86
NP
2005/**
2006 * dentry_update_name_case - update case insensitive dentry with a new name
2007 * @dentry: dentry to be updated
2008 * @name: new name
2009 *
2010 * Update a case insensitive dentry with new case of name.
2011 *
2012 * dentry must have been returned by d_lookup with name @name. Old and new
2013 * name lengths must match (ie. no d_compare which allows mismatched name
2014 * lengths).
2015 *
2016 * Parent inode i_mutex must be held over d_lookup and into this call (to
2017 * keep renames and concurrent inserts, and readdir(2) away).
2018 */
2019void dentry_update_name_case(struct dentry *dentry, struct qstr *name)
2020{
7ebfa57f 2021 BUG_ON(!mutex_is_locked(&dentry->d_parent->d_inode->i_mutex));
fb2d5b86
NP
2022 BUG_ON(dentry->d_name.len != name->len); /* d_lookup gives this */
2023
fb2d5b86 2024 spin_lock(&dentry->d_lock);
31e6b01f 2025 write_seqcount_begin(&dentry->d_seq);
fb2d5b86 2026 memcpy((unsigned char *)dentry->d_name.name, name->name, name->len);
31e6b01f 2027 write_seqcount_end(&dentry->d_seq);
fb2d5b86 2028 spin_unlock(&dentry->d_lock);
fb2d5b86
NP
2029}
2030EXPORT_SYMBOL(dentry_update_name_case);
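/*
 * Hypothetical use in a case-insensitive filesystem's lookup path: after
 * d_lookup() matched case-insensitively, update the cached name to the
 * on-disk spelling.  The parent's i_mutex is held as required above, and
 * the two lengths are equal by construction.
 */
static void example_fix_name_case(struct dentry *dentry,
				  struct qstr *disk_name)
{
	if (memcmp(dentry->d_name.name, disk_name->name, disk_name->len))
		dentry_update_name_case(dentry, disk_name);
}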
2031
1da177e4
LT
2032static void switch_names(struct dentry *dentry, struct dentry *target)
2033{
2034 if (dname_external(target)) {
2035 if (dname_external(dentry)) {
2036 /*
2037 * Both external: swap the pointers
2038 */
9a8d5bb4 2039 swap(target->d_name.name, dentry->d_name.name);
1da177e4
LT
2040 } else {
2041 /*
2042 * dentry:internal, target:external. Steal target's
2043 * storage and make target internal.
2044 */
321bcf92
BF
2045 memcpy(target->d_iname, dentry->d_name.name,
2046 dentry->d_name.len + 1);
1da177e4
LT
2047 dentry->d_name.name = target->d_name.name;
2048 target->d_name.name = target->d_iname;
2049 }
2050 } else {
2051 if (dname_external(dentry)) {
2052 /*
2053 * dentry:external, target:internal. Give dentry's
2054 * storage to target and make dentry internal
2055 */
2056 memcpy(dentry->d_iname, target->d_name.name,
2057 target->d_name.len + 1);
2058 target->d_name.name = dentry->d_name.name;
2059 dentry->d_name.name = dentry->d_iname;
2060 } else {
2061 /*
2062 * Both are internal. Just copy target to dentry
2063 */
2064 memcpy(dentry->d_iname, target->d_name.name,
2065 target->d_name.len + 1);
dc711ca3
AV
2066 dentry->d_name.len = target->d_name.len;
2067 return;
1da177e4
LT
2068 }
2069 }
9a8d5bb4 2070 swap(dentry->d_name.len, target->d_name.len);
1da177e4
LT
2071}
2072
2fd6b7f5
NP
2073static void dentry_lock_for_move(struct dentry *dentry, struct dentry *target)
2074{
2075 /*
2076 * XXXX: do we really need to take target->d_lock?
2077 */
2078 if (IS_ROOT(dentry) || dentry->d_parent == target->d_parent)
2079 spin_lock(&target->d_parent->d_lock);
2080 else {
2081 if (d_ancestor(dentry->d_parent, target->d_parent)) {
2082 spin_lock(&dentry->d_parent->d_lock);
2083 spin_lock_nested(&target->d_parent->d_lock,
2084 DENTRY_D_LOCK_NESTED);
2085 } else {
2086 spin_lock(&target->d_parent->d_lock);
2087 spin_lock_nested(&dentry->d_parent->d_lock,
2088 DENTRY_D_LOCK_NESTED);
2089 }
2090 }
2091 if (target < dentry) {
2092 spin_lock_nested(&target->d_lock, 2);
2093 spin_lock_nested(&dentry->d_lock, 3);
2094 } else {
2095 spin_lock_nested(&dentry->d_lock, 2);
2096 spin_lock_nested(&target->d_lock, 3);
2097 }
2098}
2099
2100static void dentry_unlock_parents_for_move(struct dentry *dentry,
2101 struct dentry *target)
2102{
2103 if (target->d_parent != dentry->d_parent)
2104 spin_unlock(&dentry->d_parent->d_lock);
2105 if (target->d_parent != target)
2106 spin_unlock(&target->d_parent->d_lock);
2107}
2108
1da177e4 2109/*
2fd6b7f5
NP
2110 * When switching names, the actual string doesn't strictly have to
2111 * be preserved in the target - because we're dropping the target
2112 * anyway. As such, we can just do a simple memcpy() to copy over
2113 * the new name before we switch.
2114 *
2115 * Note that we have to be a lot more careful about getting the hash
2116 * switched - we have to switch the hash value properly even if it
2117 * then no longer matches the actual (corrupted) string of the target.
2118 * The hash value has to match the hash queue that the dentry is on..
1da177e4 2119 */
9eaef27b 2120/*
18367501 2121 * __d_move - move a dentry
1da177e4
LT
2122 * @dentry: entry to move
2123 * @target: new dentry
2124 *
2125 * Update the dcache to reflect the move of a file name. Negative
c46c8877
JL
2126 * dcache entries should not be moved in this way. Caller must hold
2127 * rename_lock, the i_mutex of the source and target directories,
2128 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
1da177e4 2129 */
18367501 2130static void __d_move(struct dentry * dentry, struct dentry * target)
1da177e4 2131{
1da177e4
LT
2132 if (!dentry->d_inode)
2133 printk(KERN_WARNING "VFS: moving negative dcache entry\n");
2134
2fd6b7f5
NP
2135 BUG_ON(d_ancestor(dentry, target));
2136 BUG_ON(d_ancestor(target, dentry));
2137
2fd6b7f5 2138 dentry_lock_for_move(dentry, target);
1da177e4 2139
31e6b01f
NP
2140 write_seqcount_begin(&dentry->d_seq);
2141 write_seqcount_begin(&target->d_seq);
2142
ceb5bdc2
NP
2143 /* __d_drop does write_seqcount_barrier, but they're OK to nest. */
2144
2145 /*
2146 * Move the dentry to the target hash queue. Don't bother checking
2147 * for the same hash queue because of how unlikely it is.
2148 */
2149 __d_drop(dentry);
789680d1 2150 __d_rehash(dentry, d_hash(target->d_parent, target->d_name.hash));
1da177e4
LT
2151
2152 /* Unhash the target: dput() will then get rid of it */
2153 __d_drop(target);
2154
5160ee6f
ED
2155 list_del(&dentry->d_u.d_child);
2156 list_del(&target->d_u.d_child);
1da177e4
LT
2157
2158 /* Switch the names.. */
2159 switch_names(dentry, target);
9a8d5bb4 2160 swap(dentry->d_name.hash, target->d_name.hash);
1da177e4
LT
2161
2162 /* ... and switch the parents */
2163 if (IS_ROOT(dentry)) {
2164 dentry->d_parent = target->d_parent;
2165 target->d_parent = target;
5160ee6f 2166 INIT_LIST_HEAD(&target->d_u.d_child);
1da177e4 2167 } else {
9a8d5bb4 2168 swap(dentry->d_parent, target->d_parent);
1da177e4
LT
2169
2170 /* And add them back to the (new) parent lists */
5160ee6f 2171 list_add(&target->d_u.d_child, &target->d_parent->d_subdirs);
1da177e4
LT
2172 }
2173
5160ee6f 2174 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2fd6b7f5 2175
31e6b01f
NP
2176 write_seqcount_end(&target->d_seq);
2177 write_seqcount_end(&dentry->d_seq);
2178
2fd6b7f5 2179 dentry_unlock_parents_for_move(dentry, target);
1da177e4 2180 spin_unlock(&target->d_lock);
c32ccd87 2181 fsnotify_d_move(dentry);
1da177e4 2182 spin_unlock(&dentry->d_lock);
18367501
AV
2183}
2184
2185/*
2186 * d_move - move a dentry
2187 * @dentry: entry to move
2188 * @target: new dentry
2189 *
2190 * Update the dcache to reflect the move of a file name. Negative
c46c8877
JL
2191 * dcache entries should not be moved in this way. See the locking
2192 * requirements for __d_move.
18367501
AV
2193 */
2194void d_move(struct dentry *dentry, struct dentry *target)
2195{
2196 write_seqlock(&rename_lock);
2197 __d_move(dentry, target);
1da177e4 2198 write_sequnlock(&rename_lock);
9eaef27b 2199}
ec4f8605 2200EXPORT_SYMBOL(d_move);
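/*
 * Sketch of d_move() in its usual setting (hypothetical; the real work is
 * done by the VFS rename path): both parents are locked via lock_rename(),
 * which also takes s_vfs_rename_mutex when they differ, satisfying the
 * locking requirements described above.
 */
static void example_move_after_rename(struct dentry *old_dir,
				      struct dentry *new_dir,
				      struct dentry *old_dentry,
				      struct dentry *new_dentry)
{
	struct dentry *trap = lock_rename(old_dir, new_dir);

	if (old_dentry != trap && new_dentry != trap)
		d_move(old_dentry, new_dentry);
	unlock_rename(old_dir, new_dir);
}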
1da177e4 2201
e2761a11
OH
2202/**
2203 * d_ancestor - search for an ancestor
2204 * @p1: ancestor dentry
2205 * @p2: child dentry
2206 *
2207 * Returns the ancestor dentry of p2 which is a child of p1, if p1 is
2208 * an ancestor of p2, else NULL.
9eaef27b 2209 */
e2761a11 2210struct dentry *d_ancestor(struct dentry *p1, struct dentry *p2)
9eaef27b
TM
2211{
2212 struct dentry *p;
2213
871c0067 2214 for (p = p2; !IS_ROOT(p); p = p->d_parent) {
9eaef27b 2215 if (p->d_parent == p1)
e2761a11 2216 return p;
9eaef27b 2217 }
e2761a11 2218 return NULL;
9eaef27b
TM
2219}
2220
2221/*
2222 * This helper attempts to cope with remotely renamed directories
2223 *
2224 * It assumes that the caller is already holding
18367501 2225 * dentry->d_parent->d_inode->i_mutex, inode->i_lock and rename_lock
9eaef27b
TM
2226 *
2227 * Note: If ever the locking in lock_rename() changes, then please
2228 * remember to update this too...
9eaef27b 2229 */
873feea0
NP
2230static struct dentry *__d_unalias(struct inode *inode,
2231 struct dentry *dentry, struct dentry *alias)
9eaef27b
TM
2232{
2233 struct mutex *m1 = NULL, *m2 = NULL;
2234 struct dentry *ret;
2235
2236 /* If alias and dentry share a parent, then no extra locks required */
2237 if (alias->d_parent == dentry->d_parent)
2238 goto out_unalias;
2239
9eaef27b
TM
2240 /* See lock_rename() */
2241 ret = ERR_PTR(-EBUSY);
2242 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex))
2243 goto out_err;
2244 m1 = &dentry->d_sb->s_vfs_rename_mutex;
2245 if (!mutex_trylock(&alias->d_parent->d_inode->i_mutex))
2246 goto out_err;
2247 m2 = &alias->d_parent->d_inode->i_mutex;
2248out_unalias:
18367501 2249 __d_move(alias, dentry);
9eaef27b
TM
2250 ret = alias;
2251out_err:
873feea0 2252 spin_unlock(&inode->i_lock);
9eaef27b
TM
2253 if (m2)
2254 mutex_unlock(m2);
2255 if (m1)
2256 mutex_unlock(m1);
2257 return ret;
2258}
2259
770bfad8
DH
2260/*
2261 * Prepare an anonymous dentry for life in the superblock's dentry tree as a
2262 * named dentry in place of the dentry to be replaced.
2fd6b7f5 2263 * Returns with anon->d_lock held!
770bfad8
DH
2264 */
2265static void __d_materialise_dentry(struct dentry *dentry, struct dentry *anon)
2266{
2267 struct dentry *dparent, *aparent;
2268
2fd6b7f5 2269 dentry_lock_for_move(anon, dentry);
770bfad8 2270
31e6b01f
NP
2271 write_seqcount_begin(&dentry->d_seq);
2272 write_seqcount_begin(&anon->d_seq);
2273
770bfad8
DH
2274 dparent = dentry->d_parent;
2275 aparent = anon->d_parent;
2276
2fd6b7f5
NP
2277 switch_names(dentry, anon);
2278 swap(dentry->d_name.hash, anon->d_name.hash);
2279
770bfad8
DH
2280 dentry->d_parent = (aparent == anon) ? dentry : aparent;
2281 list_del(&dentry->d_u.d_child);
2282 if (!IS_ROOT(dentry))
2283 list_add(&dentry->d_u.d_child, &dentry->d_parent->d_subdirs);
2284 else
2285 INIT_LIST_HEAD(&dentry->d_u.d_child);
2286
2287 anon->d_parent = (dparent == dentry) ? anon : dparent;
2288 list_del(&anon->d_u.d_child);
2289 if (!IS_ROOT(anon))
2290 list_add(&anon->d_u.d_child, &anon->d_parent->d_subdirs);
2291 else
2292 INIT_LIST_HEAD(&anon->d_u.d_child);
2293
31e6b01f
NP
2294 write_seqcount_end(&dentry->d_seq);
2295 write_seqcount_end(&anon->d_seq);
2296
2fd6b7f5
NP
2297 dentry_unlock_parents_for_move(anon, dentry);
2298 spin_unlock(&dentry->d_lock);
2299
2300 /* anon->d_lock still locked, returns locked */
770bfad8
DH
2301 anon->d_flags &= ~DCACHE_DISCONNECTED;
2302}
2303
2304/**
2305 * d_materialise_unique - introduce an inode into the tree
2306 * @dentry: candidate dentry
2307 * @inode: inode to bind to the dentry, to which aliases may be attached
2308 *
2309 * Introduces a dentry into the tree, substituting an extant disconnected
c46c8877
JL
2310 * root directory alias in its place if there is one. Caller must hold the
2311 * i_mutex of the parent directory.
770bfad8
DH
2312 */
2313struct dentry *d_materialise_unique(struct dentry *dentry, struct inode *inode)
2314{
9eaef27b 2315 struct dentry *actual;
770bfad8
DH
2316
2317 BUG_ON(!d_unhashed(dentry));
2318
770bfad8
DH
2319 if (!inode) {
2320 actual = dentry;
360da900 2321 __d_instantiate(dentry, NULL);
357f8e65
NP
2322 d_rehash(actual);
2323 goto out_nolock;
770bfad8
DH
2324 }
2325
873feea0 2326 spin_lock(&inode->i_lock);
357f8e65 2327
9eaef27b
TM
2328 if (S_ISDIR(inode->i_mode)) {
2329 struct dentry *alias;
2330
2331 /* Does an aliased dentry already exist? */
2332 alias = __d_find_alias(inode, 0);
2333 if (alias) {
2334 actual = alias;
18367501
AV
2335 write_seqlock(&rename_lock);
2336
2337 if (d_ancestor(alias, dentry)) {
2338 /* Check for loops */
2339 actual = ERR_PTR(-ELOOP);
2340 } else if (IS_ROOT(alias)) {
2341 /* Is this an anonymous mountpoint that we
2342 * could splice into our tree? */
9eaef27b 2343 __d_materialise_dentry(dentry, alias);
18367501 2344 write_sequnlock(&rename_lock);
9eaef27b
TM
2345 __d_drop(alias);
2346 goto found;
18367501
AV
2347 } else {
2348 /* Nope, but we must(!) avoid directory
2349 * aliasing */
2350 actual = __d_unalias(inode, dentry, alias);
9eaef27b 2351 }
18367501 2352 write_sequnlock(&rename_lock);
9eaef27b
TM
2353 if (IS_ERR(actual))
2354 dput(alias);
2355 goto out_nolock;
2356 }
770bfad8
DH
2357 }
2358
2359 /* Add a unique reference */
2360 actual = __d_instantiate_unique(dentry, inode);
2361 if (!actual)
2362 actual = dentry;
357f8e65
NP
2363 else
2364 BUG_ON(!d_unhashed(actual));
770bfad8 2365
770bfad8
DH
2366 spin_lock(&actual->d_lock);
2367found:
2368 _d_rehash(actual);
2369 spin_unlock(&actual->d_lock);
873feea0 2370 spin_unlock(&inode->i_lock);
9eaef27b 2371out_nolock:
770bfad8
DH
2372 if (actual == dentry) {
2373 security_d_instantiate(dentry, inode);
2374 return NULL;
2375 }
2376
2377 iput(inode);
2378 return actual;
770bfad8 2379}
ec4f8605 2380EXPORT_SYMBOL_GPL(d_materialise_unique);
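/*
 * Sketch of the typical caller: a filesystem ->lookup() that can also be
 * reached via NFS-style file handles uses d_materialise_unique() to splice
 * in a disconnected alias if one exists.  example_fs_iget() is a
 * hypothetical helper standing in for the filesystem's inode lookup; the
 * parent directory's i_mutex is held by the VFS around ->lookup().
 */
static struct inode *example_fs_iget(struct super_block *sb,
				     struct dentry *dentry);	/* hypothetical */

static struct dentry *example_fs_lookup(struct inode *dir,
					struct dentry *dentry,
					struct nameidata *nd)
{
	struct inode *inode;

	inode = example_fs_iget(dir->i_sb, dentry);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	return d_materialise_unique(dentry, inode);
}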
770bfad8 2381
cdd16d02 2382static int prepend(char **buffer, int *buflen, const char *str, int namelen)
6092d048
RP
2383{
2384 *buflen -= namelen;
2385 if (*buflen < 0)
2386 return -ENAMETOOLONG;
2387 *buffer -= namelen;
2388 memcpy(*buffer, str, namelen);
2389 return 0;
2390}
2391
cdd16d02
MS
2392static int prepend_name(char **buffer, int *buflen, struct qstr *name)
2393{
2394 return prepend(buffer, buflen, name->name, name->len);
2395}
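/*
 * Worked example (illustrative only) of how these helpers build a path
 * backwards from the end of a buffer, the way prepend_path() below does:
 * on success the returned pointer addresses the string "/a/b".
 */
static char *example_build_slash_a_slash_b(char *buf, int buflen)
{
	char *end = buf + buflen;

	if (prepend(&end, &buflen, "\0", 1) ||	/* terminator goes in first */
	    prepend(&end, &buflen, "b", 1) ||
	    prepend(&end, &buflen, "/", 1) ||
	    prepend(&end, &buflen, "a", 1) ||
	    prepend(&end, &buflen, "/", 1))
		return ERR_PTR(-ENAMETOOLONG);
	return end;
}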
2396
1da177e4 2397/**
208898c1 2398 * prepend_path - Prepend path string to a buffer
9d1bc601
MS
2399 * @path: the dentry/vfsmount to report
2400 * @root: root vfsmnt/dentry (may be modified by this function)
f2eb6575
MS
2401 * @buffer: pointer to the end of the buffer
2402 * @buflen: pointer to buffer length
552ce544 2403 *
949854d0 2404 * Caller holds the rename_lock.
9d1bc601
MS
2405 *
2406 * If path is not reachable from the supplied root, then the value of
2407 * root is changed (without modifying refcounts).
1da177e4 2408 */
f2eb6575
MS
2409static int prepend_path(const struct path *path, struct path *root,
2410 char **buffer, int *buflen)
1da177e4 2411{
9d1bc601
MS
2412 struct dentry *dentry = path->dentry;
2413 struct vfsmount *vfsmnt = path->mnt;
f2eb6575
MS
2414 bool slash = false;
2415 int error = 0;
6092d048 2416
99b7db7b 2417 br_read_lock(vfsmount_lock);
f2eb6575 2418 while (dentry != root->dentry || vfsmnt != root->mnt) {
1da177e4
LT
2419 struct dentry * parent;
2420
1da177e4 2421 if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
552ce544 2422 /* Global root? */
1da177e4 2423 if (vfsmnt->mnt_parent == vfsmnt) {
1da177e4
LT
2424 goto global_root;
2425 }
2426 dentry = vfsmnt->mnt_mountpoint;
2427 vfsmnt = vfsmnt->mnt_parent;
1da177e4
LT
2428 continue;
2429 }
2430 parent = dentry->d_parent;
2431 prefetch(parent);
9abca360 2432 spin_lock(&dentry->d_lock);
f2eb6575 2433 error = prepend_name(buffer, buflen, &dentry->d_name);
9abca360 2434 spin_unlock(&dentry->d_lock);
f2eb6575
MS
2435 if (!error)
2436 error = prepend(buffer, buflen, "/", 1);
2437 if (error)
2438 break;
2439
2440 slash = true;
1da177e4
LT
2441 dentry = parent;
2442 }
2443
be285c71 2444out:
f2eb6575
MS
2445 if (!error && !slash)
2446 error = prepend(buffer, buflen, "/", 1);
2447
99b7db7b 2448 br_read_unlock(vfsmount_lock);
f2eb6575 2449 return error;
1da177e4
LT
2450
2451global_root:
98dc568b
MS
2452 /*
2453 * Filesystems needing to implement special "root names"
2454 * should do so with ->d_dname()
2455 */
2456 if (IS_ROOT(dentry) &&
2457 (dentry->d_name.len != 1 || dentry->d_name.name[0] != '/')) {
2458 WARN(1, "Root dentry has weird name <%.*s>\n",
2459 (int) dentry->d_name.len, dentry->d_name.name);
2460 }
9d1bc601
MS
2461 root->mnt = vfsmnt;
2462 root->dentry = dentry;
be285c71 2463 goto out;
f2eb6575 2464}
be285c71 2465
f2eb6575
MS
2466/**
2467 * __d_path - return the path of a dentry
2468 * @path: the dentry/vfsmount to report
2469 * @root: root vfsmnt/dentry (may be modified by this function)
cd956a1c 2470 * @buf: buffer to return value in
f2eb6575
MS
2471 * @buflen: buffer length
2472 *
ffd1f4ed 2473 * Convert a dentry into an ASCII path name.
f2eb6575
MS
2474 *
2475 * Returns a pointer into the buffer or an error code if the
2476 * path was too long.
2477 *
be148247 2478 * "buflen" should be positive.
f2eb6575
MS
2479 *
2480 * If path is not reachable from the supplied root, then the value of
2481 * root is changed (without modifying refcounts).
2482 */
2483char *__d_path(const struct path *path, struct path *root,
2484 char *buf, int buflen)
2485{
2486 char *res = buf + buflen;
2487 int error;
2488
2489 prepend(&res, &buflen, "\0", 1);
949854d0 2490 write_seqlock(&rename_lock);
f2eb6575 2491 error = prepend_path(path, root, &res, &buflen);
949854d0 2492 write_sequnlock(&rename_lock);
be148247 2493
f2eb6575
MS
2494 if (error)
2495 return ERR_PTR(error);
f2eb6575 2496 return res;
1da177e4
LT
2497}
2498
ffd1f4ed
MS
2499/*
2500 * same as __d_path but appends "(deleted)" for unlinked files.
2501 */
2502static int path_with_deleted(const struct path *path, struct path *root,
2503 char **buf, int *buflen)
2504{
2505 prepend(buf, buflen, "\0", 1);
2506 if (d_unlinked(path->dentry)) {
2507 int error = prepend(buf, buflen, " (deleted)", 10);
2508 if (error)
2509 return error;
2510 }
2511
2512 return prepend_path(path, root, buf, buflen);
2513}
2514
8df9d1a4
MS
2515static int prepend_unreachable(char **buffer, int *buflen)
2516{
2517 return prepend(buffer, buflen, "(unreachable)", 13);
2518}
2519
a03a8a70
JB
2520/**
2521 * d_path - return the path of a dentry
cf28b486 2522 * @path: path to report
a03a8a70
JB
2523 * @buf: buffer to return value in
2524 * @buflen: buffer length
2525 *
2526 * Convert a dentry into an ASCII path name. If the entry has been deleted
2527 * the string " (deleted)" is appended. Note that this is ambiguous.
2528 *
52afeefb
AV
2529 * Returns a pointer into the buffer or an error code if the path was
2530 * too long. Note: Callers should use the returned pointer, not the passed
2531 * in buffer, to use the name! The implementation often starts at an offset
2532 * into the buffer, and may leave 0 bytes at the start.
a03a8a70 2533 *
31f3e0b3 2534 * "buflen" should be positive.
a03a8a70 2535 */
20d4fdc1 2536char *d_path(const struct path *path, char *buf, int buflen)
1da177e4 2537{
ffd1f4ed 2538 char *res = buf + buflen;
6ac08c39 2539 struct path root;
9d1bc601 2540 struct path tmp;
ffd1f4ed 2541 int error;
1da177e4 2542
c23fbb6b
ED
2543 /*
2544 * We have various synthetic filesystems that never get mounted. On
2545 * these filesystems dentries are never used for lookup purposes, and
2546 * thus don't need to be hashed. They also don't need a name until a
2547 * user wants to identify the object in /proc/pid/fd/. The little hack
2548 * below allows us to generate a name for these objects on demand:
2549 */
cf28b486
JB
2550 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2551 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
c23fbb6b 2552
f7ad3c6b 2553 get_fs_root(current->fs, &root);
949854d0 2554 write_seqlock(&rename_lock);
9d1bc601 2555 tmp = root;
ffd1f4ed
MS
2556 error = path_with_deleted(path, &tmp, &res, &buflen);
2557 if (error)
2558 res = ERR_PTR(error);
949854d0 2559 write_sequnlock(&rename_lock);
6ac08c39 2560 path_put(&root);
1da177e4
LT
2561 return res;
2562}
ec4f8605 2563EXPORT_SYMBOL(d_path);
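/*
 * Typical d_path() usage (a sketch): the name is read from the returned
 * pointer, never from the start of the buffer, since the string is built
 * from the end of the page backwards.
 */
static void example_log_path(const struct path *path)
{
	char *buf = (char *)__get_free_page(GFP_KERNEL);
	char *name;

	if (!buf)
		return;
	name = d_path(path, buf, PAGE_SIZE);
	if (!IS_ERR(name))
		printk(KERN_DEBUG "opened: %s\n", name);
	free_page((unsigned long)buf);
}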
1da177e4 2564
8df9d1a4
MS
2565/**
2566 * d_path_with_unreachable - return the path of a dentry
2567 * @path: path to report
2568 * @buf: buffer to return value in
2569 * @buflen: buffer length
2570 *
2571 * The difference from d_path() is that this prepends "(unreachable)"
2572 * to paths which are unreachable from the current process' root.
2573 */
2574char *d_path_with_unreachable(const struct path *path, char *buf, int buflen)
2575{
2576 char *res = buf + buflen;
2577 struct path root;
2578 struct path tmp;
2579 int error;
2580
2581 if (path->dentry->d_op && path->dentry->d_op->d_dname)
2582 return path->dentry->d_op->d_dname(path->dentry, buf, buflen);
2583
2584 get_fs_root(current->fs, &root);
949854d0 2585 write_seqlock(&rename_lock);
8df9d1a4
MS
2586 tmp = root;
2587 error = path_with_deleted(path, &tmp, &res, &buflen);
2588 if (!error && !path_equal(&tmp, &root))
2589 error = prepend_unreachable(&res, &buflen);
949854d0 2590 write_sequnlock(&rename_lock);
8df9d1a4
MS
2591 path_put(&root);
2592 if (error)
2593 res = ERR_PTR(error);
2594
2595 return res;
2596}
2597
c23fbb6b
ED
2598/*
2599 * Helper function for dentry_operations.d_dname() members
2600 */
2601char *dynamic_dname(struct dentry *dentry, char *buffer, int buflen,
2602 const char *fmt, ...)
2603{
2604 va_list args;
2605 char temp[64];
2606 int sz;
2607
2608 va_start(args, fmt);
2609 sz = vsnprintf(temp, sizeof(temp), fmt, args) + 1;
2610 va_end(args);
2611
2612 if (sz > sizeof(temp) || sz > buflen)
2613 return ERR_PTR(-ENAMETOOLONG);
2614
2615 buffer += buflen - sz;
2616 return memcpy(buffer, temp, sz);
2617}
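/*
 * Example ->d_dname() built on dynamic_dname(), modelled on what pipefs
 * and similar never-mounted filesystems do (the "example:[%lu]" format is
 * illustrative only).
 */
static char *example_dname(struct dentry *dentry, char *buffer, int buflen)
{
	return dynamic_dname(dentry, buffer, buflen, "example:[%lu]",
				dentry->d_inode->i_ino);
}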
2618
6092d048
RP
2619/*
2620 * Write full pathname from the root of the filesystem into the buffer.
2621 */
ec2447c2 2622static char *__dentry_path(struct dentry *dentry, char *buf, int buflen)
6092d048
RP
2623{
2624 char *end = buf + buflen;
2625 char *retval;
2626
6092d048 2627 prepend(&end, &buflen, "\0", 1);
6092d048
RP
2628 if (buflen < 1)
2629 goto Elong;
2630 /* Get '/' right */
2631 retval = end-1;
2632 *retval = '/';
2633
cdd16d02
MS
2634 while (!IS_ROOT(dentry)) {
2635 struct dentry *parent = dentry->d_parent;
9abca360 2636 int error;
6092d048 2637
6092d048 2638 prefetch(parent);
9abca360
NP
2639 spin_lock(&dentry->d_lock);
2640 error = prepend_name(&end, &buflen, &dentry->d_name);
2641 spin_unlock(&dentry->d_lock);
2642 if (error != 0 || prepend(&end, &buflen, "/", 1) != 0)
6092d048
RP
2643 goto Elong;
2644
2645 retval = end;
2646 dentry = parent;
2647 }
c103135c
AV
2648 return retval;
2649Elong:
2650 return ERR_PTR(-ENAMETOOLONG);
2651}
ec2447c2
NP
2652
2653char *dentry_path_raw(struct dentry *dentry, char *buf, int buflen)
2654{
2655 char *retval;
2656
949854d0 2657 write_seqlock(&rename_lock);
ec2447c2 2658 retval = __dentry_path(dentry, buf, buflen);
949854d0 2659 write_sequnlock(&rename_lock);
ec2447c2
NP
2660
2661 return retval;
2662}
2663EXPORT_SYMBOL(dentry_path_raw);
c103135c
AV
2664
2665char *dentry_path(struct dentry *dentry, char *buf, int buflen)
2666{
2667 char *p = NULL;
2668 char *retval;
2669
949854d0 2670 write_seqlock(&rename_lock);
c103135c
AV
2671 if (d_unlinked(dentry)) {
2672 p = buf + buflen;
2673 if (prepend(&p, &buflen, "//deleted", 10) != 0)
2674 goto Elong;
2675 buflen++;
2676 }
2677 retval = __dentry_path(dentry, buf, buflen);
949854d0 2678 write_sequnlock(&rename_lock);
c103135c
AV
2679 if (!IS_ERR(retval) && p)
2680 *p = '/'; /* restore '/' overridden with '\0' */
6092d048
RP
2681 return retval;
2682Elong:
6092d048
RP
2683 return ERR_PTR(-ENAMETOOLONG);
2684}
2685
1da177e4
LT
2686/*
2687 * NOTE! The user-level library version returns a
2688 * character pointer. The kernel system call just
2689 * returns the length of the buffer filled (which
2690 * includes the ending '\0' character), or a negative
2691 * error value. So libc would do something like
2692 *
2693 * char *getcwd(char * buf, size_t size)
2694 * {
2695 * int retval;
2696 *
2697 * retval = sys_getcwd(buf, size);
2698 * if (retval >= 0)
2699 * return buf;
2700 * errno = -retval;
2701 * return NULL;
2702 * }
2703 */
3cdad428 2704SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
1da177e4 2705{
552ce544 2706 int error;
6ac08c39 2707 struct path pwd, root;
552ce544 2708 char *page = (char *) __get_free_page(GFP_USER);
1da177e4
LT
2709
2710 if (!page)
2711 return -ENOMEM;
2712
f7ad3c6b 2713 get_fs_root_and_pwd(current->fs, &root, &pwd);
1da177e4 2714
552ce544 2715 error = -ENOENT;
949854d0 2716 write_seqlock(&rename_lock);
f3da392e 2717 if (!d_unlinked(pwd.dentry)) {
552ce544 2718 unsigned long len;
9d1bc601 2719 struct path tmp = root;
8df9d1a4
MS
2720 char *cwd = page + PAGE_SIZE;
2721 int buflen = PAGE_SIZE;
1da177e4 2722
8df9d1a4
MS
2723 prepend(&cwd, &buflen, "\0", 1);
2724 error = prepend_path(&pwd, &tmp, &cwd, &buflen);
949854d0 2725 write_sequnlock(&rename_lock);
552ce544 2726
8df9d1a4 2727 if (error)
552ce544
LT
2728 goto out;
2729
8df9d1a4
MS
2730 /* Unreachable from current root */
2731 if (!path_equal(&tmp, &root)) {
2732 error = prepend_unreachable(&cwd, &buflen);
2733 if (error)
2734 goto out;
2735 }
2736
552ce544
LT
2737 error = -ERANGE;
2738 len = PAGE_SIZE + page - cwd;
2739 if (len <= size) {
2740 error = len;
2741 if (copy_to_user(buf, cwd, len))
2742 error = -EFAULT;
2743 }
949854d0
NP
2744 } else {
2745 write_sequnlock(&rename_lock);
949854d0 2746 }
1da177e4
LT
2747
2748out:
6ac08c39
JB
2749 path_put(&pwd);
2750 path_put(&root);
1da177e4
LT
2751 free_page((unsigned long) page);
2752 return error;
2753}
2754
2755/*
2756 * Test whether new_dentry is a subdirectory of old_dentry.
2757 *
2758 * Trivially implemented using the dcache structure
2759 */
2760
2761/**
2762 * is_subdir - is new dentry a subdirectory of old_dentry
2763 * @new_dentry: new dentry
2764 * @old_dentry: old dentry
2765 *
2766 * Returns 1 if new_dentry is a subdirectory of old_dentry (at any depth).
2767 * Returns 0 otherwise.
2768 * Caller must ensure that "new_dentry" is pinned before calling is_subdir()
2769 */
2770
e2761a11 2771int is_subdir(struct dentry *new_dentry, struct dentry *old_dentry)
1da177e4
LT
2772{
2773 int result;
949854d0 2774 unsigned seq;
1da177e4 2775
e2761a11
OH
2776 if (new_dentry == old_dentry)
2777 return 1;
2778
e2761a11 2779 do {
1da177e4 2780 /* for restarting inner loop in case of seq retry */
1da177e4 2781 seq = read_seqbegin(&rename_lock);
949854d0
NP
2782 /*
2783 * Need rcu_read_lock() to protect against d_parent being changed
2784 * under us due to a concurrent d_move()
2785 */
2786 rcu_read_lock();
e2761a11 2787 if (d_ancestor(old_dentry, new_dentry))
1da177e4 2788 result = 1;
e2761a11
OH
2789 else
2790 result = 0;
949854d0 2791 rcu_read_unlock();
1da177e4 2792 } while (read_seqretry(&rename_lock, seq));
1da177e4
LT
2793
2794 return result;
2795}
2796
2096f759
AV
2797int path_is_under(struct path *path1, struct path *path2)
2798{
2799 struct vfsmount *mnt = path1->mnt;
2800 struct dentry *dentry = path1->dentry;
2801 int res;
99b7db7b
NP
2802
2803 br_read_lock(vfsmount_lock);
2096f759
AV
2804 if (mnt != path2->mnt) {
2805 for (;;) {
2806 if (mnt->mnt_parent == mnt) {
99b7db7b 2807 br_read_unlock(vfsmount_lock);
2096f759
AV
2808 return 0;
2809 }
2810 if (mnt->mnt_parent == path2->mnt)
2811 break;
2812 mnt = mnt->mnt_parent;
2813 }
2814 dentry = mnt->mnt_mountpoint;
2815 }
2816 res = is_subdir(dentry, path2->dentry);
99b7db7b 2817 br_read_unlock(vfsmount_lock);
2096f759
AV
2818 return res;
2819}
2820EXPORT_SYMBOL(path_is_under);
2821
1da177e4
LT
2822void d_genocide(struct dentry *root)
2823{
949854d0 2824 struct dentry *this_parent;
1da177e4 2825 struct list_head *next;
949854d0 2826 unsigned seq;
58db63d0 2827 int locked = 0;
1da177e4 2828
949854d0 2829 seq = read_seqbegin(&rename_lock);
58db63d0
NP
2830again:
2831 this_parent = root;
2fd6b7f5 2832 spin_lock(&this_parent->d_lock);
1da177e4
LT
2833repeat:
2834 next = this_parent->d_subdirs.next;
2835resume:
2836 while (next != &this_parent->d_subdirs) {
2837 struct list_head *tmp = next;
5160ee6f 2838 struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
1da177e4 2839 next = tmp->next;
949854d0 2840
da502956
NP
2841 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
2842 if (d_unhashed(dentry) || !dentry->d_inode) {
2843 spin_unlock(&dentry->d_lock);
1da177e4 2844 continue;
da502956 2845 }
1da177e4 2846 if (!list_empty(&dentry->d_subdirs)) {
2fd6b7f5
NP
2847 spin_unlock(&this_parent->d_lock);
2848 spin_release(&dentry->d_lock.dep_map, 1, _RET_IP_);
1da177e4 2849 this_parent = dentry;
2fd6b7f5 2850 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_);
1da177e4
LT
2851 goto repeat;
2852 }
949854d0
NP
2853 if (!(dentry->d_flags & DCACHE_GENOCIDE)) {
2854 dentry->d_flags |= DCACHE_GENOCIDE;
2855 dentry->d_count--;
2856 }
b7ab39f6 2857 spin_unlock(&dentry->d_lock);
1da177e4
LT
2858 }
2859 if (this_parent != root) {
c826cb7d 2860 struct dentry *child = this_parent;
949854d0
NP
2861 if (!(this_parent->d_flags & DCACHE_GENOCIDE)) {
2862 this_parent->d_flags |= DCACHE_GENOCIDE;
2863 this_parent->d_count--;
2864 }
c826cb7d
LT
2865 this_parent = try_to_ascend(this_parent, locked, seq);
2866 if (!this_parent)
949854d0 2867 goto rename_retry;
949854d0 2868 next = child->d_u.d_child.next;
1da177e4
LT
2869 goto resume;
2870 }
2fd6b7f5 2871 spin_unlock(&this_parent->d_lock);
58db63d0 2872 if (!locked && read_seqretry(&rename_lock, seq))
949854d0 2873 goto rename_retry;
58db63d0
NP
2874 if (locked)
2875 write_sequnlock(&rename_lock);
2876 return;
2877
2878rename_retry:
2879 locked = 1;
2880 write_seqlock(&rename_lock);
2881 goto again;
1da177e4
LT
2882}
2883
2884/**
2885 * find_inode_number - check for dentry with name
2886 * @dir: directory to check
2887 * @name: Name to find.
2888 *
2889 * Check whether a dentry already exists for the given name,
2890 * and return the inode number if it has an inode. Otherwise
2891 * 0 is returned.
2892 *
2893 * This routine is used to post-process directory listings for
2894 * filesystems using synthetic inode numbers, and is necessary
2895 * to keep getcwd() working.
2896 */
2897
2898ino_t find_inode_number(struct dentry *dir, struct qstr *name)
2899{
2900 struct dentry * dentry;
2901 ino_t ino = 0;
2902
3e7e241f
EB
2903 dentry = d_hash_and_lookup(dir, name);
2904 if (dentry) {
1da177e4
LT
2905 if (dentry->d_inode)
2906 ino = dentry->d_inode->i_ino;
2907 dput(dentry);
2908 }
1da177e4
LT
2909 return ino;
2910}
ec4f8605 2911EXPORT_SYMBOL(find_inode_number);
1da177e4
LT
2912
2913static __initdata unsigned long dhash_entries;
2914static int __init set_dhash_entries(char *str)
2915{
2916 if (!str)
2917 return 0;
2918 dhash_entries = simple_strtoul(str, &str, 0);
2919 return 1;
2920}
2921__setup("dhash_entries=", set_dhash_entries);
2922
2923static void __init dcache_init_early(void)
2924{
2925 int loop;
2926
2927 /* If hashes are distributed across NUMA nodes, defer
2928 * hash allocation until vmalloc space is available.
2929 */
2930 if (hashdist)
2931 return;
2932
2933 dentry_hashtable =
2934 alloc_large_system_hash("Dentry cache",
b07ad996 2935 sizeof(struct hlist_bl_head),
1da177e4
LT
2936 dhash_entries,
2937 13,
2938 HASH_EARLY,
2939 &d_hash_shift,
2940 &d_hash_mask,
2941 0);
2942
2943 for (loop = 0; loop < (1 << d_hash_shift); loop++)
b07ad996 2944 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
1da177e4
LT
2945}
2946
74bf17cf 2947static void __init dcache_init(void)
1da177e4
LT
2948{
2949 int loop;
2950
2951 /*
2952 * A constructor could be added for stable state like the lists,
2953 * but it is probably not worth it because of the cache nature
2954 * of the dcache.
2955 */
0a31bd5f
CL
2956 dentry_cache = KMEM_CACHE(dentry,
2957 SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD);
1da177e4
LT
2958
2959 /* Hash may have been set up in dcache_init_early */
2960 if (!hashdist)
2961 return;
2962
2963 dentry_hashtable =
2964 alloc_large_system_hash("Dentry cache",
b07ad996 2965 sizeof(struct hlist_bl_head),
1da177e4
LT
2966 dhash_entries,
2967 13,
2968 0,
2969 &d_hash_shift,
2970 &d_hash_mask,
2971 0);
2972
2973 for (loop = 0; loop < (1 << d_hash_shift); loop++)
b07ad996 2974 INIT_HLIST_BL_HEAD(dentry_hashtable + loop);
1da177e4
LT
2975}
2976
2977/* SLAB cache for __getname() consumers */
e18b890b 2978struct kmem_cache *names_cachep __read_mostly;
ec4f8605 2979EXPORT_SYMBOL(names_cachep);
1da177e4 2980
1da177e4
LT
2981EXPORT_SYMBOL(d_genocide);
2982
1da177e4
LT
2983void __init vfs_caches_init_early(void)
2984{
2985 dcache_init_early();
2986 inode_init_early();
2987}
2988
2989void __init vfs_caches_init(unsigned long mempages)
2990{
2991 unsigned long reserve;
2992
2993 /* Base hash sizes on available memory, with a reserve equal to
2994 150% of current kernel size */
2995
2996 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
2997 mempages -= reserve;
2998
2999 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
20c2df83 3000 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
1da177e4 3001
74bf17cf
DC
3002 dcache_init();
3003 inode_init();
1da177e4 3004 files_init(mempages);
74bf17cf 3005 mnt_init();
1da177e4
LT
3006 bdev_cache_init();
3007 chrdev_init();
3008}