/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/dcache.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/writeback.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/pagemap.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/async.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/ima.h>
#include <linux/cred.h>
#include <linux/buffer_head.h>	/* for inode_has_buffers */
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * inode->i_sb->s_inode_lru_lock protects:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode_sb_list_lock protects:
 *   sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode_sb_list_lock
 *   inode->i_lock
 *     inode->i_sb->s_inode_lru_lock
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode_sb_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
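
/*
 * Illustration (not part of the original file): the eviction paths
 * below follow the documented order, taking inode_sb_list_lock before
 * each inode->i_lock, e.g.:
 *
 *	spin_lock(&inode_sb_list_lock);
 *	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&inode_sb_list_lock);
 *
 * Where the order would be inverted, as with s_inode_lru_lock against
 * i_lock in prune_icache_sb(), a trylock is used instead.
 */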

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned int, nr_inodes);
static DEFINE_PER_CPU(unsigned int, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static int get_nr_inodes(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_inodes, i);
	return sum < 0 ? 0 : sum;
}

static inline int get_nr_inodes_unused(void)
{
	int i;
	int sum = 0;
	for_each_possible_cpu(i)
		sum += per_cpu(nr_unused, i);
	return sum < 0 ? 0 : sum;
}

int get_nr_dirty_inodes(void)
{
	/* not actually dirty inodes, but a wild approximation */
	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
	return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(ctl_table *table, int write,
		   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	inodes_stat.nr_inodes = get_nr_inodes();
	inodes_stat.nr_unused = get_nr_inodes_unused();
	return proc_dointvec(table, write, buffer, lenp, ppos);
}
#endif

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initializations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
	static const struct inode_operations empty_iops;
	static const struct file_operations empty_fops;
	struct address_space *const mapping = &inode->i_data;

	inode->i_sb = sb;
	inode->i_blkbits = sb->s_blocksize_bits;
	inode->i_flags = 0;
	atomic_set(&inode->i_count, 1);
	inode->i_op = &empty_iops;
	inode->i_fop = &empty_fops;
	inode->i_nlink = 1;
	inode->i_uid = 0;
	inode->i_gid = 0;
	atomic_set(&inode->i_writecount, 0);
	inode->i_size = 0;
	inode->i_blocks = 0;
	inode->i_bytes = 0;
	inode->i_generation = 0;
#ifdef CONFIG_QUOTA
	memset(&inode->i_dquot, 0, sizeof(inode->i_dquot));
#endif
	inode->i_pipe = NULL;
	inode->i_bdev = NULL;
	inode->i_cdev = NULL;
	inode->i_rdev = 0;
	inode->dirtied_when = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

	mutex_init(&inode->i_mutex);
	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

	atomic_set(&inode->i_dio_count, 0);

	mapping->a_ops = &empty_aops;
	mapping->host = inode;
	mapping->flags = 0;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
	mapping->assoc_mapping = NULL;
	mapping->backing_dev_info = &default_backing_dev_info;
	mapping->writeback_index = 0;

	/*
	 * If the block_device provides a backing_dev_info for client
	 * inodes then use that. Otherwise the inode shares the bdev's
	 * backing_dev_info.
	 */
	if (sb->s_bdev) {
		struct backing_dev_info *bdi;

		bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
		mapping->backing_dev_info = bdi;
	}
	inode->i_private = NULL;
	inode->i_mapping = mapping;
#ifdef CONFIG_FS_POSIX_ACL
	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
	inode->i_fsnotify_mask = 0;
#endif

	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);

static struct inode *alloc_inode(struct super_block *sb)
{
	struct inode *inode;

	if (sb->s_op->alloc_inode)
		inode = sb->s_op->alloc_inode(sb);
	else
		inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

	if (!inode)
		return NULL;

	if (unlikely(inode_init_always(sb, inode))) {
		if (inode->i_sb->s_op->destroy_inode)
			inode->i_sb->s_op->destroy_inode(inode);
		else
			kmem_cache_free(inode_cachep, inode);
		return NULL;
	}

	return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
	kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
	BUG_ON(inode_has_buffers(inode));
	security_inode_free(inode);
	fsnotify_inode_delete(inode);
#ifdef CONFIG_FS_POSIX_ACL
	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_acl);
	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
		posix_acl_release(inode->i_default_acl);
#endif
	this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
	BUG_ON(!list_empty(&inode->i_lru));
	__destroy_inode(inode);
	if (inode->i_sb->s_op->destroy_inode)
		inode->i_sb->s_op->destroy_inode(inode);
	else
		call_rcu(&inode->i_rcu, i_callback);
}

void address_space_init_once(struct address_space *mapping)
{
	memset(mapping, 0, sizeof(*mapping));
	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
	spin_lock_init(&mapping->tree_lock);
	mutex_init(&mapping->i_mmap_mutex);
	INIT_LIST_HEAD(&mapping->private_list);
	spin_lock_init(&mapping->private_lock);
	INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap);
	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear);
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initializations that only need to be done
 * once, because the fields are idempotent across use
 * of the inode, so let the slab be aware of that.
 */
void inode_init_once(struct inode *inode)
{
	memset(inode, 0, sizeof(*inode));
	INIT_HLIST_NODE(&inode->i_hash);
	INIT_LIST_HEAD(&inode->i_dentry);
	INIT_LIST_HEAD(&inode->i_devices);
	INIT_LIST_HEAD(&inode->i_wb_list);
	INIT_LIST_HEAD(&inode->i_lru);
	address_space_init_once(&inode->i_data);
	i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
	INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);

static void init_once(void *foo)
{
	struct inode *inode = (struct inode *) foo;

	inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
	atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
	WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (list_empty(&inode->i_lru)) {
		list_add(&inode->i_lru, &inode->i_sb->s_inode_lru);
		inode->i_sb->s_nr_inodes_unused++;
		this_cpu_inc(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

static void inode_lru_list_del(struct inode *inode)
{
	spin_lock(&inode->i_sb->s_inode_lru_lock);
	if (!list_empty(&inode->i_lru)) {
		list_del_init(&inode->i_lru);
		inode->i_sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	spin_unlock(&inode->i_sb->s_inode_lru_lock);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
	spin_lock(&inode_sb_list_lock);
	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
	spin_unlock(&inode_sb_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
	if (!list_empty(&inode->i_sb_list)) {
		spin_lock(&inode_sb_list_lock);
		list_del_init(&inode->i_sb_list);
		spin_unlock(&inode_sb_list_lock);
	}
}

static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
	unsigned long tmp;

	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
			L1_CACHE_BYTES;
	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
	return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *	inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_add_head(&inode->i_hash, b);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void remove_inode_hash(struct inode *inode)
{
	spin_lock(&inode_hash_lock);
	spin_lock(&inode->i_lock);
	hlist_del_init(&inode->i_hash);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(remove_inode_hash);

void end_writeback(struct inode *inode)
{
	might_sleep();
	/*
	 * We have to cycle tree_lock here because reclaim can be still in the
	 * process of removing the last page (in __delete_from_page_cache())
	 * and we must not free mapping under it.
	 */
	spin_lock_irq(&inode->i_data.tree_lock);
	BUG_ON(inode->i_data.nrpages);
	spin_unlock_irq(&inode->i_data.tree_lock);
	BUG_ON(!list_empty(&inode->i_data.private_list));
	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(inode->i_state & I_CLEAR);
	inode_sync_wait(inode);
	/* don't need i_lock here, no concurrent mods to i_state */
	inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(end_writeback);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
	const struct super_operations *op = inode->i_sb->s_op;

	BUG_ON(!(inode->i_state & I_FREEING));
	BUG_ON(!list_empty(&inode->i_lru));

	if (!list_empty(&inode->i_wb_list))
		inode_wb_list_del(inode);

	inode_sb_list_del(inode);

	if (op->evict_inode) {
		op->evict_inode(inode);
	} else {
		if (inode->i_data.nrpages)
			truncate_inode_pages(&inode->i_data, 0);
		end_writeback(inode);
	}
	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
		bd_forget(inode);
	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
		cd_forget(inode);

	remove_inode_hash(inode);

	spin_lock(&inode->i_lock);
	wake_up_bit(&inode->i_state, __I_NEW);
	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
	spin_unlock(&inode->i_lock);

	destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct inode *inode;

		inode = list_first_entry(head, struct inode, i_lru);
		list_del_init(&inode->i_lru);

		evict(inode);
	}
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		if (atomic_read(&inode->i_count))
			continue;

		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
	int busy = 0;
	struct inode *inode, *next;
	LIST_HEAD(dispose);

	spin_lock(&inode_sb_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & I_DIRTY && !kill_dirty) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}
		if (atomic_read(&inode->i_count)) {
			spin_unlock(&inode->i_lock);
			busy = 1;
			continue;
		}

		inode->i_state |= I_FREEING;
		inode_lru_list_del(inode);
		spin_unlock(&inode->i_lock);
		list_add(&inode->i_lru, &dispose);
	}
	spin_unlock(&inode_sb_list_lock);

	dispose_list(&dispose);

	return busy;
}

static int can_unuse(struct inode *inode)
{
	if (inode->i_state & ~I_REFERENCED)
		return 0;
	if (inode_has_buffers(inode))
		return 0;
	if (atomic_read(&inode->i_count))
		return 0;
	if (inode->i_data.nrpages)
		return 0;
	return 1;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because we
 * are doing lazy LRU updates to minimise lock contention, so the LRU does not
 * have strict ordering. Hence we don't want to reclaim inodes with this flag
 * set because they are the inodes that are out of order.
 */
void prune_icache_sb(struct super_block *sb, int nr_to_scan)
{
	LIST_HEAD(freeable);
	int nr_scanned;
	unsigned long reap = 0;

	spin_lock(&sb->s_inode_lru_lock);
	for (nr_scanned = nr_to_scan; nr_scanned >= 0; nr_scanned--) {
		struct inode *inode;

		if (list_empty(&sb->s_inode_lru))
			break;

		inode = list_entry(sb->s_inode_lru.prev, struct inode, i_lru);

		/*
		 * we are inverting the sb->s_inode_lru_lock/inode->i_lock here,
		 * so use a trylock. If we fail to get the lock, just move the
		 * inode to the back of the list so we don't spin on it.
		 */
		if (!spin_trylock(&inode->i_lock)) {
			list_move(&inode->i_lru, &sb->s_inode_lru);
			continue;
		}

		/*
		 * Referenced or dirty inodes are still in use. Give them
		 * another pass through the LRU as we cannot reclaim them now.
		 */
		if (atomic_read(&inode->i_count) ||
		    (inode->i_state & ~I_REFERENCED)) {
			list_del_init(&inode->i_lru);
			spin_unlock(&inode->i_lock);
			sb->s_nr_inodes_unused--;
			this_cpu_dec(nr_unused);
			continue;
		}

		/* recently referenced inodes get one more pass */
		if (inode->i_state & I_REFERENCED) {
			inode->i_state &= ~I_REFERENCED;
			list_move(&inode->i_lru, &sb->s_inode_lru);
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode_has_buffers(inode) || inode->i_data.nrpages) {
			__iget(inode);
			spin_unlock(&inode->i_lock);
			spin_unlock(&sb->s_inode_lru_lock);
			if (remove_inode_buffers(inode))
				reap += invalidate_mapping_pages(&inode->i_data,
								 0, -1);
			iput(inode);
			spin_lock(&sb->s_inode_lru_lock);

			if (inode != list_entry(sb->s_inode_lru.next,
						struct inode, i_lru))
				continue;	/* wrong inode or list_empty */
			/* avoid lock inversions with trylock */
			if (!spin_trylock(&inode->i_lock))
				continue;
			if (!can_unuse(inode)) {
				spin_unlock(&inode->i_lock);
				continue;
			}
		}
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state |= I_FREEING;
		spin_unlock(&inode->i_lock);

		list_move(&inode->i_lru, &freeable);
		sb->s_nr_inodes_unused--;
		this_cpu_dec(nr_unused);
	}
	if (current_is_kswapd())
		__count_vm_events(KSWAPD_INODESTEAL, reap);
	else
		__count_vm_events(PGINODESTEAL, reap);
	spin_unlock(&sb->s_inode_lru_lock);

	dispose_list(&freeable);
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
				struct hlist_head *head,
				int (*test)(struct inode *, void *),
				void *data)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (!test(inode, data)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
				struct hlist_head *head, unsigned long ino)
{
	struct hlist_node *node;
	struct inode *inode = NULL;

repeat:
	hlist_for_each_entry(inode, node, head, i_hash) {
		spin_lock(&inode->i_lock);
		if (inode->i_ino != ino) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_sb != sb) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
			__wait_on_freeing_inode(inode);
			goto repeat;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		return inode;
	}
	return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
	unsigned int *p = &get_cpu_var(last_ino);
	unsigned int res = *p;

#ifdef CONFIG_SMP
	if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
		static atomic_t shared_last_ino;
		int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

		res = next - LAST_INO_BATCH;
	}
#endif

	*p = ++res;
	put_cpu_var(last_ino);
	return res;
}
EXPORT_SYMBOL(get_next_ino);
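
/*
 * Example usage (a sketch, not taken from this file): pseudo
 * filesystems with no stable on-disk inode numbers typically call
 * get_next_ino() when instantiating an inode:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 */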

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock.
 * The inode will not be chained into the superblock's s_inodes list.
 * This means:
 * - fs can't be unmounted
 * - quotas, fsnotify, writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
	struct inode *inode = alloc_inode(sb);

	if (inode) {
		spin_lock(&inode->i_lock);
		inode->i_state = 0;
		spin_unlock(&inode->i_lock);
		INIT_LIST_HEAD(&inode->i_sb_list);
	}
	return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
838{
839 struct inode *inode;
840
841 spin_lock_prefetch(&inode_sb_list_lock);
842
843 inode = new_inode_pseudo(sb);
844 if (inode)
845 inode_sb_list_add(inode);
846 return inode;
847}
848EXPORT_SYMBOL(new_inode);
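
/*
 * Example usage (a sketch; the surrounding create path is assumed, not
 * taken from this file): a simple in-memory filesystem might set up a
 * freshly allocated inode like this:
 *
 *	struct inode *inode = new_inode(sb);
 *	if (!inode)
 *		return -ENOMEM;
 *	inode->i_ino = get_next_ino();
 *	inode_init_owner(inode, dir, S_IFREG | 0644);
 *	inode->i_atime = inode->i_mtime = inode->i_ctime =
 *						current_fs_time(sb);
 */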

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (S_ISDIR(inode->i_mode)) {
		struct file_system_type *type = inode->i_sb->s_type;

		/* Set new key only if filesystem hasn't already changed it */
		if (!lockdep_match_class(&inode->i_mutex,
		    &type->i_mutex_key)) {
			/*
			 * ensure nobody is actually holding i_mutex
			 */
			mutex_destroy(&inode->i_mutex);
			mutex_init(&inode->i_mutex);
			lockdep_set_class(&inode->i_mutex,
					  &type->i_mutex_dir_key);
		}
	}
#endif
	spin_lock(&inode->i_lock);
	WARN_ON(!(inode->i_state & I_NEW));
	inode->i_state &= ~I_NEW;
	wake_up_bit(&inode->i_state, __I_NEW);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *),
		int (*set)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode(sb, head, test, data);
		if (!old) {
			if (set(inode, data))
				goto set_failed;

			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;

set_failed:
	spin_unlock(&inode_hash_lock);
	destroy_inode(inode);
	return NULL;
}
EXPORT_SYMBOL(iget5_locked);
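
/*
 * Example usage (a sketch; the myfs_* names and the object_id field
 * are hypothetical): a filesystem whose identifiers do not fit in
 * i_ino supplies @test and @set callbacks:
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, (unsigned long)id, myfs_test,
 *			     myfs_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		... read the on-disk object and fill the inode ...
 *		unlock_new_inode(inode);
 *	}
 */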

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);
	if (inode) {
		wait_on_inode(inode);
		return inode;
	}

	inode = alloc_inode(sb);
	if (inode) {
		struct inode *old;

		spin_lock(&inode_hash_lock);
		/* We released the lock, so.. */
		old = find_inode_fast(sb, head, ino);
		if (!old) {
			inode->i_ino = ino;
			spin_lock(&inode->i_lock);
			inode->i_state = I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			inode_sb_list_add(inode);
			spin_unlock(&inode_hash_lock);

			/* Return the locked inode with I_NEW set, the
			 * caller is responsible for filling in the contents
			 */
			return inode;
		}

		/*
		 * Uhhuh, somebody else created the same inode under
		 * us. Use the old inode instead of the one we just
		 * allocated.
		 */
		spin_unlock(&inode_hash_lock);
		destroy_inode(inode);
		inode = old;
		wait_on_inode(inode);
	}
	return inode;
}
EXPORT_SYMBOL(iget_locked);
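
/*
 * Example usage (a sketch; myfs_read_inode() is a hypothetical helper
 * that fills the inode from disk):
 *
 *	struct inode *inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;
 *	err = myfs_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */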

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *b = inode_hashtable + hash(sb, ino);
	struct hlist_node *node;
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	hlist_for_each_entry(inode, node, b, i_hash) {
		if (inode->i_ino == ino && inode->i_sb == sb) {
			spin_unlock(&inode_hash_lock);
			return 0;
		}
	}
	spin_unlock(&inode_hash_lock);

	return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
	/*
	 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
	 * error if st_ino won't fit in target struct field. Use 32bit counter
	 * here to attempt to avoid that.
	 */
	static DEFINE_SPINLOCK(iunique_lock);
	static unsigned int counter;
	ino_t res;

	spin_lock(&iunique_lock);
	do {
		if (counter <= max_reserved)
			counter = max_reserved + 1;
		res = counter++;
	} while (!test_inode_iunique(sb, res));
	spin_unlock(&iunique_lock);

	return res;
}
EXPORT_SYMBOL(iunique);
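
/*
 * Example usage (a sketch; MYFS_MAX_RESERVED is hypothetical): a
 * synthetic filesystem inventing inode numbers on the fly:
 *
 *	inode->i_ino = iunique(sb, MYFS_MAX_RESERVED);
 *
 * Filesystems that never hash their inodes get no uniqueness benefit
 * from this walk and can usually use the cheaper get_next_ino().
 */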

struct inode *igrab(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
		__iget(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		/*
		 * Handle the case where s_op->clear_inode has not been
		 * called yet, and somebody is calling igrab
		 * while the inode is getting freed.
		 */
		inode = NULL;
	}
	return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode(sb, head, test, data);
	spin_unlock(&inode_hash_lock);

	return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
	struct hlist_head *head = inode_hashtable + hash(sb, ino);
	struct inode *inode;

	spin_lock(&inode_hash_lock);
	inode = find_inode_fast(sb, head, ino);
	spin_unlock(&inode_hash_lock);

	if (inode)
		wait_on_inode(inode);
	return inode;
}
EXPORT_SYMBOL(ilookup);

int insert_inode_locked(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	ino_t ino = inode->i_ino;
	struct hlist_head *head = inode_hashtable + hash(sb, ino);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;
		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_ino != ino)
				continue;
			if (old->i_sb != sb)
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked);
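
/*
 * Example usage (a sketch, loosely following the create paths of disk
 * filesystems; the error handling is illustrative): after allocating
 * an on-disk inode number, hash the new inode and fail on collision:
 *
 *	inode->i_ino = ino;
 *	if (insert_inode_locked(inode) < 0) {
 *		err = -EIO;
 *		goto fail;
 *	}
 *	... initialise the rest, then unlock_new_inode(inode) ...
 */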

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
		int (*test)(struct inode *, void *), void *data)
{
	struct super_block *sb = inode->i_sb;
	struct hlist_head *head = inode_hashtable + hash(sb, hashval);

	while (1) {
		struct hlist_node *node;
		struct inode *old = NULL;

		spin_lock(&inode_hash_lock);
		hlist_for_each_entry(old, node, head, i_hash) {
			if (old->i_sb != sb)
				continue;
			if (!test(old, data))
				continue;
			spin_lock(&old->i_lock);
			if (old->i_state & (I_FREEING|I_WILL_FREE)) {
				spin_unlock(&old->i_lock);
				continue;
			}
			break;
		}
		if (likely(!node)) {
			spin_lock(&inode->i_lock);
			inode->i_state |= I_NEW;
			hlist_add_head(&inode->i_hash, head);
			spin_unlock(&inode->i_lock);
			spin_unlock(&inode_hash_lock);
			return 0;
		}
		__iget(old);
		spin_unlock(&old->i_lock);
		spin_unlock(&inode_hash_lock);
		wait_on_inode(old);
		if (unlikely(!inode_unhashed(old))) {
			iput(old);
			return -EBUSY;
		}
		iput(old);
	}
}
EXPORT_SYMBOL(insert_inode_locked4);


int generic_delete_inode(struct inode *inode)
{
	return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Normal UNIX filesystem behaviour: delete the
 * inode when the usage count drops to zero, and
 * i_nlink is zero.
 */
int generic_drop_inode(struct inode *inode)
{
	return !inode->i_nlink || inode_unhashed(inode);
}
EXPORT_SYMBOL_GPL(generic_drop_inode);
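
/*
 * Example (a sketch; myfs_sops is hypothetical): a filesystem that
 * does not want unreferenced inodes cached can point ->drop_inode at
 * generic_delete_inode:
 *
 *	static const struct super_operations myfs_sops = {
 *		.statfs		= simple_statfs,
 *		.drop_inode	= generic_delete_inode,
 *	};
 *
 * With no ->drop_inode at all, iput_final() below falls back to
 * generic_drop_inode().
 */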

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	const struct super_operations *op = inode->i_sb->s_op;
	int drop;

	WARN_ON(inode->i_state & I_NEW);

	if (op->drop_inode)
		drop = op->drop_inode(inode);
	else
		drop = generic_drop_inode(inode);

	if (!drop && (sb->s_flags & MS_ACTIVE)) {
		inode->i_state |= I_REFERENCED;
		if (!(inode->i_state & (I_DIRTY|I_SYNC)))
			inode_lru_list_add(inode);
		spin_unlock(&inode->i_lock);
		return;
	}

	if (!drop) {
		inode->i_state |= I_WILL_FREE;
		spin_unlock(&inode->i_lock);
		write_inode_now(inode, 1);
		spin_lock(&inode->i_lock);
		WARN_ON(inode->i_state & I_NEW);
		inode->i_state &= ~I_WILL_FREE;
	}

	inode->i_state |= I_FREEING;
	inode_lru_list_del(inode);
	spin_unlock(&inode->i_lock);

	evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
	if (inode) {
		BUG_ON(inode->i_state & I_CLEAR);

		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))
			iput_final(inode);
	}
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1, the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
	sector_t res = 0;
	if (inode->i_mapping->a_ops->bmap)
		res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
	return res;
}
EXPORT_SYMBOL(bmap);
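
/*
 * Example usage (a sketch): map the first block of a file to its
 * on-disk sector, with 0 meaning "no mapping or bmap unsupported":
 *
 *	sector_t phys = bmap(inode, 0);
 *	if (!phys)
 *		return -EINVAL;
 */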

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
			     struct timespec now)
{

	if (!(mnt->mnt_flags & MNT_RELATIME))
		return 1;
	/*
	 * Is mtime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
		return 1;
	/*
	 * Is ctime younger than atime? If yes, update atime:
	 */
	if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
		return 1;

	/*
	 * Is the previous atime value older than a day? If yes,
	 * update atime:
	 */
	if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
		return 1;
	/*
	 * Good, we can skip the atime update:
	 */
	return 0;
}

/**
 * touch_atime - update the access time
 * @mnt: mount the inode is accessed on
 * @dentry: dentry accessed
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;
	struct timespec now;

	if (inode->i_flags & S_NOATIME)
		return;
	if (IS_NOATIME(inode))
		return;
	if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	if (mnt->mnt_flags & MNT_NOATIME)
		return;
	if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
		return;

	now = current_fs_time(inode->i_sb);

	if (!relatime_need_update(mnt, inode, now))
		return;

	if (timespec_equal(&inode->i_atime, &now))
		return;

	if (mnt_want_write(mnt))
		return;

	inode->i_atime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(mnt);
}
EXPORT_SYMBOL(touch_atime);
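
/*
 * Example usage (a sketch): generic read paths reach this through the
 * file_accessed() helper, roughly:
 *
 *	touch_atime(file->f_path.mnt, file->f_path.dentry);
 */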

/**
 * file_update_time - update mtime and ctime time
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode
 * for writeback. Note that this function is meant exclusively for
 * usage in the file write path of filesystems, and filesystems may
 * choose to explicitly ignore updates via this function with the
 * S_NOCMTIME inode flag, e.g. for network filesystems where these
 * timestamps are handled by the server.
 */

void file_update_time(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct timespec now;
	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

	/* First try to exhaust all avenues to not sync */
	if (IS_NOCMTIME(inode))
		return;

	now = current_fs_time(inode->i_sb);
	if (!timespec_equal(&inode->i_mtime, &now))
		sync_it = S_MTIME;

	if (!timespec_equal(&inode->i_ctime, &now))
		sync_it |= S_CTIME;

	if (IS_I_VERSION(inode))
		sync_it |= S_VERSION;

	if (!sync_it)
		return;

	/* Finally allowed to write? Takes lock. */
	if (mnt_want_write_file(file))
		return;

	/* Only change inode inside the lock region */
	if (sync_it & S_VERSION)
		inode_inc_iversion(inode);
	if (sync_it & S_CTIME)
		inode->i_ctime = now;
	if (sync_it & S_MTIME)
		inode->i_mtime = now;
	mark_inode_dirty_sync(inode);
	mnt_drop_write(file->f_path.mnt);
}
EXPORT_SYMBOL(file_update_time);
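
/*
 * Example usage (a sketch): a write path calls this once it knows the
 * write will proceed, before copying data:
 *
 *	file_update_time(file);
 */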

int inode_needs_sync(struct inode *inode)
{
	if (IS_SYNC(inode))
		return 1;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
		return 1;
	return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

int inode_wait(void *word)
{
	schedule();
	return 0;
}
EXPORT_SYMBOL(inode_wait);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
	wait_queue_head_t *wq;
	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
	wq = bit_waitqueue(&inode->i_state, __I_NEW);
	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
	spin_unlock(&inode->i_lock);
	spin_unlock(&inode_hash_lock);
	schedule();
	finish_wait(wq, &wait.wait);
	spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
	if (!str)
		return 0;
	ihash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
	int loop;

	/* If hashes are distributed across NUMA nodes, defer
	 * hash allocation until vmalloc space is available.
	 */
	if (hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					HASH_EARLY,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
	int loop;

	/* inode slab cache */
	inode_cachep = kmem_cache_create("inode_cache",
					 sizeof(struct inode),
					 0,
					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
					 SLAB_MEM_SPREAD),
					 init_once);

	/* Hash may have been set up in inode_init_early */
	if (!hashdist)
		return;

	inode_hashtable =
		alloc_large_system_hash("Inode-cache",
					sizeof(struct hlist_head),
					ihash_entries,
					14,
					0,
					&i_hash_shift,
					&i_hash_mask,
					0);

	for (loop = 0; loop < (1 << i_hash_shift); loop++)
		INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
	inode->i_mode = mode;
	if (S_ISCHR(mode)) {
		inode->i_fop = &def_chr_fops;
		inode->i_rdev = rdev;
	} else if (S_ISBLK(mode)) {
		inode->i_fop = &def_blk_fops;
		inode->i_rdev = rdev;
	} else if (S_ISFIFO(mode))
		inode->i_fop = &def_fifo_fops;
	else if (S_ISSOCK(mode))
		inode->i_fop = &bad_sock_fops;
	else
		printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
				  " inode %s:%lu\n", mode, inode->i_sb->s_id,
				  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);
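
/*
 * Example usage (a sketch; myfs_mknod is hypothetical): a filesystem's
 * ->mknod() implementation hands device inodes to this helper:
 *
 *	static int myfs_mknod(struct inode *dir, struct dentry *dentry,
 *			      int mode, dev_t dev)
 *	{
 *		struct inode *inode = new_inode(dir->i_sb);
 *		if (!inode)
 *			return -ENOMEM;
 *		inode_init_owner(inode, dir, mode);
 *		init_special_inode(inode, inode->i_mode, dev);
 *		d_instantiate(dentry, inode);
 *		return 0;
 *	}
 */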

/**
 * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
			mode_t mode)
{
	inode->i_uid = current_fsuid();
	if (dir && dir->i_mode & S_ISGID) {
		inode->i_gid = dir->i_gid;
		if (S_ISDIR(mode))
			mode |= S_ISGID;
	} else
		inode->i_gid = current_fsgid();
	inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);
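
/*
 * Example usage (a sketch): called early in create/mkdir/mknod paths,
 * before any mode-dependent setup:
 *
 *	inode_init_owner(inode, dir, S_IFDIR | 0755);
 *
 * Note the setgid handling above: @mode may come back with S_ISGID
 * added when @dir has it set.
 */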

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in the inode's user
 * namespace, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
	struct user_namespace *ns = inode_userns(inode);

	if (current_user_ns() == ns && current_fsuid() == inode->i_uid)
		return true;
	if (ns_capable(ns, CAP_FOWNER))
		return true;
	return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);