/*
 * (C) 1997 Linus Torvalds
 * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)
 */
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/backing-dev.h>
#include <linux/hash.h>
#include <linux/swap.h>
#include <linux/security.h>
#include <linux/cdev.h>
#include <linux/bootmem.h>
#include <linux/fsnotify.h>
#include <linux/mount.h>
#include <linux/posix_acl.h>
#include <linux/prefetch.h>
#include <linux/buffer_head.h> /* for inode_has_buffers */
#include <linux/ratelimit.h>
#include <linux/list_lru.h>
#include <trace/events/writeback.h>
#include "internal.h"

/*
 * Inode locking rules:
 *
 * inode->i_lock protects:
 *   inode->i_state, inode->i_hash, __iget()
 * Inode LRU list locks protect:
 *   inode->i_sb->s_inode_lru, inode->i_lru
 * inode->i_sb->s_inode_list_lock protects:
 *   inode->i_sb->s_inodes, inode->i_sb_list
 * bdi->wb.list_lock protects:
 *   bdi->wb.b_{dirty,io,more_io,dirty_time}, inode->i_io_list
 * inode_hash_lock protects:
 *   inode_hashtable, inode->i_hash
 *
 * Lock ordering:
 *
 * inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *     Inode LRU list locks
 *
 * bdi->wb.list_lock
 *   inode->i_lock
 *
 * inode_hash_lock
 *   inode->i_sb->s_inode_list_lock
 *   inode->i_lock
 *
 * iunique_lock
 *   inode_hash_lock
 */
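
/*
 * Illustrative sketch (not part of this file's logic): the ordering above
 * means a walker of the per-sb inode list takes the locks nested like
 * this, never the other way round:
 *
 *	spin_lock(&sb->s_inode_list_lock);
 *	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
 *		spin_lock(&inode->i_lock);
 *		...
 *		spin_unlock(&inode->i_lock);
 *	}
 *	spin_unlock(&sb->s_inode_list_lock);
 *
 * evict_inodes() and invalidate_inodes() below follow exactly this pattern.
 */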

static unsigned int i_hash_mask __read_mostly;
static unsigned int i_hash_shift __read_mostly;
static struct hlist_head *inode_hashtable __read_mostly;
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock);

/*
 * Empty aops. Can be used for the cases where the user does not
 * define any of the address_space operations.
 */
const struct address_space_operations empty_aops = {
};
EXPORT_SYMBOL(empty_aops);

/*
 * Statistics gathering..
 */
struct inodes_stat_t inodes_stat;

static DEFINE_PER_CPU(unsigned long, nr_inodes);
static DEFINE_PER_CPU(unsigned long, nr_unused);

static struct kmem_cache *inode_cachep __read_mostly;

static long get_nr_inodes(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_inodes, i);
        return sum < 0 ? 0 : sum;
}

static inline long get_nr_inodes_unused(void)
{
        int i;
        long sum = 0;
        for_each_possible_cpu(i)
                sum += per_cpu(nr_unused, i);
        return sum < 0 ? 0 : sum;
}

long get_nr_dirty_inodes(void)
{
        /* not actually dirty inodes, but a wild approximation */
        long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();
        return nr_dirty > 0 ? nr_dirty : 0;
}

/*
 * Handle nr_inode sysctl
 */
#ifdef CONFIG_SYSCTL
int proc_nr_inodes(struct ctl_table *table, int write,
                   void __user *buffer, size_t *lenp, loff_t *ppos)
{
        inodes_stat.nr_inodes = get_nr_inodes();
        inodes_stat.nr_unused = get_nr_inodes_unused();
        return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
}
#endif

static int no_open(struct inode *inode, struct file *file)
{
        return -ENXIO;
}

/**
 * inode_init_always - perform inode structure initialisation
 * @sb: superblock inode belongs to
 * @inode: inode to initialise
 *
 * These are initialisations that need to be done on every inode
 * allocation as the fields are not initialised by slab allocation.
 */
int inode_init_always(struct super_block *sb, struct inode *inode)
{
        static const struct inode_operations empty_iops;
        static const struct file_operations no_open_fops = {.open = no_open};
        struct address_space *const mapping = &inode->i_data;

        inode->i_sb = sb;
        inode->i_blkbits = sb->s_blocksize_bits;
        inode->i_flags = 0;
        atomic_set(&inode->i_count, 1);
        inode->i_op = &empty_iops;
        inode->i_fop = &no_open_fops;
        inode->__i_nlink = 1;
        inode->i_opflags = 0;
        i_uid_write(inode, 0);
        i_gid_write(inode, 0);
        atomic_set(&inode->i_writecount, 0);
        inode->i_size = 0;
        inode->i_blocks = 0;
        inode->i_bytes = 0;
        inode->i_generation = 0;
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
        inode->i_link = NULL;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;

        if (security_inode_alloc(inode))
                goto out;
        spin_lock_init(&inode->i_lock);
        lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

        mutex_init(&inode->i_mutex);
        lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key);

        atomic_set(&inode->i_dio_count, 0);

        mapping->a_ops = &empty_aops;
        mapping->host = inode;
        mapping->flags = 0;
        atomic_set(&mapping->i_mmap_writable, 0);
        mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE);
        mapping->private_data = NULL;
        mapping->writeback_index = 0;
        inode->i_private = NULL;
        inode->i_mapping = mapping;
        INIT_HLIST_HEAD(&inode->i_dentry);      /* buggered by rcu freeing */
#ifdef CONFIG_FS_POSIX_ACL
        inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;
#endif

#ifdef CONFIG_FSNOTIFY
        inode->i_fsnotify_mask = 0;
#endif
        inode->i_flctx = NULL;
        this_cpu_inc(nr_inodes);

        return 0;
out:
        return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
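
/*
 * Illustrative sketch under assumed names (myfs_*): a filesystem that
 * embeds struct inode in its own inode structure and allocates it itself
 * is expected to call inode_init_always() on every allocation, since the
 * slab constructor only runs once per object:
 *
 *	static struct inode *myfs_alloc_inode(struct super_block *sb)
 *	{
 *		struct myfs_inode *mi = kmem_cache_alloc(myfs_inode_cachep,
 *							 GFP_KERNEL);
 *		if (!mi)
 *			return NULL;
 *		if (inode_init_always(sb, &mi->vfs_inode)) {
 *			kmem_cache_free(myfs_inode_cachep, mi);
 *			return NULL;
 *		}
 *		return &mi->vfs_inode;
 *	}
 */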

static struct inode *alloc_inode(struct super_block *sb)
{
        struct inode *inode;

        if (sb->s_op->alloc_inode)
                inode = sb->s_op->alloc_inode(sb);
        else
                inode = kmem_cache_alloc(inode_cachep, GFP_KERNEL);

        if (!inode)
                return NULL;

        if (unlikely(inode_init_always(sb, inode))) {
                if (inode->i_sb->s_op->destroy_inode)
                        inode->i_sb->s_op->destroy_inode(inode);
                else
                        kmem_cache_free(inode_cachep, inode);
                return NULL;
        }

        return inode;
}

void free_inode_nonrcu(struct inode *inode)
{
        kmem_cache_free(inode_cachep, inode);
}
EXPORT_SYMBOL(free_inode_nonrcu);

void __destroy_inode(struct inode *inode)
{
        BUG_ON(inode_has_buffers(inode));
        inode_detach_wb(inode);
        security_inode_free(inode);
        fsnotify_inode_delete(inode);
        locks_free_lock_context(inode);
        if (!inode->i_nlink) {
                WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0);
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

#ifdef CONFIG_FS_POSIX_ACL
        if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_acl);
        if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)
                posix_acl_release(inode->i_default_acl);
#endif
        this_cpu_dec(nr_inodes);
}
EXPORT_SYMBOL(__destroy_inode);

static void i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(inode_cachep, inode);
}

static void destroy_inode(struct inode *inode)
{
        BUG_ON(!list_empty(&inode->i_lru));
        __destroy_inode(inode);
        if (inode->i_sb->s_op->destroy_inode)
                inode->i_sb->s_op->destroy_inode(inode);
        else
                call_rcu(&inode->i_rcu, i_callback);
}

/**
 * drop_nlink - directly drop an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. In cases
 * where we are attempting to track writes to the
 * filesystem, a decrement to zero means an imminent
 * write when the file is truncated and actually unlinked
 * on the filesystem.
 */
void drop_nlink(struct inode *inode)
{
        WARN_ON(inode->i_nlink == 0);
        inode->__i_nlink--;
        if (!inode->i_nlink)
                atomic_long_inc(&inode->i_sb->s_remove_count);
}
EXPORT_SYMBOL(drop_nlink);
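
/*
 * Illustrative sketch (assumed myfs_* names): a typical ->unlink()
 * implementation drops the victim's link count with drop_nlink() instead
 * of touching i_nlink directly, so s_remove_count stays balanced:
 *
 *	static int myfs_unlink(struct inode *dir, struct dentry *dentry)
 *	{
 *		struct inode *inode = d_inode(dentry);
 *		int err = myfs_remove_entry(dir, dentry);
 *
 *		if (err)
 *			return err;
 *		inode->i_ctime = dir->i_ctime = dir->i_mtime =
 *					current_fs_time(dir->i_sb);
 *		drop_nlink(inode);
 *		return 0;
 *	}
 */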

/**
 * clear_nlink - directly zero an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. See
 * drop_nlink() for why we care about i_nlink hitting zero.
 */
void clear_nlink(struct inode *inode)
{
        if (inode->i_nlink) {
                inode->__i_nlink = 0;
                atomic_long_inc(&inode->i_sb->s_remove_count);
        }
}
EXPORT_SYMBOL(clear_nlink);

/**
 * set_nlink - directly set an inode's link count
 * @inode: inode
 * @nlink: new nlink (should be non-zero)
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink.
 */
void set_nlink(struct inode *inode, unsigned int nlink)
{
        if (!nlink) {
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
                if (inode->i_nlink == 0)
                        atomic_long_dec(&inode->i_sb->s_remove_count);

                inode->__i_nlink = nlink;
        }
}
EXPORT_SYMBOL(set_nlink);

/**
 * inc_nlink - directly increment an inode's link count
 * @inode: inode
 *
 * This is a low-level filesystem helper to replace any
 * direct filesystem manipulation of i_nlink. Currently,
 * it is only here for parity with dec_nlink().
 */
void inc_nlink(struct inode *inode)
{
        if (unlikely(inode->i_nlink == 0)) {
                WARN_ON(!(inode->i_state & I_LINKABLE));
                atomic_long_dec(&inode->i_sb->s_remove_count);
        }

        inode->__i_nlink++;
}
EXPORT_SYMBOL(inc_nlink);

void address_space_init_once(struct address_space *mapping)
{
        memset(mapping, 0, sizeof(*mapping));
        INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC);
        spin_lock_init(&mapping->tree_lock);
        init_rwsem(&mapping->i_mmap_rwsem);
        INIT_LIST_HEAD(&mapping->private_list);
        spin_lock_init(&mapping->private_lock);
        mapping->i_mmap = RB_ROOT;
}
EXPORT_SYMBOL(address_space_init_once);

/*
 * These are initialisations that only need to be done once, because
 * the fields are idempotent across use of the inode; the slab
 * constructor takes advantage of that.
 */
void inode_init_once(struct inode *inode)
{
        memset(inode, 0, sizeof(*inode));
        INIT_HLIST_NODE(&inode->i_hash);
        INIT_LIST_HEAD(&inode->i_devices);
        INIT_LIST_HEAD(&inode->i_io_list);
        INIT_LIST_HEAD(&inode->i_lru);
        address_space_init_once(&inode->i_data);
        i_size_ordered_init(inode);
#ifdef CONFIG_FSNOTIFY
        INIT_HLIST_HEAD(&inode->i_fsnotify_marks);
#endif
}
EXPORT_SYMBOL(inode_init_once);
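
/*
 * Illustrative sketch (assumed myfs_* names): filesystems with their own
 * inode slab wire inode_init_once() up through the cache constructor, so
 * the once-only initialisation happens when a slab page is populated,
 * not on every allocation:
 *
 *	static void myfs_init_once(void *foo)
 *	{
 *		struct myfs_inode *mi = foo;
 *
 *		inode_init_once(&mi->vfs_inode);
 *	}
 *
 *	myfs_inode_cachep = kmem_cache_create("myfs_inode_cache",
 *				sizeof(struct myfs_inode), 0,
 *				SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
 *				myfs_init_once);
 */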

static void init_once(void *foo)
{
        struct inode *inode = (struct inode *) foo;

        inode_init_once(inode);
}

/*
 * inode->i_lock must be held
 */
void __iget(struct inode *inode)
{
        atomic_inc(&inode->i_count);
}

/*
 * get additional reference to inode; caller must already hold one.
 */
void ihold(struct inode *inode)
{
        WARN_ON(atomic_inc_return(&inode->i_count) < 2);
}
EXPORT_SYMBOL(ihold);

static void inode_lru_list_add(struct inode *inode)
{
        if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_inc(nr_unused);
}

/*
 * Add inode to LRU if needed (inode is unused and clean).
 *
 * Needs inode->i_lock held.
 */
void inode_add_lru(struct inode *inode)
{
        if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
                                I_FREEING | I_WILL_FREE)) &&
            !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
                inode_lru_list_add(inode);
}

static void inode_lru_list_del(struct inode *inode)
{
        if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru))
                this_cpu_dec(nr_unused);
}

/**
 * inode_sb_list_add - add inode to the superblock list of inodes
 * @inode: inode to add
 */
void inode_sb_list_add(struct inode *inode)
{
        spin_lock(&inode->i_sb->s_inode_list_lock);
        list_add(&inode->i_sb_list, &inode->i_sb->s_inodes);
        spin_unlock(&inode->i_sb->s_inode_list_lock);
}
EXPORT_SYMBOL_GPL(inode_sb_list_add);

static inline void inode_sb_list_del(struct inode *inode)
{
        if (!list_empty(&inode->i_sb_list)) {
                spin_lock(&inode->i_sb->s_inode_list_lock);
                list_del_init(&inode->i_sb_list);
                spin_unlock(&inode->i_sb->s_inode_list_lock);
        }
}

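/*
 * Compute the bucket index for (sb, hashval) in the global
 * inode_hashtable. The superblock pointer is mixed in so that equal
 * inode numbers on different filesystems land in different chains.
 */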
static unsigned long hash(struct super_block *sb, unsigned long hashval)
{
        unsigned long tmp;

        tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /
                        L1_CACHE_BYTES;
        tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift);
        return tmp & i_hash_mask;
}

/**
 * __insert_inode_hash - hash an inode
 * @inode: unhashed inode
 * @hashval: unsigned long value used to locate this object in the
 *           inode_hashtable.
 *
 * Add an inode to the inode hash for this superblock.
 */
void __insert_inode_hash(struct inode *inode, unsigned long hashval)
{
        struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval);

        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_add_head(&inode->i_hash, b);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__insert_inode_hash);

/**
 * __remove_inode_hash - remove an inode from the hash
 * @inode: inode to unhash
 *
 * Remove an inode from the superblock.
 */
void __remove_inode_hash(struct inode *inode)
{
        spin_lock(&inode_hash_lock);
        spin_lock(&inode->i_lock);
        hlist_del_init(&inode->i_hash);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
}
EXPORT_SYMBOL(__remove_inode_hash);

void clear_inode(struct inode *inode)
{
        might_sleep();
        /*
         * We have to cycle tree_lock here because reclaim can be still in the
         * process of removing the last page (in __delete_from_page_cache())
         * and we must not free mapping under it.
         */
        spin_lock_irq(&inode->i_data.tree_lock);
        BUG_ON(inode->i_data.nrpages);
        BUG_ON(inode->i_data.nrexceptional);
        spin_unlock_irq(&inode->i_data.tree_lock);
        BUG_ON(!list_empty(&inode->i_data.private_list));
        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(inode->i_state & I_CLEAR);
        /* don't need i_lock here, no concurrent mods to i_state */
        inode->i_state = I_FREEING | I_CLEAR;
}
EXPORT_SYMBOL(clear_inode);

/*
 * Free the inode passed in, removing it from the lists it is still connected
 * to. We remove any pages still attached to the inode and wait for any IO that
 * is still in progress before finally destroying the inode.
 *
 * An inode must already be marked I_FREEING so that we avoid the inode being
 * moved back onto lists if we race with other code that manipulates the lists
 * (e.g. writeback_single_inode). The caller is responsible for setting this.
 *
 * An inode must already be removed from the LRU list before being evicted from
 * the cache. This should occur atomically with setting the I_FREEING state
 * flag, so no inodes here should ever be on the LRU when being evicted.
 */
static void evict(struct inode *inode)
{
        const struct super_operations *op = inode->i_sb->s_op;

        BUG_ON(!(inode->i_state & I_FREEING));
        BUG_ON(!list_empty(&inode->i_lru));

        if (!list_empty(&inode->i_io_list))
                inode_io_list_del(inode);

        inode_sb_list_del(inode);

        /*
         * Wait for flusher thread to be done with the inode so that filesystem
         * does not start destroying it while writeback is still running. Since
         * the inode has I_FREEING set, flusher thread won't start new work on
         * the inode. We just have to wait for running writeback to finish.
         */
        inode_wait_for_writeback(inode);

        if (op->evict_inode) {
                op->evict_inode(inode);
        } else {
                truncate_inode_pages_final(&inode->i_data);
                clear_inode(inode);
        }
        if (S_ISBLK(inode->i_mode) && inode->i_bdev)
                bd_forget(inode);
        if (S_ISCHR(inode->i_mode) && inode->i_cdev)
                cd_forget(inode);

        remove_inode_hash(inode);

        spin_lock(&inode->i_lock);
        wake_up_bit(&inode->i_state, __I_NEW);
        BUG_ON(inode->i_state != (I_FREEING | I_CLEAR));
        spin_unlock(&inode->i_lock);

        destroy_inode(inode);
}

/*
 * dispose_list - dispose of the contents of a local list
 * @head: the head of the list to free
 *
 * Dispose-list gets a local list with local inodes in it, so it doesn't
 * need to worry about list corruption and SMP locks.
 */
static void dispose_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct inode *inode;

                inode = list_first_entry(head, struct inode, i_lru);
                list_del_init(&inode->i_lru);

                evict(inode);
                cond_resched();
        }
}

/**
 * evict_inodes - evict all evictable inodes for a superblock
 * @sb: superblock to operate on
 *
 * Make sure that no inodes with zero refcount are retained. This is
 * called by superblock shutdown after having MS_ACTIVE flag removed,
 * so any inode reaching zero refcount during or after that call will
 * be immediately evicted.
 */
void evict_inodes(struct super_block *sb)
{
        struct inode *inode, *next;
        LIST_HEAD(dispose);

again:
        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                if (atomic_read(&inode->i_count))
                        continue;

                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);

                /*
                 * We can have a ton of inodes to evict at unmount time given
                 * enough memory, check to see if we need to go to sleep for a
                 * bit so we don't livelock.
                 */
                if (need_resched()) {
                        spin_unlock(&sb->s_inode_list_lock);
                        cond_resched();
                        dispose_list(&dispose);
                        goto again;
                }
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);
}

/**
 * invalidate_inodes - attempt to free all inodes on a superblock
 * @sb: superblock to operate on
 * @kill_dirty: flag to guide handling of dirty inodes
 *
 * Attempts to free all inodes for a given superblock. If there were any
 * busy inodes return a non-zero value, else zero.
 * If @kill_dirty is set, discard dirty inodes too, otherwise treat
 * them as busy.
 */
int invalidate_inodes(struct super_block *sb, bool kill_dirty)
{
        int busy = 0;
        struct inode *inode, *next;
        LIST_HEAD(dispose);

        spin_lock(&sb->s_inode_list_lock);
        list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
                        spin_unlock(&inode->i_lock);
                        continue;
                }
                if (inode->i_state & I_DIRTY_ALL && !kill_dirty) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }
                if (atomic_read(&inode->i_count)) {
                        spin_unlock(&inode->i_lock);
                        busy = 1;
                        continue;
                }

                inode->i_state |= I_FREEING;
                inode_lru_list_del(inode);
                spin_unlock(&inode->i_lock);
                list_add(&inode->i_lru, &dispose);
        }
        spin_unlock(&sb->s_inode_list_lock);

        dispose_list(&dispose);

        return busy;
}

/*
 * Isolate the inode from the LRU in preparation for freeing it.
 *
 * Any inodes which are pinned purely because of attached pagecache have their
 * pagecache removed. If the inode has metadata buffers attached to
 * mapping->private_list then try to remove them.
 *
 * If the inode has the I_REFERENCED flag set, then it means that it has been
 * used recently - the flag is set in iput_final(). When we encounter such an
 * inode, clear the flag and move it to the back of the LRU so it gets another
 * pass through the LRU before it gets reclaimed. This is necessary because of
 * the fact we are doing lazy LRU updates to minimise lock contention so the
 * LRU does not have strict ordering. Hence we don't want to reclaim inodes
 * with this flag set because they are the inodes that are out of order.
 */
static enum lru_status inode_lru_isolate(struct list_head *item,
                struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
        struct list_head *freeable = arg;
        struct inode *inode = container_of(item, struct inode, i_lru);

        /*
         * we are inverting the lru lock/inode->i_lock here, so use a trylock.
         * If we fail to get the lock, just skip it.
         */
        if (!spin_trylock(&inode->i_lock))
                return LRU_SKIP;

        /*
         * Referenced or dirty inodes are still in use. Give them another pass
         * through the LRU as we cannot reclaim them now.
         */
        if (atomic_read(&inode->i_count) ||
            (inode->i_state & ~I_REFERENCED)) {
                list_lru_isolate(lru, &inode->i_lru);
                spin_unlock(&inode->i_lock);
                this_cpu_dec(nr_unused);
                return LRU_REMOVED;
        }

        /* recently referenced inodes get one more pass */
        if (inode->i_state & I_REFERENCED) {
                inode->i_state &= ~I_REFERENCED;
                spin_unlock(&inode->i_lock);
                return LRU_ROTATE;
        }

        if (inode_has_buffers(inode) || inode->i_data.nrpages) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
                spin_unlock(lru_lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
                        if (current_is_kswapd())
                                __count_vm_events(KSWAPD_INODESTEAL, reap);
                        else
                                __count_vm_events(PGINODESTEAL, reap);
                        if (current->reclaim_state)
                                current->reclaim_state->reclaimed_slab += reap;
                }
                iput(inode);
                spin_lock(lru_lock);
                return LRU_RETRY;
        }

        WARN_ON(inode->i_state & I_NEW);
        inode->i_state |= I_FREEING;
        list_lru_isolate_move(lru, &inode->i_lru, freeable);
        spin_unlock(&inode->i_lock);

        this_cpu_dec(nr_unused);
        return LRU_REMOVED;
}

/*
 * Walk the superblock inode LRU for freeable inodes and attempt to free them.
 * This is called from the superblock shrinker function with a number of inodes
 * to trim from the LRU. Inodes to be freed are moved to a temporary list and
 * then are freed outside inode_lock by dispose_list().
 */
long prune_icache_sb(struct super_block *sb, struct shrink_control *sc)
{
        LIST_HEAD(freeable);
        long freed;

        freed = list_lru_shrink_walk(&sb->s_inode_lru, sc,
                                     inode_lru_isolate, &freeable);
        dispose_list(&freeable);
        return freed;
}

static void __wait_on_freeing_inode(struct inode *inode);
/*
 * Called with the inode_hash_lock held.
 */
static struct inode *find_inode(struct super_block *sb,
                                struct hlist_head *head,
                                int (*test)(struct inode *, void *),
                                void *data)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                if (!test(inode, data))
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * find_inode_fast is the fast path version of find_inode, see the comment at
 * iget_locked for details.
 */
static struct inode *find_inode_fast(struct super_block *sb,
                                     struct hlist_head *head, unsigned long ino)
{
        struct inode *inode = NULL;

repeat:
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_ino != ino)
                        continue;
                if (inode->i_sb != sb)
                        continue;
                spin_lock(&inode->i_lock);
                if (inode->i_state & (I_FREEING|I_WILL_FREE)) {
                        __wait_on_freeing_inode(inode);
                        goto repeat;
                }
                __iget(inode);
                spin_unlock(&inode->i_lock);
                return inode;
        }
        return NULL;
}

/*
 * Each cpu owns a range of LAST_INO_BATCH numbers.
 * 'shared_last_ino' is dirtied only once out of LAST_INO_BATCH allocations,
 * to renew the exhausted range.
 *
 * This does not significantly increase overflow rate because every CPU can
 * consume at most LAST_INO_BATCH-1 unused inode numbers. So there is
 * NR_CPUS*(LAST_INO_BATCH-1) wastage. At 4096 and 1024, this is ~0.1% of the
 * 2^32 range, and is a worst-case. Even a 50% wastage would only increase
 * overflow rate by 2x, which does not seem too significant.
 *
 * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
 * error if st_ino won't fit in target struct field. Use 32bit counter
 * here to attempt to avoid that.
 */
#define LAST_INO_BATCH 1024
static DEFINE_PER_CPU(unsigned int, last_ino);

unsigned int get_next_ino(void)
{
        unsigned int *p = &get_cpu_var(last_ino);
        unsigned int res = *p;

#ifdef CONFIG_SMP
        if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
                static atomic_t shared_last_ino;
                int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);

                res = next - LAST_INO_BATCH;
        }
#endif

        res++;
        /* get_next_ino should not provide a 0 inode number */
        if (unlikely(!res))
                res++;
        *p = res;
        put_cpu_var(last_ino);
        return res;
}
EXPORT_SYMBOL(get_next_ino);
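
/*
 * Illustrative sketch: pseudo filesystems without stable on-disk inode
 * numbers typically stamp freshly allocated inodes like so:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		inode->i_ino = get_next_ino();
 *
 * Note the number is only unique-ish: it comes from a shared 32bit
 * counter and can eventually wrap.
 */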

/**
 * new_inode_pseudo - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for the given superblock. The inode won't be
 * chained into the superblock's s_inodes list. This means:
 * - the fs can't be unmounted
 * - quotas, fsnotify and writeback can't work
 */
struct inode *new_inode_pseudo(struct super_block *sb)
{
        struct inode *inode = alloc_inode(sb);

        if (inode) {
                spin_lock(&inode->i_lock);
                inode->i_state = 0;
                spin_unlock(&inode->i_lock);
                INIT_LIST_HEAD(&inode->i_sb_list);
        }
        return inode;
}

/**
 * new_inode - obtain an inode
 * @sb: superblock
 *
 * Allocates a new inode for given superblock. The default gfp_mask
 * for allocations related to inode->i_mapping is GFP_HIGHUSER_MOVABLE.
 * If HIGHMEM pages are unsuitable or it is known that pages allocated
 * for the page cache are not reclaimable or migratable,
 * mapping_set_gfp_mask() must be called with suitable flags on the
 * newly created inode's mapping.
 */
struct inode *new_inode(struct super_block *sb)
{
        struct inode *inode;

        spin_lock_prefetch(&sb->s_inode_list_lock);

        inode = new_inode_pseudo(sb);
        if (inode)
                inode_sb_list_add(inode);
        return inode;
}
EXPORT_SYMBOL(new_inode);
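
/*
 * Illustrative sketch of the gfp_mask note above: a filesystem whose
 * page cache pages must not come from highmem would override the
 * default right after allocation:
 *
 *	inode = new_inode(sb);
 *	if (inode)
 *		mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
 *
 * (inode_nohighmem() at the bottom of this file is a helper doing
 * exactly that.)
 */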

#ifdef CONFIG_DEBUG_LOCK_ALLOC
void lockdep_annotate_inode_mutex_key(struct inode *inode)
{
        if (S_ISDIR(inode->i_mode)) {
                struct file_system_type *type = inode->i_sb->s_type;

                /* Set new key only if filesystem hasn't already changed it */
                if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {
                        /*
                         * ensure nobody is actually holding i_mutex
                         */
                        mutex_destroy(&inode->i_mutex);
                        mutex_init(&inode->i_mutex);
                        lockdep_set_class(&inode->i_mutex,
                                          &type->i_mutex_dir_key);
                }
        }
}
EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);
#endif

/**
 * unlock_new_inode - clear the I_NEW state and wake up any waiters
 * @inode: new inode to unlock
 *
 * Called when the inode is fully initialised to clear the new state of the
 * inode and wake up anyone waiting for the inode to finish initialisation.
 */
void unlock_new_inode(struct inode *inode)
{
        lockdep_annotate_inode_mutex_key(inode);
        spin_lock(&inode->i_lock);
        WARN_ON(!(inode->i_state & I_NEW));
        inode->i_state &= ~I_NEW;
        smp_mb();
        wake_up_bit(&inode->i_state, __I_NEW);
        spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL(unlock_new_inode);

/**
 * lock_two_nondirectories - take two i_mutexes on non-directory objects
 *
 * Lock any non-NULL argument that is not a directory.
 * Zero, one or two objects may be locked by this function.
 *
 * @inode1: first inode to lock
 * @inode2: second inode to lock
 */
void lock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 > inode2)
                swap(inode1, inode2);

        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_lock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_lock_nested(inode2, I_MUTEX_NONDIR2);
}
EXPORT_SYMBOL(lock_two_nondirectories);

/**
 * unlock_two_nondirectories - release locks from lock_two_nondirectories()
 * @inode1: first inode to unlock
 * @inode2: second inode to unlock
 */
void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2)
{
        if (inode1 && !S_ISDIR(inode1->i_mode))
                inode_unlock(inode1);
        if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1)
                inode_unlock(inode2);
}
EXPORT_SYMBOL(unlock_two_nondirectories);
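
/*
 * Illustrative sketch: a cross-file operation (an exchange or
 * deduplication style request, say) takes both inode locks through this
 * pair, and the address-based ordering inside lock_two_nondirectories()
 * prevents an ABBA deadlock however the caller orders its arguments:
 *
 *	lock_two_nondirectories(src, dst);
 *	...operate on both inodes...
 *	unlock_two_nondirectories(src, dst);
 */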

/**
 * iget5_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @hashval: hash value (usually inode number) to get
 * @test: callback used for comparisons between inodes
 * @set: callback used to initialize a new struct inode
 * @data: opaque data pointer to pass to @test and @set
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if present return it with an increased reference count. This is a
 * generalized version of iget_locked() for file systems where the inode
 * number is not sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 *
 * Note both @test and @set are called with the inode_hash_lock held, so can't
 * sleep.
 */
struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
                           int (*test)(struct inode *, void *),
                           int (*set)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode(sb, head, test, data);
                if (!old) {
                        if (set(inode, data))
                                goto set_failed;

                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;

set_failed:
        spin_unlock(&inode_hash_lock);
        destroy_inode(inode);
        return NULL;
}
EXPORT_SYMBOL(iget5_locked);
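
/*
 * Illustrative sketch (assumed myfs_* names and a hypothetical 64bit
 * object id that doesn't fit in the hash value): the usual calling
 * pattern is
 *
 *	static int myfs_test(struct inode *inode, void *data)
 *	{
 *		return MYFS_I(inode)->object_id == *(u64 *)data;
 *	}
 *
 *	static int myfs_set(struct inode *inode, void *data)
 *	{
 *		MYFS_I(inode)->object_id = *(u64 *)data;
 *		return 0;
 *	}
 *
 *	inode = iget5_locked(sb, hash_64(id, 64), myfs_test, myfs_set, &id);
 *	if (inode && (inode->i_state & I_NEW)) {
 *		...read the inode in...
 *		unlock_new_inode(inode);
 *	}
 */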

/**
 * iget_locked - obtain an inode from a mounted file system
 * @sb: super block of file system
 * @ino: inode number to get
 *
 * Search for the inode specified by @ino in the inode cache and if present
 * return it with an increased reference count. This is for file systems
 * where the inode number is sufficient for unique identification of an inode.
 *
 * If the inode is not in cache, allocate a new inode and return it locked,
 * hashed, and with the I_NEW flag set. The file system gets to fill it in
 * before unlocking it via unlock_new_inode().
 */
struct inode *iget_locked(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);
        if (inode) {
                wait_on_inode(inode);
                return inode;
        }

        inode = alloc_inode(sb);
        if (inode) {
                struct inode *old;

                spin_lock(&inode_hash_lock);
                /* We released the lock, so.. */
                old = find_inode_fast(sb, head, ino);
                if (!old) {
                        inode->i_ino = ino;
                        spin_lock(&inode->i_lock);
                        inode->i_state = I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        inode_sb_list_add(inode);
                        spin_unlock(&inode_hash_lock);

                        /* Return the locked inode with I_NEW set, the
                         * caller is responsible for filling in the contents
                         */
                        return inode;
                }

                /*
                 * Uhhuh, somebody else created the same inode under
                 * us. Use the old inode instead of the one we just
                 * allocated.
                 */
                spin_unlock(&inode_hash_lock);
                destroy_inode(inode);
                inode = old;
                wait_on_inode(inode);
        }
        return inode;
}
EXPORT_SYMBOL(iget_locked);
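
/*
 * Illustrative sketch of the I_NEW protocol described above (assuming a
 * hypothetical myfs_read_inode() that fills the inode from disk):
 *
 *	inode = iget_locked(sb, ino);
 *	if (!inode)
 *		return ERR_PTR(-ENOMEM);
 *	if (!(inode->i_state & I_NEW))
 *		return inode;		// cache hit, fully set up
 *	err = myfs_read_inode(inode);
 *	if (err) {
 *		iget_failed(inode);
 *		return ERR_PTR(err);
 *	}
 *	unlock_new_inode(inode);
 *	return inode;
 */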

/*
 * search the inode cache for a matching inode number.
 * If we find one, then the inode number we are trying to
 * allocate is not unique and so we should not use it.
 *
 * Returns 1 if the inode number is unique, 0 if it is not.
 */
static int test_inode_iunique(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *b = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, b, i_hash) {
                if (inode->i_ino == ino && inode->i_sb == sb) {
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
        }
        spin_unlock(&inode_hash_lock);

        return 1;
}

/**
 * iunique - get a unique inode number
 * @sb: superblock
 * @max_reserved: highest reserved inode number
 *
 * Obtain an inode number that is unique on the system for a given
 * superblock. This is used by file systems that have no natural
 * permanent inode numbering system. An inode number is returned that
 * is higher than the reserved limit but unique.
 *
 * BUGS:
 * With a large number of inodes live on the file system this function
 * currently becomes quite slow.
 */
ino_t iunique(struct super_block *sb, ino_t max_reserved)
{
        /*
         * On a 32bit, non LFS stat() call, glibc will generate an EOVERFLOW
         * error if st_ino won't fit in target struct field. Use 32bit counter
         * here to attempt to avoid that.
         */
        static DEFINE_SPINLOCK(iunique_lock);
        static unsigned int counter;
        ino_t res;

        spin_lock(&iunique_lock);
        do {
                if (counter <= max_reserved)
                        counter = max_reserved + 1;
                res = counter++;
        } while (!test_inode_iunique(sb, res));
        spin_unlock(&iunique_lock);

        return res;
}
EXPORT_SYMBOL(iunique);

struct inode *igrab(struct inode *inode)
{
        spin_lock(&inode->i_lock);
        if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {
                __iget(inode);
                spin_unlock(&inode->i_lock);
        } else {
                spin_unlock(&inode->i_lock);
                /*
                 * Handle the case where s_op->clear_inode has not been
                 * called yet, and somebody is calling igrab
                 * while the inode is getting freed.
                 */
                inode = NULL;
        }
        return inode;
}
EXPORT_SYMBOL(igrab);

/**
 * ilookup5_nowait - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache.
 * If the inode is in the cache, the inode is returned with an incremented
 * reference count.
 *
 * Note: I_NEW is not waited upon so you have to be very careful what you do
 * with the returned inode. You probably should be using ilookup5() instead.
 *
 * Note2: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,
                              int (*test)(struct inode *, void *), void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode(sb, head, test, data);
        spin_unlock(&inode_hash_lock);

        return inode;
}
EXPORT_SYMBOL(ilookup5_nowait);

/**
 * ilookup5 - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @test: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @test
 *
 * Search for the inode specified by @hashval and @data in the inode cache,
 * and if the inode is in the cache, return the inode with an incremented
 * reference count. Waits on I_NEW before returning the inode.
 *
 * This is a generalized version of ilookup() for file systems where the
 * inode number is not sufficient for unique identification of an inode.
 *
 * Note: @test is called with the inode_hash_lock held, so can't sleep.
 */
struct inode *ilookup5(struct super_block *sb, unsigned long hashval,
                       int (*test)(struct inode *, void *), void *data)
{
        struct inode *inode = ilookup5_nowait(sb, hashval, test, data);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup5);

/**
 * ilookup - search for an inode in the inode cache
 * @sb: super block of file system to search
 * @ino: inode number to search for
 *
 * Search for the inode @ino in the inode cache, and if the inode is in the
 * cache, the inode is returned with an incremented reference count.
 */
struct inode *ilookup(struct super_block *sb, unsigned long ino)
{
        struct hlist_head *head = inode_hashtable + hash(sb, ino);
        struct inode *inode;

        spin_lock(&inode_hash_lock);
        inode = find_inode_fast(sb, head, ino);
        spin_unlock(&inode_hash_lock);

        if (inode)
                wait_on_inode(inode);
        return inode;
}
EXPORT_SYMBOL(ilookup);

/**
 * find_inode_nowait - find an inode in the inode cache
 * @sb: super block of file system to search
 * @hashval: hash value (usually inode number) to search for
 * @match: callback used for comparisons between inodes
 * @data: opaque data pointer to pass to @match
 *
 * Search for the inode specified by @hashval and @data in the inode
 * cache, where the helper function @match will return 0 if the inode
 * does not match, 1 if the inode does match, and -1 if the search
 * should be stopped. The @match function must be responsible for
 * taking the i_lock spin_lock and checking i_state for an inode being
 * freed or being initialized, and incrementing the reference count
 * before returning 1. It also must not sleep, since it is called with
 * the inode_hash_lock spinlock held.
 *
 * This is an even more generalized version of ilookup5() when the
 * function must never block --- find_inode() can block in
 * __wait_on_freeing_inode() --- or when the caller can not increment
 * the reference count because the resulting iput() might cause an
 * inode eviction. The tradeoff is that the @match function must be
 * very carefully implemented.
 */
struct inode *find_inode_nowait(struct super_block *sb,
                                unsigned long hashval,
                                int (*match)(struct inode *, unsigned long,
                                             void *),
                                void *data)
{
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);
        struct inode *inode, *ret_inode = NULL;
        int mval;

        spin_lock(&inode_hash_lock);
        hlist_for_each_entry(inode, head, i_hash) {
                if (inode->i_sb != sb)
                        continue;
                mval = match(inode, hashval, data);
                if (mval == 0)
                        continue;
                if (mval == 1)
                        ret_inode = inode;
                goto out;
        }
out:
        spin_unlock(&inode_hash_lock);
        return ret_inode;
}
EXPORT_SYMBOL(find_inode_nowait);
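
/*
 * Illustrative sketch (assumed myfs_* names) of a @match callback that
 * honours the contract above: it takes i_lock itself, refuses inodes
 * being torn down, and grabs the reference before returning 1:
 *
 *	static int myfs_match(struct inode *inode, unsigned long hashval,
 *			      void *data)
 *	{
 *		if (inode->i_ino != hashval)
 *			return 0;
 *		spin_lock(&inode->i_lock);
 *		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
 *			spin_unlock(&inode->i_lock);
 *			return -1;	// stop searching, return NULL
 *		}
 *		__iget(inode);
 *		spin_unlock(&inode->i_lock);
 *		return 1;
 *	}
 */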

int insert_inode_locked(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        ino_t ino = inode->i_ino;
        struct hlist_head *head = inode_hashtable + hash(sb, ino);

        while (1) {
                struct inode *old = NULL;
                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_ino != ino)
                                continue;
                        if (old->i_sb != sb)
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked);

int insert_inode_locked4(struct inode *inode, unsigned long hashval,
                         int (*test)(struct inode *, void *), void *data)
{
        struct super_block *sb = inode->i_sb;
        struct hlist_head *head = inode_hashtable + hash(sb, hashval);

        while (1) {
                struct inode *old = NULL;

                spin_lock(&inode_hash_lock);
                hlist_for_each_entry(old, head, i_hash) {
                        if (old->i_sb != sb)
                                continue;
                        if (!test(old, data))
                                continue;
                        spin_lock(&old->i_lock);
                        if (old->i_state & (I_FREEING|I_WILL_FREE)) {
                                spin_unlock(&old->i_lock);
                                continue;
                        }
                        break;
                }
                if (likely(!old)) {
                        spin_lock(&inode->i_lock);
                        inode->i_state |= I_NEW;
                        hlist_add_head(&inode->i_hash, head);
                        spin_unlock(&inode->i_lock);
                        spin_unlock(&inode_hash_lock);
                        return 0;
                }
                __iget(old);
                spin_unlock(&old->i_lock);
                spin_unlock(&inode_hash_lock);
                wait_on_inode(old);
                if (unlikely(!inode_unhashed(old))) {
                        iput(old);
                        return -EBUSY;
                }
                iput(old);
        }
}
EXPORT_SYMBOL(insert_inode_locked4);

int generic_delete_inode(struct inode *inode)
{
        return 1;
}
EXPORT_SYMBOL(generic_delete_inode);

/*
 * Called when we're dropping the last reference
 * to an inode.
 *
 * Call the FS "drop_inode()" function, defaulting to
 * the legacy UNIX filesystem behaviour. If it tells
 * us to evict inode, do so. Otherwise, retain inode
 * in cache if fs is alive, sync and evict if fs is
 * shutting down.
 */
static void iput_final(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;
        const struct super_operations *op = inode->i_sb->s_op;
        int drop;

        WARN_ON(inode->i_state & I_NEW);

        if (op->drop_inode)
                drop = op->drop_inode(inode);
        else
                drop = generic_drop_inode(inode);

        if (!drop && (sb->s_flags & MS_ACTIVE)) {
                inode->i_state |= I_REFERENCED;
                inode_add_lru(inode);
                spin_unlock(&inode->i_lock);
                return;
        }

        if (!drop) {
                inode->i_state |= I_WILL_FREE;
                spin_unlock(&inode->i_lock);
                write_inode_now(inode, 1);
                spin_lock(&inode->i_lock);
                WARN_ON(inode->i_state & I_NEW);
                inode->i_state &= ~I_WILL_FREE;
        }

        inode->i_state |= I_FREEING;
        if (!list_empty(&inode->i_lru))
                inode_lru_list_del(inode);
        spin_unlock(&inode->i_lock);

        evict(inode);
}

/**
 * iput - put an inode
 * @inode: inode to put
 *
 * Puts an inode, dropping its usage count. If the inode use count hits
 * zero, the inode is then freed and may also be destroyed.
 *
 * Consequently, iput() can sleep.
 */
void iput(struct inode *inode)
{
        if (!inode)
                return;
        BUG_ON(inode->i_state & I_CLEAR);
retry:
        if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock)) {
                if (inode->i_nlink && (inode->i_state & I_DIRTY_TIME)) {
                        atomic_inc(&inode->i_count);
                        inode->i_state &= ~I_DIRTY_TIME;
                        spin_unlock(&inode->i_lock);
                        trace_writeback_lazytime_iput(inode);
                        mark_inode_dirty_sync(inode);
                        goto retry;
                }
                iput_final(inode);
        }
}
EXPORT_SYMBOL(iput);

/**
 * bmap - find a block number in a file
 * @inode: inode of file
 * @block: block to find
 *
 * Returns the block number on the device holding the inode that
 * is the disk block number for the block of the file requested.
 * That is, asked for block 4 of inode 1 the function will return the
 * disk block relative to the disk start that holds that block of the
 * file.
 */
sector_t bmap(struct inode *inode, sector_t block)
{
        sector_t res = 0;
        if (inode->i_mapping->a_ops->bmap)
                res = inode->i_mapping->a_ops->bmap(inode->i_mapping, block);
        return res;
}
EXPORT_SYMBOL(bmap);

/*
 * With relative atime, only update atime if the previous atime is
 * earlier than either the ctime or mtime or if at least a day has
 * passed since the last atime update.
 */
static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
                                struct timespec now)
{
        if (!(mnt->mnt_flags & MNT_RELATIME))
                return 1;
        /*
         * Is mtime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_mtime, &inode->i_atime) >= 0)
                return 1;
        /*
         * Is ctime younger than atime? If yes, update atime:
         */
        if (timespec_compare(&inode->i_ctime, &inode->i_atime) >= 0)
                return 1;

        /*
         * Is the previous atime value older than a day? If yes,
         * update atime:
         */
        if ((long)(now.tv_sec - inode->i_atime.tv_sec) >= 24*60*60)
                return 1;
        /*
         * Good, we can skip the atime update:
         */
        return 0;
}

int generic_update_time(struct inode *inode, struct timespec *time, int flags)
{
        int iflags = I_DIRTY_TIME;

        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_VERSION)
                inode_inc_iversion(inode);
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;

        if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
                iflags |= I_DIRTY_SYNC;
        __mark_inode_dirty(inode, iflags);
        return 0;
}
EXPORT_SYMBOL(generic_update_time);

/*
 * This does the actual work of updating an inode's time or version. The
 * caller must have called mnt_want_write() before calling this.
 */
static int update_time(struct inode *inode, struct timespec *time, int flags)
{
        int (*update_time)(struct inode *, struct timespec *, int);

        update_time = inode->i_op->update_time ? inode->i_op->update_time :
                                                 generic_update_time;

        return update_time(inode, time, flags);
}

/**
 * touch_atime - update the access time
 * @path: the &struct path to update
 * @inode: inode to update
 *
 * Update the accessed time on an inode and mark it for writeback.
 * This function automatically handles read only file systems and media,
 * as well as the "noatime" flag and inode specific "noatime" markers.
 */
bool atime_needs_update(const struct path *path, struct inode *inode)
{
        struct vfsmount *mnt = path->mnt;
        struct timespec now;

        if (inode->i_flags & S_NOATIME)
                return false;
        if (IS_NOATIME(inode))
                return false;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        if (mnt->mnt_flags & MNT_NOATIME)
                return false;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
                return false;

        now = current_fs_time(inode->i_sb);

        if (!relatime_need_update(mnt, inode, now))
                return false;

        if (timespec_equal(&inode->i_atime, &now))
                return false;

        return true;
}

void touch_atime(const struct path *path)
{
        struct vfsmount *mnt = path->mnt;
        struct inode *inode = d_inode(path->dentry);
        struct timespec now;

        if (!atime_needs_update(path, inode))
                return;

        if (!sb_start_write_trylock(inode->i_sb))
                return;

        if (__mnt_want_write(mnt) != 0)
                goto skip_update;
        /*
         * File systems can error out when updating inodes if they need to
         * allocate new space to modify an inode (such is the case for
         * Btrfs), but since we touch atime while walking down the path we
         * really don't care if we failed to update the atime of the file,
         * so just ignore the return value.
         * We may also fail on filesystems that have the ability to make parts
         * of the fs read only, e.g. subvolumes in Btrfs.
         */
        now = current_fs_time(inode->i_sb);
        update_time(inode, &now, S_ATIME);
        __mnt_drop_write(mnt);
skip_update:
        sb_end_write(inode->i_sb);
}
EXPORT_SYMBOL(touch_atime);

/*
 * The logic we want is
 *
 *	if suid or (sgid and xgrp)
 *		remove privs
 */
int should_remove_suid(struct dentry *dentry)
{
        umode_t mode = d_inode(dentry)->i_mode;
        int kill = 0;

        /* suid always must be killed */
        if (unlikely(mode & S_ISUID))
                kill = ATTR_KILL_SUID;

        /*
         * sgid without any exec bits is just a mandatory locking mark; leave
         * it alone. If some exec bits are set, it's a real sgid; kill it.
         */
        if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
                kill |= ATTR_KILL_SGID;

        if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
                return kill;

        return 0;
}
EXPORT_SYMBOL(should_remove_suid);

/*
 * Return mask of changes for notify_change() that need to be done as a
 * response to write or truncate. Return 0 if nothing has to be changed.
 * Negative value on error (change should be denied).
 */
int dentry_needs_remove_privs(struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);
        int mask = 0;
        int ret;

        if (IS_NOSEC(inode))
                return 0;

        mask = should_remove_suid(dentry);
        ret = security_inode_need_killpriv(dentry);
        if (ret < 0)
                return ret;
        if (ret)
                mask |= ATTR_KILL_PRIV;
        return mask;
}
EXPORT_SYMBOL(dentry_needs_remove_privs);

static int __remove_privs(struct dentry *dentry, int kill)
{
        struct iattr newattrs;

        newattrs.ia_valid = ATTR_FORCE | kill;
        /*
         * Note we call this on write, so notify_change will not
         * encounter any conflicting delegations:
         */
        return notify_change(dentry, &newattrs, NULL);
}

/*
 * Remove special file privileges (suid, capabilities) when a file is
 * written to or truncated.
 */
int file_remove_privs(struct file *file)
{
        struct dentry *dentry = file->f_path.dentry;
        struct inode *inode = d_inode(dentry);
        int kill;
        int error = 0;

        /* Fast path for nothing security related */
        if (IS_NOSEC(inode))
                return 0;

        kill = file_needs_remove_privs(file);
        if (kill < 0)
                return kill;
        if (kill)
                error = __remove_privs(dentry, kill);
        if (!error)
                inode_has_no_xattr(inode);

        return error;
}
EXPORT_SYMBOL(file_remove_privs);

/**
 * file_update_time - update mtime and ctime
 * @file: file accessed
 *
 * Update the mtime and ctime members of an inode and mark the inode for
 * writeback. Note that this function is meant exclusively for usage in
 * the file write path of filesystems, and filesystems may choose to
 * explicitly ignore updates via this function with the S_NOCMTIME inode
 * flag, e.g. for network filesystems where these timestamps are handled
 * by the server. This can return an error for file systems that need to
 * allocate space in order to update an inode.
 */

int file_update_time(struct file *file)
{
        struct inode *inode = file_inode(file);
        struct timespec now;
        int sync_it = 0;
        int ret;

        /* First try to exhaust all avenues to not sync */
        if (IS_NOCMTIME(inode))
                return 0;

        now = current_fs_time(inode->i_sb);
        if (!timespec_equal(&inode->i_mtime, &now))
                sync_it = S_MTIME;

        if (!timespec_equal(&inode->i_ctime, &now))
                sync_it |= S_CTIME;

        if (IS_I_VERSION(inode))
                sync_it |= S_VERSION;

        if (!sync_it)
                return 0;

        /* Finally allowed to write? Takes lock. */
        if (__mnt_want_write_file(file))
                return 0;

        ret = update_time(inode, &now, sync_it);
        __mnt_drop_write_file(file);

        return ret;
}
EXPORT_SYMBOL(file_update_time);
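
/*
 * Illustrative sketch: ->write_iter() implementations typically call
 * this after taking the inode lock and before copying data; modulo
 * error handling, the generic write path does roughly:
 *
 *	inode_lock(inode);
 *	ret = file_remove_privs(file);
 *	if (!ret)
 *		ret = file_update_time(file);
 *	...perform the actual write...
 *	inode_unlock(inode);
 */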

int inode_needs_sync(struct inode *inode)
{
        if (IS_SYNC(inode))
                return 1;
        if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
                return 1;
        return 0;
}
EXPORT_SYMBOL(inode_needs_sync);

/*
 * If we try to find an inode in the inode hash while it is being
 * deleted, we have to wait until the filesystem completes its
 * deletion before reporting that it isn't found. This function waits
 * until the deletion _might_ have completed. Callers are responsible
 * for rechecking inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
 * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
 * will DTRT.
 */
static void __wait_on_freeing_inode(struct inode *inode)
{
        wait_queue_head_t *wq;
        DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
        wq = bit_waitqueue(&inode->i_state, __I_NEW);
        prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
        spin_unlock(&inode->i_lock);
        spin_unlock(&inode_hash_lock);
        schedule();
        finish_wait(wq, &wait.wait);
        spin_lock(&inode_hash_lock);
}

static __initdata unsigned long ihash_entries;
static int __init set_ihash_entries(char *str)
{
        if (!str)
                return 0;
        ihash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("ihash_entries=", set_ihash_entries);

/*
 * Initialize the waitqueues and inode hash table.
 */
void __init inode_init_early(void)
{
        unsigned int loop;

        /* If hashes are distributed across NUMA nodes, defer
         * hash allocation until vmalloc space is available.
         */
        if (hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        HASH_EARLY,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);

        for (loop = 0; loop < (1U << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void __init inode_init(void)
{
        unsigned int loop;

        /* inode slab cache */
        inode_cachep = kmem_cache_create("inode_cache",
                                         sizeof(struct inode),
                                         0,
                                         (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
                                          SLAB_MEM_SPREAD|SLAB_ACCOUNT),
                                         init_once);

        /* Hash may have been set up in inode_init_early */
        if (!hashdist)
                return;

        inode_hashtable =
                alloc_large_system_hash("Inode-cache",
                                        sizeof(struct hlist_head),
                                        ihash_entries,
                                        14,
                                        0,
                                        &i_hash_shift,
                                        &i_hash_mask,
                                        0,
                                        0);

        for (loop = 0; loop < (1U << i_hash_shift); loop++)
                INIT_HLIST_HEAD(&inode_hashtable[loop]);
}

void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
{
        inode->i_mode = mode;
        if (S_ISCHR(mode)) {
                inode->i_fop = &def_chr_fops;
                inode->i_rdev = rdev;
        } else if (S_ISBLK(mode)) {
                inode->i_fop = &def_blk_fops;
                inode->i_rdev = rdev;
        } else if (S_ISFIFO(mode))
                inode->i_fop = &pipefifo_fops;
        else if (S_ISSOCK(mode))
                ;       /* leave it no_open_fops */
        else
                printk(KERN_DEBUG "init_special_inode: bogus i_mode (%o) for"
                                  " inode %s:%lu\n", mode, inode->i_sb->s_id,
                                  inode->i_ino);
}
EXPORT_SYMBOL(init_special_inode);

/**
 * inode_init_owner - init uid, gid and mode for a new inode according to POSIX standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
void inode_init_owner(struct inode *inode, const struct inode *dir,
                      umode_t mode)
{
        inode->i_uid = current_fsuid();
        if (dir && dir->i_mode & S_ISGID) {
                inode->i_gid = dir->i_gid;
                if (S_ISDIR(mode))
                        mode |= S_ISGID;
        } else
                inode->i_gid = current_fsgid();
        inode->i_mode = mode;
}
EXPORT_SYMBOL(inode_init_owner);

/**
 * inode_owner_or_capable - check current task permissions to inode
 * @inode: inode being checked
 *
 * Return true if current either has CAP_FOWNER in a namespace with the
 * inode owner uid mapped, or owns the file.
 */
bool inode_owner_or_capable(const struct inode *inode)
{
        struct user_namespace *ns;

        if (uid_eq(current_fsuid(), inode->i_uid))
                return true;

        ns = current_user_ns();
        if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
                return true;
        return false;
}
EXPORT_SYMBOL(inode_owner_or_capable);

/*
 * Direct i/o helper functions
 */
static void __inode_dio_wait(struct inode *inode)
{
        wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
        DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);

        do {
                prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
                if (atomic_read(&inode->i_dio_count))
                        schedule();
        } while (atomic_read(&inode->i_dio_count));
        finish_wait(wq, &q.wait);
}

/**
 * inode_dio_wait - wait for outstanding DIO requests to finish
 * @inode: inode to wait for
 *
 * Waits for all pending direct I/O requests to finish so that we can
 * proceed with a truncate or equivalent operation.
 *
 * Must be called under a lock that serializes taking new references
 * to i_dio_count, usually by inode->i_mutex.
 */
void inode_dio_wait(struct inode *inode)
{
        if (atomic_read(&inode->i_dio_count))
                __inode_dio_wait(inode);
}
EXPORT_SYMBOL(inode_dio_wait);
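
/*
 * Illustrative sketch of the locking contract above: a truncate-like
 * path first blocks new direct I/O by holding the inode lock, then
 * drains whatever is already in flight:
 *
 *	inode_lock(inode);
 *	inode_dio_wait(inode);
 *	...shrink the file...
 *	inode_unlock(inode);
 */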

/*
 * inode_set_flags - atomically set some inode flags
 *
 * Note: the caller should be holding i_mutex, or else be sure that
 * they have exclusive access to the inode structure (i.e., while the
 * inode is being instantiated). The reason for the cmpxchg() loop
 * --- which wouldn't be necessary if all code paths which modify
 * i_flags actually followed this rule, is that there is at least one
 * code path which doesn't today so we use cmpxchg() out of an abundance
 * of caution.
 *
 * In the long run, i_mutex is overkill, and we should probably look
 * at using the i_lock spinlock to protect i_flags, and then make sure
 * it is so documented in include/linux/fs.h and that all code follows
 * the locking convention!!
 */
void inode_set_flags(struct inode *inode, unsigned int flags,
                     unsigned int mask)
{
        unsigned int old_flags, new_flags;

        WARN_ON_ONCE(flags & ~mask);
        do {
                old_flags = ACCESS_ONCE(inode->i_flags);
                new_flags = (old_flags & ~mask) | flags;
        } while (unlikely(cmpxchg(&inode->i_flags, old_flags,
                                  new_flags) != old_flags));
}
EXPORT_SYMBOL(inode_set_flags);
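
/*
 * Illustrative sketch: callers update a subset of flags by passing the
 * new values plus a mask covering every bit they own, leaving the rest
 * untouched, e.g.:
 *
 *	inode_set_flags(inode, S_NOATIME | S_IMMUTABLE,
 *			S_NOATIME | S_IMMUTABLE | S_APPEND);
 *
 * which sets NOATIME and IMMUTABLE while clearing APPEND, atomically
 * with respect to concurrent i_flags updates.
 */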

void inode_nohighmem(struct inode *inode)
{
        mapping_set_gfp_mask(inode->i_mapping, GFP_USER);
}
EXPORT_SYMBOL(inode_nohighmem);