2 * Resizable virtual memory filesystem for Linux.
4 * Copyright (C) 2000 Linus Torvalds.
6 * 2000-2001 Christoph Rohland
9 * Copyright (C) 2002-2005 Hugh Dickins.
10 * Copyright (C) 2002-2005 VERITAS Software Corporation.
11 * Copyright (C) 2004 Andi Kleen, SuSE Labs
13 * Extended attribute support for tmpfs:
14 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
15 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
18 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
20 * This file is released under the GPL.
24 #include <linux/init.h>
25 #include <linux/vfs.h>
26 #include <linux/mount.h>
27 #include <linux/pagemap.h>
28 #include <linux/file.h>
30 #include <linux/module.h>
31 #include <linux/percpu_counter.h>
32 #include <linux/swap.h>
34 static struct vfsmount
*shm_mnt
;
38 * This virtual memory filesystem is heavily based on the ramfs. It
39 * extends ramfs by the ability to use swap and honor resource limits
40 * which makes it a completely usable filesystem.
43 #include <linux/xattr.h>
44 #include <linux/exportfs.h>
45 #include <linux/posix_acl.h>
46 #include <linux/generic_acl.h>
47 #include <linux/mman.h>
48 #include <linux/string.h>
49 #include <linux/slab.h>
50 #include <linux/backing-dev.h>
51 #include <linux/shmem_fs.h>
52 #include <linux/writeback.h>
53 #include <linux/blkdev.h>
54 #include <linux/splice.h>
55 #include <linux/security.h>
56 #include <linux/swapops.h>
57 #include <linux/mempolicy.h>
58 #include <linux/namei.h>
59 #include <linux/ctype.h>
60 #include <linux/migrate.h>
61 #include <linux/highmem.h>
62 #include <linux/seq_file.h>
63 #include <linux/magic.h>
65 #include <asm/uaccess.h>
66 #include <asm/div64.h>
67 #include <asm/pgtable.h>
70 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
71 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
73 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
74 * but one eighth of that on a 64-bit kernel. With 8kB page size, maximum
75 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
76 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
78 * We use / and * instead of shifts in the definitions below, so that the swap
79 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
81 #define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
82 #define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
84 #define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
85 #define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)
87 #define SHMEM_MAX_BYTES min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
88 #define SHMEM_MAX_INDEX ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))
90 #define BLOCKS_PER_PAGE (PAGE_CACHE_SIZE/512)
91 #define VM_ACCT(size) (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)
93 /* info->flags needs VM_flags to handle pagein/truncate races efficiently */
94 #define SHMEM_PAGEIN VM_READ
95 #define SHMEM_TRUNCATE VM_WRITE
97 /* Definition to limit shmem_truncate's steps between cond_rescheds */
98 #define LATENCY_LIMIT 64
100 /* Pretend that each entry is of this size in directory's i_size */
101 #define BOGO_DIRENT_SIZE 20
104 struct list_head list
; /* anchored by shmem_inode_info->xattr_list */
105 char *name
; /* xattr name */
110 /* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
112 SGP_READ
, /* don't exceed i_size, don't allocate page */
113 SGP_CACHE
, /* don't exceed i_size, may allocate page */
114 SGP_DIRTY
, /* like SGP_CACHE, but set new page dirty */
115 SGP_WRITE
, /* may exceed i_size, may allocate page */
119 static unsigned long shmem_default_max_blocks(void)
121 return totalram_pages
/ 2;
124 static unsigned long shmem_default_max_inodes(void)
126 return min(totalram_pages
- totalhigh_pages
, totalram_pages
/ 2);
130 static int shmem_getpage_gfp(struct inode
*inode
, pgoff_t index
,
131 struct page
**pagep
, enum sgp_type sgp
, gfp_t gfp
, int *fault_type
);
133 static inline int shmem_getpage(struct inode
*inode
, pgoff_t index
,
134 struct page
**pagep
, enum sgp_type sgp
, int *fault_type
)
136 return shmem_getpage_gfp(inode
, index
, pagep
, sgp
,
137 mapping_gfp_mask(inode
->i_mapping
), fault_type
);
140 static inline struct page
*shmem_dir_alloc(gfp_t gfp_mask
)
143 * The above definition of ENTRIES_PER_PAGE, and the use of
144 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
145 * might be reconsidered if it ever diverges from PAGE_SIZE.
147 * Mobility flags are masked out as swap vectors cannot move
149 return alloc_pages((gfp_mask
& ~GFP_MOVABLE_MASK
) | __GFP_ZERO
,
150 PAGE_CACHE_SHIFT
-PAGE_SHIFT
);
153 static inline void shmem_dir_free(struct page
*page
)
155 __free_pages(page
, PAGE_CACHE_SHIFT
-PAGE_SHIFT
);
158 static struct page
**shmem_dir_map(struct page
*page
)
160 return (struct page
**)kmap_atomic(page
, KM_USER0
);
163 static inline void shmem_dir_unmap(struct page
**dir
)
165 kunmap_atomic(dir
, KM_USER0
);
168 static swp_entry_t
*shmem_swp_map(struct page
*page
)
170 return (swp_entry_t
*)kmap_atomic(page
, KM_USER1
);
173 static inline void shmem_swp_balance_unmap(void)
176 * When passing a pointer to an i_direct entry, to code which
177 * also handles indirect entries and so will shmem_swp_unmap,
178 * we must arrange for the preempt count to remain in balance.
179 * What kmap_atomic of a lowmem page does depends on config
180 * and architecture, so pretend to kmap_atomic some lowmem page.
182 (void) kmap_atomic(ZERO_PAGE(0), KM_USER1
);
185 static inline void shmem_swp_unmap(swp_entry_t
*entry
)
187 kunmap_atomic(entry
, KM_USER1
);
190 static inline struct shmem_sb_info
*SHMEM_SB(struct super_block
*sb
)
192 return sb
->s_fs_info
;
196 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
197 * for shared memory and for shared anonymous (/dev/zero) mappings
198 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
199 * consistent with the pre-accounting of private mappings ...
201 static inline int shmem_acct_size(unsigned long flags
, loff_t size
)
203 return (flags
& VM_NORESERVE
) ?
204 0 : security_vm_enough_memory_kern(VM_ACCT(size
));
207 static inline void shmem_unacct_size(unsigned long flags
, loff_t size
)
209 if (!(flags
& VM_NORESERVE
))
210 vm_unacct_memory(VM_ACCT(size
));
214 * ... whereas tmpfs objects are accounted incrementally as
215 * pages are allocated, in order to allow huge sparse files.
216 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
217 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
219 static inline int shmem_acct_block(unsigned long flags
)
221 return (flags
& VM_NORESERVE
) ?
222 security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE
)) : 0;
225 static inline void shmem_unacct_blocks(unsigned long flags
, long pages
)
227 if (flags
& VM_NORESERVE
)
228 vm_unacct_memory(pages
* VM_ACCT(PAGE_CACHE_SIZE
));
231 static const struct super_operations shmem_ops
;
232 static const struct address_space_operations shmem_aops
;
233 static const struct file_operations shmem_file_operations
;
234 static const struct inode_operations shmem_inode_operations
;
235 static const struct inode_operations shmem_dir_inode_operations
;
236 static const struct inode_operations shmem_special_inode_operations
;
237 static const struct vm_operations_struct shmem_vm_ops
;
239 static struct backing_dev_info shmem_backing_dev_info __read_mostly
= {
240 .ra_pages
= 0, /* No readahead */
241 .capabilities
= BDI_CAP_NO_ACCT_AND_WRITEBACK
| BDI_CAP_SWAP_BACKED
,
244 static LIST_HEAD(shmem_swaplist
);
245 static DEFINE_MUTEX(shmem_swaplist_mutex
);
247 static void shmem_free_blocks(struct inode
*inode
, long pages
)
249 struct shmem_sb_info
*sbinfo
= SHMEM_SB(inode
->i_sb
);
250 if (sbinfo
->max_blocks
) {
251 percpu_counter_add(&sbinfo
->used_blocks
, -pages
);
252 inode
->i_blocks
-= pages
*BLOCKS_PER_PAGE
;
256 static int shmem_reserve_inode(struct super_block
*sb
)
258 struct shmem_sb_info
*sbinfo
= SHMEM_SB(sb
);
259 if (sbinfo
->max_inodes
) {
260 spin_lock(&sbinfo
->stat_lock
);
261 if (!sbinfo
->free_inodes
) {
262 spin_unlock(&sbinfo
->stat_lock
);
265 sbinfo
->free_inodes
--;
266 spin_unlock(&sbinfo
->stat_lock
);
271 static void shmem_free_inode(struct super_block
*sb
)
273 struct shmem_sb_info
*sbinfo
= SHMEM_SB(sb
);
274 if (sbinfo
->max_inodes
) {
275 spin_lock(&sbinfo
->stat_lock
);
276 sbinfo
->free_inodes
++;
277 spin_unlock(&sbinfo
->stat_lock
);
282 * shmem_recalc_inode - recalculate the size of an inode
283 * @inode: inode to recalc
285 * We have to calculate the free blocks since the mm can drop
286 * undirtied hole pages behind our back.
288 * But normally info->alloced == inode->i_mapping->nrpages + info->swapped
289 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
291 * It has to be called with the spinlock held.
293 static void shmem_recalc_inode(struct inode
*inode
)
295 struct shmem_inode_info
*info
= SHMEM_I(inode
);
298 freed
= info
->alloced
- info
->swapped
- inode
->i_mapping
->nrpages
;
300 info
->alloced
-= freed
;
301 shmem_unacct_blocks(info
->flags
, freed
);
302 shmem_free_blocks(inode
, freed
);
307 * shmem_swp_entry - find the swap vector position in the info structure
308 * @info: info structure for the inode
309 * @index: index of the page to find
310 * @page: optional page to add to the structure. Has to be preset to
313 * If there is no space allocated yet it will return NULL when
314 * page is NULL, else it will use the page for the needed block,
315 * setting it to NULL on return to indicate that it has been used.
317 * The swap vector is organized the following way:
319 * There are SHMEM_NR_DIRECT entries directly stored in the
320 * shmem_inode_info structure. So small files do not need an addional
323 * For pages with index > SHMEM_NR_DIRECT there is the pointer
324 * i_indirect which points to a page which holds in the first half
325 * doubly indirect blocks, in the second half triple indirect blocks:
327 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
328 * following layout (for SHMEM_NR_DIRECT == 16):
330 * i_indirect -> dir --> 16-19
343 static swp_entry_t
*shmem_swp_entry(struct shmem_inode_info
*info
, unsigned long index
, struct page
**page
)
345 unsigned long offset
;
349 if (index
< SHMEM_NR_DIRECT
) {
350 shmem_swp_balance_unmap();
351 return info
->i_direct
+index
;
353 if (!info
->i_indirect
) {
355 info
->i_indirect
= *page
;
358 return NULL
; /* need another page */
361 index
-= SHMEM_NR_DIRECT
;
362 offset
= index
% ENTRIES_PER_PAGE
;
363 index
/= ENTRIES_PER_PAGE
;
364 dir
= shmem_dir_map(info
->i_indirect
);
366 if (index
>= ENTRIES_PER_PAGE
/2) {
367 index
-= ENTRIES_PER_PAGE
/2;
368 dir
+= ENTRIES_PER_PAGE
/2 + index
/ENTRIES_PER_PAGE
;
369 index
%= ENTRIES_PER_PAGE
;
376 shmem_dir_unmap(dir
);
377 return NULL
; /* need another page */
379 shmem_dir_unmap(dir
);
380 dir
= shmem_dir_map(subdir
);
386 if (!page
|| !(subdir
= *page
)) {
387 shmem_dir_unmap(dir
);
388 return NULL
; /* need a page */
393 shmem_dir_unmap(dir
);
394 return shmem_swp_map(subdir
) + offset
;
397 static void shmem_swp_set(struct shmem_inode_info
*info
, swp_entry_t
*entry
, unsigned long value
)
399 long incdec
= value
? 1: -1;
402 info
->swapped
+= incdec
;
403 if ((unsigned long)(entry
- info
->i_direct
) >= SHMEM_NR_DIRECT
) {
404 struct page
*page
= kmap_atomic_to_page(entry
);
405 set_page_private(page
, page_private(page
) + incdec
);
410 * shmem_swp_alloc - get the position of the swap entry for the page.
411 * @info: info structure for the inode
412 * @index: index of the page to find
413 * @sgp: check and recheck i_size? skip allocation?
414 * @gfp: gfp mask to use for any page allocation
416 * If the entry does not exist, allocate it.
418 static swp_entry_t
*shmem_swp_alloc(struct shmem_inode_info
*info
,
419 unsigned long index
, enum sgp_type sgp
, gfp_t gfp
)
421 struct inode
*inode
= &info
->vfs_inode
;
422 struct shmem_sb_info
*sbinfo
= SHMEM_SB(inode
->i_sb
);
423 struct page
*page
= NULL
;
426 if (sgp
!= SGP_WRITE
&&
427 ((loff_t
) index
<< PAGE_CACHE_SHIFT
) >= i_size_read(inode
))
428 return ERR_PTR(-EINVAL
);
430 while (!(entry
= shmem_swp_entry(info
, index
, &page
))) {
432 return shmem_swp_map(ZERO_PAGE(0));
434 * Test used_blocks against 1 less max_blocks, since we have 1 data
435 * page (and perhaps indirect index pages) yet to allocate:
436 * a waste to allocate index if we cannot allocate data.
438 if (sbinfo
->max_blocks
) {
439 if (percpu_counter_compare(&sbinfo
->used_blocks
,
440 sbinfo
->max_blocks
- 1) >= 0)
441 return ERR_PTR(-ENOSPC
);
442 percpu_counter_inc(&sbinfo
->used_blocks
);
443 inode
->i_blocks
+= BLOCKS_PER_PAGE
;
446 spin_unlock(&info
->lock
);
447 page
= shmem_dir_alloc(gfp
);
448 spin_lock(&info
->lock
);
451 shmem_free_blocks(inode
, 1);
452 return ERR_PTR(-ENOMEM
);
454 if (sgp
!= SGP_WRITE
&&
455 ((loff_t
) index
<< PAGE_CACHE_SHIFT
) >= i_size_read(inode
)) {
456 entry
= ERR_PTR(-EINVAL
);
459 if (info
->next_index
<= index
)
460 info
->next_index
= index
+ 1;
463 /* another task gave its page, or truncated the file */
464 shmem_free_blocks(inode
, 1);
465 shmem_dir_free(page
);
467 if (info
->next_index
<= index
&& !IS_ERR(entry
))
468 info
->next_index
= index
+ 1;
473 * shmem_free_swp - free some swap entries in a directory
474 * @dir: pointer to the directory
475 * @edir: pointer after last entry of the directory
476 * @punch_lock: pointer to spinlock when needed for the holepunch case
478 static int shmem_free_swp(swp_entry_t
*dir
, swp_entry_t
*edir
,
479 spinlock_t
*punch_lock
)
481 spinlock_t
*punch_unlock
= NULL
;
485 for (ptr
= dir
; ptr
< edir
; ptr
++) {
487 if (unlikely(punch_lock
)) {
488 punch_unlock
= punch_lock
;
490 spin_lock(punch_unlock
);
494 free_swap_and_cache(*ptr
);
495 *ptr
= (swp_entry_t
){0};
500 spin_unlock(punch_unlock
);
504 static int shmem_map_and_free_swp(struct page
*subdir
, int offset
,
505 int limit
, struct page
***dir
, spinlock_t
*punch_lock
)
510 ptr
= shmem_swp_map(subdir
);
511 for (; offset
< limit
; offset
+= LATENCY_LIMIT
) {
512 int size
= limit
- offset
;
513 if (size
> LATENCY_LIMIT
)
514 size
= LATENCY_LIMIT
;
515 freed
+= shmem_free_swp(ptr
+offset
, ptr
+offset
+size
,
517 if (need_resched()) {
518 shmem_swp_unmap(ptr
);
520 shmem_dir_unmap(*dir
);
524 ptr
= shmem_swp_map(subdir
);
527 shmem_swp_unmap(ptr
);
531 static void shmem_free_pages(struct list_head
*next
)
537 page
= container_of(next
, struct page
, lru
);
539 shmem_dir_free(page
);
541 if (freed
>= LATENCY_LIMIT
) {
548 void shmem_truncate_range(struct inode
*inode
, loff_t start
, loff_t end
)
550 struct shmem_inode_info
*info
= SHMEM_I(inode
);
555 unsigned long diroff
;
561 LIST_HEAD(pages_to_free
);
562 long nr_pages_to_free
= 0;
563 long nr_swaps_freed
= 0;
567 spinlock_t
*needs_lock
;
568 spinlock_t
*punch_lock
;
569 unsigned long upper_limit
;
571 truncate_inode_pages_range(inode
->i_mapping
, start
, end
);
573 inode
->i_ctime
= inode
->i_mtime
= CURRENT_TIME
;
574 idx
= (start
+ PAGE_CACHE_SIZE
- 1) >> PAGE_CACHE_SHIFT
;
575 if (idx
>= info
->next_index
)
578 spin_lock(&info
->lock
);
579 info
->flags
|= SHMEM_TRUNCATE
;
580 if (likely(end
== (loff_t
) -1)) {
581 limit
= info
->next_index
;
582 upper_limit
= SHMEM_MAX_INDEX
;
583 info
->next_index
= idx
;
587 if (end
+ 1 >= inode
->i_size
) { /* we may free a little more */
588 limit
= (inode
->i_size
+ PAGE_CACHE_SIZE
- 1) >>
590 upper_limit
= SHMEM_MAX_INDEX
;
592 limit
= (end
+ 1) >> PAGE_CACHE_SHIFT
;
595 needs_lock
= &info
->lock
;
599 topdir
= info
->i_indirect
;
600 if (topdir
&& idx
<= SHMEM_NR_DIRECT
&& !punch_hole
) {
601 info
->i_indirect
= NULL
;
603 list_add(&topdir
->lru
, &pages_to_free
);
605 spin_unlock(&info
->lock
);
607 if (info
->swapped
&& idx
< SHMEM_NR_DIRECT
) {
608 ptr
= info
->i_direct
;
610 if (size
> SHMEM_NR_DIRECT
)
611 size
= SHMEM_NR_DIRECT
;
612 nr_swaps_freed
= shmem_free_swp(ptr
+idx
, ptr
+size
, needs_lock
);
616 * If there are no indirect blocks or we are punching a hole
617 * below indirect blocks, nothing to be done.
619 if (!topdir
|| limit
<= SHMEM_NR_DIRECT
)
623 * The truncation case has already dropped info->lock, and we're safe
624 * because i_size and next_index have already been lowered, preventing
625 * access beyond. But in the punch_hole case, we still need to take
626 * the lock when updating the swap directory, because there might be
627 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
628 * shmem_writepage. However, whenever we find we can remove a whole
629 * directory page (not at the misaligned start or end of the range),
630 * we first NULLify its pointer in the level above, and then have no
631 * need to take the lock when updating its contents: needs_lock and
632 * punch_lock (either pointing to info->lock or NULL) manage this.
635 upper_limit
-= SHMEM_NR_DIRECT
;
636 limit
-= SHMEM_NR_DIRECT
;
637 idx
= (idx
> SHMEM_NR_DIRECT
)? (idx
- SHMEM_NR_DIRECT
): 0;
638 offset
= idx
% ENTRIES_PER_PAGE
;
641 dir
= shmem_dir_map(topdir
);
642 stage
= ENTRIES_PER_PAGEPAGE
/2;
643 if (idx
< ENTRIES_PER_PAGEPAGE
/2) {
645 diroff
= idx
/ENTRIES_PER_PAGE
;
647 dir
+= ENTRIES_PER_PAGE
/2;
648 dir
+= (idx
- ENTRIES_PER_PAGEPAGE
/2)/ENTRIES_PER_PAGEPAGE
;
650 stage
+= ENTRIES_PER_PAGEPAGE
;
653 diroff
= ((idx
- ENTRIES_PER_PAGEPAGE
/2) %
654 ENTRIES_PER_PAGEPAGE
) / ENTRIES_PER_PAGE
;
655 if (!diroff
&& !offset
&& upper_limit
>= stage
) {
657 spin_lock(needs_lock
);
659 spin_unlock(needs_lock
);
664 list_add(&middir
->lru
, &pages_to_free
);
666 shmem_dir_unmap(dir
);
667 dir
= shmem_dir_map(middir
);
675 for (; idx
< limit
; idx
+= ENTRIES_PER_PAGE
, diroff
++) {
676 if (unlikely(idx
== stage
)) {
677 shmem_dir_unmap(dir
);
678 dir
= shmem_dir_map(topdir
) +
679 ENTRIES_PER_PAGE
/2 + idx
/ENTRIES_PER_PAGEPAGE
;
682 idx
+= ENTRIES_PER_PAGEPAGE
;
686 stage
= idx
+ ENTRIES_PER_PAGEPAGE
;
689 needs_lock
= &info
->lock
;
690 if (upper_limit
>= stage
) {
692 spin_lock(needs_lock
);
694 spin_unlock(needs_lock
);
699 list_add(&middir
->lru
, &pages_to_free
);
701 shmem_dir_unmap(dir
);
703 dir
= shmem_dir_map(middir
);
706 punch_lock
= needs_lock
;
707 subdir
= dir
[diroff
];
708 if (subdir
&& !offset
&& upper_limit
-idx
>= ENTRIES_PER_PAGE
) {
710 spin_lock(needs_lock
);
712 spin_unlock(needs_lock
);
717 list_add(&subdir
->lru
, &pages_to_free
);
719 if (subdir
&& page_private(subdir
) /* has swap entries */) {
721 if (size
> ENTRIES_PER_PAGE
)
722 size
= ENTRIES_PER_PAGE
;
723 freed
= shmem_map_and_free_swp(subdir
,
724 offset
, size
, &dir
, punch_lock
);
726 dir
= shmem_dir_map(middir
);
727 nr_swaps_freed
+= freed
;
728 if (offset
|| punch_lock
) {
729 spin_lock(&info
->lock
);
730 set_page_private(subdir
,
731 page_private(subdir
) - freed
);
732 spin_unlock(&info
->lock
);
734 BUG_ON(page_private(subdir
) != freed
);
739 shmem_dir_unmap(dir
);
741 if (inode
->i_mapping
->nrpages
&& (info
->flags
& SHMEM_PAGEIN
)) {
743 * Call truncate_inode_pages again: racing shmem_unuse_inode
744 * may have swizzled a page in from swap since
745 * truncate_pagecache or generic_delete_inode did it, before we
746 * lowered next_index. Also, though shmem_getpage checks
747 * i_size before adding to cache, no recheck after: so fix the
748 * narrow window there too.
750 truncate_inode_pages_range(inode
->i_mapping
, start
, end
);
753 spin_lock(&info
->lock
);
754 info
->flags
&= ~SHMEM_TRUNCATE
;
755 info
->swapped
-= nr_swaps_freed
;
756 if (nr_pages_to_free
)
757 shmem_free_blocks(inode
, nr_pages_to_free
);
758 shmem_recalc_inode(inode
);
759 spin_unlock(&info
->lock
);
762 * Empty swap vector directory pages to be freed?
764 if (!list_empty(&pages_to_free
)) {
765 pages_to_free
.prev
->next
= NULL
;
766 shmem_free_pages(pages_to_free
.next
);
769 EXPORT_SYMBOL_GPL(shmem_truncate_range
);
771 static int shmem_setattr(struct dentry
*dentry
, struct iattr
*attr
)
773 struct inode
*inode
= dentry
->d_inode
;
776 error
= inode_change_ok(inode
, attr
);
780 if (S_ISREG(inode
->i_mode
) && (attr
->ia_valid
& ATTR_SIZE
)) {
781 loff_t oldsize
= inode
->i_size
;
782 loff_t newsize
= attr
->ia_size
;
783 struct page
*page
= NULL
;
785 if (newsize
< oldsize
) {
787 * If truncating down to a partial page, then
788 * if that page is already allocated, hold it
789 * in memory until the truncation is over, so
790 * truncate_partial_page cannot miss it were
791 * it assigned to swap.
793 if (newsize
& (PAGE_CACHE_SIZE
-1)) {
794 (void) shmem_getpage(inode
,
795 newsize
>> PAGE_CACHE_SHIFT
,
796 &page
, SGP_READ
, NULL
);
801 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
802 * detect if any pages might have been added to cache
803 * after truncate_inode_pages. But we needn't bother
804 * if it's being fully truncated to zero-length: the
805 * nrpages check is efficient enough in that case.
808 struct shmem_inode_info
*info
= SHMEM_I(inode
);
809 spin_lock(&info
->lock
);
810 info
->flags
&= ~SHMEM_PAGEIN
;
811 spin_unlock(&info
->lock
);
814 if (newsize
!= oldsize
) {
815 i_size_write(inode
, newsize
);
816 inode
->i_ctime
= inode
->i_mtime
= CURRENT_TIME
;
818 if (newsize
< oldsize
) {
819 loff_t holebegin
= round_up(newsize
, PAGE_SIZE
);
820 unmap_mapping_range(inode
->i_mapping
, holebegin
, 0, 1);
821 shmem_truncate_range(inode
, newsize
, (loff_t
)-1);
822 /* unmap again to remove racily COWed private pages */
823 unmap_mapping_range(inode
->i_mapping
, holebegin
, 0, 1);
826 page_cache_release(page
);
829 setattr_copy(inode
, attr
);
830 #ifdef CONFIG_TMPFS_POSIX_ACL
831 if (attr
->ia_valid
& ATTR_MODE
)
832 error
= generic_acl_chmod(inode
);
837 static void shmem_evict_inode(struct inode
*inode
)
839 struct shmem_inode_info
*info
= SHMEM_I(inode
);
840 struct shmem_xattr
*xattr
, *nxattr
;
842 if (inode
->i_mapping
->a_ops
== &shmem_aops
) {
843 shmem_unacct_size(info
->flags
, inode
->i_size
);
845 shmem_truncate_range(inode
, 0, (loff_t
)-1);
846 if (!list_empty(&info
->swaplist
)) {
847 mutex_lock(&shmem_swaplist_mutex
);
848 list_del_init(&info
->swaplist
);
849 mutex_unlock(&shmem_swaplist_mutex
);
853 list_for_each_entry_safe(xattr
, nxattr
, &info
->xattr_list
, list
) {
857 BUG_ON(inode
->i_blocks
);
858 shmem_free_inode(inode
->i_sb
);
859 end_writeback(inode
);
862 static inline int shmem_find_swp(swp_entry_t entry
, swp_entry_t
*dir
, swp_entry_t
*edir
)
866 for (ptr
= dir
; ptr
< edir
; ptr
++) {
867 if (ptr
->val
== entry
.val
)
873 static int shmem_unuse_inode(struct shmem_inode_info
*info
, swp_entry_t entry
, struct page
*page
)
875 struct address_space
*mapping
;
887 ptr
= info
->i_direct
;
888 spin_lock(&info
->lock
);
889 if (!info
->swapped
) {
890 list_del_init(&info
->swaplist
);
893 limit
= info
->next_index
;
895 if (size
> SHMEM_NR_DIRECT
)
896 size
= SHMEM_NR_DIRECT
;
897 offset
= shmem_find_swp(entry
, ptr
, ptr
+size
);
899 shmem_swp_balance_unmap();
902 if (!info
->i_indirect
)
905 dir
= shmem_dir_map(info
->i_indirect
);
906 stage
= SHMEM_NR_DIRECT
+ ENTRIES_PER_PAGEPAGE
/2;
908 for (idx
= SHMEM_NR_DIRECT
; idx
< limit
; idx
+= ENTRIES_PER_PAGE
, dir
++) {
909 if (unlikely(idx
== stage
)) {
910 shmem_dir_unmap(dir
-1);
911 if (cond_resched_lock(&info
->lock
)) {
912 /* check it has not been truncated */
913 if (limit
> info
->next_index
) {
914 limit
= info
->next_index
;
919 dir
= shmem_dir_map(info
->i_indirect
) +
920 ENTRIES_PER_PAGE
/2 + idx
/ENTRIES_PER_PAGEPAGE
;
923 idx
+= ENTRIES_PER_PAGEPAGE
;
927 stage
= idx
+ ENTRIES_PER_PAGEPAGE
;
929 shmem_dir_unmap(dir
);
930 dir
= shmem_dir_map(subdir
);
933 if (subdir
&& page_private(subdir
)) {
934 ptr
= shmem_swp_map(subdir
);
936 if (size
> ENTRIES_PER_PAGE
)
937 size
= ENTRIES_PER_PAGE
;
938 offset
= shmem_find_swp(entry
, ptr
, ptr
+size
);
939 shmem_swp_unmap(ptr
);
941 shmem_dir_unmap(dir
);
942 ptr
= shmem_swp_map(subdir
);
948 shmem_dir_unmap(dir
-1);
950 spin_unlock(&info
->lock
);
957 * Move _head_ to start search for next from here.
958 * But be careful: shmem_evict_inode checks list_empty without taking
959 * mutex, and there's an instant in list_move_tail when info->swaplist
960 * would appear empty, if it were the only one on shmem_swaplist. We
961 * could avoid doing it if inode NULL; or use this minor optimization.
963 if (shmem_swaplist
.next
!= &info
->swaplist
)
964 list_move_tail(&shmem_swaplist
, &info
->swaplist
);
967 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
968 * but also to hold up shmem_evict_inode(): so inode cannot be freed
969 * beneath us (pagelock doesn't help until the page is in pagecache).
971 mapping
= info
->vfs_inode
.i_mapping
;
972 error
= add_to_page_cache_locked(page
, mapping
, idx
, GFP_NOWAIT
);
973 /* which does mem_cgroup_uncharge_cache_page on error */
975 if (error
== -EEXIST
) {
976 struct page
*filepage
= find_get_page(mapping
, idx
);
980 * There might be a more uptodate page coming down
981 * from a stacked writepage: forget our swappage if so.
983 if (PageUptodate(filepage
))
985 page_cache_release(filepage
);
989 delete_from_swap_cache(page
);
990 set_page_dirty(page
);
991 info
->flags
|= SHMEM_PAGEIN
;
992 shmem_swp_set(info
, ptr
, 0);
994 error
= 1; /* not an error, but entry was found */
996 shmem_swp_unmap(ptr
);
997 spin_unlock(&info
->lock
);
1002 * shmem_unuse() search for an eventually swapped out shmem page.
1004 int shmem_unuse(swp_entry_t entry
, struct page
*page
)
1006 struct list_head
*p
, *next
;
1007 struct shmem_inode_info
*info
;
1012 * Charge page using GFP_KERNEL while we can wait, before taking
1013 * the shmem_swaplist_mutex which might hold up shmem_writepage().
1014 * Charged back to the user (not to caller) when swap account is used.
1015 * add_to_page_cache() will be called with GFP_NOWAIT.
1017 error
= mem_cgroup_cache_charge(page
, current
->mm
, GFP_KERNEL
);
1021 * Try to preload while we can wait, to not make a habit of
1022 * draining atomic reserves; but don't latch on to this cpu,
1023 * it's okay if sometimes we get rescheduled after this.
1025 error
= radix_tree_preload(GFP_KERNEL
);
1028 radix_tree_preload_end();
1030 mutex_lock(&shmem_swaplist_mutex
);
1031 list_for_each_safe(p
, next
, &shmem_swaplist
) {
1032 info
= list_entry(p
, struct shmem_inode_info
, swaplist
);
1033 found
= shmem_unuse_inode(info
, entry
, page
);
1038 mutex_unlock(&shmem_swaplist_mutex
);
1042 mem_cgroup_uncharge_cache_page(page
);
1047 page_cache_release(page
);
1052 * Move the page from the page cache to the swap cache.
1054 static int shmem_writepage(struct page
*page
, struct writeback_control
*wbc
)
1056 struct shmem_inode_info
*info
;
1057 swp_entry_t
*entry
, swap
;
1058 struct address_space
*mapping
;
1059 unsigned long index
;
1060 struct inode
*inode
;
1062 BUG_ON(!PageLocked(page
));
1063 mapping
= page
->mapping
;
1064 index
= page
->index
;
1065 inode
= mapping
->host
;
1066 info
= SHMEM_I(inode
);
1067 if (info
->flags
& VM_LOCKED
)
1069 if (!total_swap_pages
)
1073 * shmem_backing_dev_info's capabilities prevent regular writeback or
1074 * sync from ever calling shmem_writepage; but a stacking filesystem
1075 * may use the ->writepage of its underlying filesystem, in which case
1076 * tmpfs should write out to swap only in response to memory pressure,
1077 * and not for the writeback threads or sync. However, in those cases,
1078 * we do still want to check if there's a redundant swappage to be
1081 if (wbc
->for_reclaim
)
1082 swap
= get_swap_page();
1087 * Add inode to shmem_unuse()'s list of swapped-out inodes,
1088 * if it's not already there. Do it now because we cannot take
1089 * mutex while holding spinlock, and must do so before the page
1090 * is moved to swap cache, when its pagelock no longer protects
1091 * the inode from eviction. But don't unlock the mutex until
1092 * we've taken the spinlock, because shmem_unuse_inode() will
1093 * prune a !swapped inode from the swaplist under both locks.
1096 mutex_lock(&shmem_swaplist_mutex
);
1097 if (list_empty(&info
->swaplist
))
1098 list_add_tail(&info
->swaplist
, &shmem_swaplist
);
1101 spin_lock(&info
->lock
);
1103 mutex_unlock(&shmem_swaplist_mutex
);
1105 if (index
>= info
->next_index
) {
1106 BUG_ON(!(info
->flags
& SHMEM_TRUNCATE
));
1109 entry
= shmem_swp_entry(info
, index
, NULL
);
1112 * The more uptodate page coming down from a stacked
1113 * writepage should replace our old swappage.
1115 free_swap_and_cache(*entry
);
1116 shmem_swp_set(info
, entry
, 0);
1118 shmem_recalc_inode(inode
);
1120 if (swap
.val
&& add_to_swap_cache(page
, swap
, GFP_ATOMIC
) == 0) {
1121 delete_from_page_cache(page
);
1122 shmem_swp_set(info
, entry
, swap
.val
);
1123 shmem_swp_unmap(entry
);
1124 swap_shmem_alloc(swap
);
1125 spin_unlock(&info
->lock
);
1126 BUG_ON(page_mapped(page
));
1127 swap_writepage(page
, wbc
);
1131 shmem_swp_unmap(entry
);
1133 spin_unlock(&info
->lock
);
1135 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
1136 * clear SWAP_HAS_CACHE flag.
1138 swapcache_free(swap
, NULL
);
1140 set_page_dirty(page
);
1141 if (wbc
->for_reclaim
)
1142 return AOP_WRITEPAGE_ACTIVATE
; /* Return with page locked */
1149 static void shmem_show_mpol(struct seq_file
*seq
, struct mempolicy
*mpol
)
1153 if (!mpol
|| mpol
->mode
== MPOL_DEFAULT
)
1154 return; /* show nothing */
1156 mpol_to_str(buffer
, sizeof(buffer
), mpol
, 1);
1158 seq_printf(seq
, ",mpol=%s", buffer
);
1161 static struct mempolicy
*shmem_get_sbmpol(struct shmem_sb_info
*sbinfo
)
1163 struct mempolicy
*mpol
= NULL
;
1165 spin_lock(&sbinfo
->stat_lock
); /* prevent replace/use races */
1166 mpol
= sbinfo
->mpol
;
1168 spin_unlock(&sbinfo
->stat_lock
);
1172 #endif /* CONFIG_TMPFS */
1174 static struct page
*shmem_swapin(swp_entry_t entry
, gfp_t gfp
,
1175 struct shmem_inode_info
*info
, unsigned long idx
)
1177 struct mempolicy mpol
, *spol
;
1178 struct vm_area_struct pvma
;
1181 spol
= mpol_cond_copy(&mpol
,
1182 mpol_shared_policy_lookup(&info
->policy
, idx
));
1184 /* Create a pseudo vma that just contains the policy */
1186 pvma
.vm_pgoff
= idx
;
1188 pvma
.vm_policy
= spol
;
1189 page
= swapin_readahead(entry
, gfp
, &pvma
, 0);
1193 static struct page
*shmem_alloc_page(gfp_t gfp
,
1194 struct shmem_inode_info
*info
, unsigned long idx
)
1196 struct vm_area_struct pvma
;
1198 /* Create a pseudo vma that just contains the policy */
1200 pvma
.vm_pgoff
= idx
;
1202 pvma
.vm_policy
= mpol_shared_policy_lookup(&info
->policy
, idx
);
1205 * alloc_page_vma() will drop the shared policy reference
1207 return alloc_page_vma(gfp
, &pvma
, 0);
1209 #else /* !CONFIG_NUMA */
1211 static inline void shmem_show_mpol(struct seq_file
*seq
, struct mempolicy
*p
)
1214 #endif /* CONFIG_TMPFS */
1216 static inline struct page
*shmem_swapin(swp_entry_t entry
, gfp_t gfp
,
1217 struct shmem_inode_info
*info
, unsigned long idx
)
1219 return swapin_readahead(entry
, gfp
, NULL
, 0);
1222 static inline struct page
*shmem_alloc_page(gfp_t gfp
,
1223 struct shmem_inode_info
*info
, unsigned long idx
)
1225 return alloc_page(gfp
);
1227 #endif /* CONFIG_NUMA */
1229 #if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
1230 static inline struct mempolicy
*shmem_get_sbmpol(struct shmem_sb_info
*sbinfo
)
1237 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
1239 * If we allocate a new one we do not mark it dirty. That's up to the
1240 * vm. If we swap it in we mark it dirty since we also free the swap
1241 * entry since a page cannot live in both the swap and page cache
1243 static int shmem_getpage_gfp(struct inode
*inode
, pgoff_t idx
,
1244 struct page
**pagep
, enum sgp_type sgp
, gfp_t gfp
, int *fault_type
)
1246 struct address_space
*mapping
= inode
->i_mapping
;
1247 struct shmem_inode_info
*info
= SHMEM_I(inode
);
1248 struct shmem_sb_info
*sbinfo
;
1249 struct page
*filepage
;
1250 struct page
*swappage
;
1251 struct page
*prealloc_page
= NULL
;
1256 if (idx
>= SHMEM_MAX_INDEX
)
1259 filepage
= find_lock_page(mapping
, idx
);
1260 if (filepage
&& PageUptodate(filepage
))
1264 * Try to preload while we can wait, to not make a habit of
1265 * draining atomic reserves; but don't latch on to this cpu.
1267 error
= radix_tree_preload(gfp
& GFP_RECLAIM_MASK
);
1270 radix_tree_preload_end();
1271 if (sgp
!= SGP_READ
&& !prealloc_page
) {
1272 prealloc_page
= shmem_alloc_page(gfp
, info
, idx
);
1273 if (prealloc_page
) {
1274 SetPageSwapBacked(prealloc_page
);
1275 if (mem_cgroup_cache_charge(prealloc_page
,
1276 current
->mm
, GFP_KERNEL
)) {
1277 page_cache_release(prealloc_page
);
1278 prealloc_page
= NULL
;
1285 spin_lock(&info
->lock
);
1286 shmem_recalc_inode(inode
);
1287 entry
= shmem_swp_alloc(info
, idx
, sgp
, gfp
);
1288 if (IS_ERR(entry
)) {
1289 spin_unlock(&info
->lock
);
1290 error
= PTR_ERR(entry
);
1296 /* Look it up and read it in.. */
1297 swappage
= lookup_swap_cache(swap
);
1299 shmem_swp_unmap(entry
);
1300 spin_unlock(&info
->lock
);
1301 /* here we actually do the io */
1303 *fault_type
|= VM_FAULT_MAJOR
;
1304 swappage
= shmem_swapin(swap
, gfp
, info
, idx
);
1306 spin_lock(&info
->lock
);
1307 entry
= shmem_swp_alloc(info
, idx
, sgp
, gfp
);
1309 error
= PTR_ERR(entry
);
1311 if (entry
->val
== swap
.val
)
1313 shmem_swp_unmap(entry
);
1315 spin_unlock(&info
->lock
);
1320 wait_on_page_locked(swappage
);
1321 page_cache_release(swappage
);
1325 /* We have to do this with page locked to prevent races */
1326 if (!trylock_page(swappage
)) {
1327 shmem_swp_unmap(entry
);
1328 spin_unlock(&info
->lock
);
1329 wait_on_page_locked(swappage
);
1330 page_cache_release(swappage
);
1333 if (PageWriteback(swappage
)) {
1334 shmem_swp_unmap(entry
);
1335 spin_unlock(&info
->lock
);
1336 wait_on_page_writeback(swappage
);
1337 unlock_page(swappage
);
1338 page_cache_release(swappage
);
1341 if (!PageUptodate(swappage
)) {
1342 shmem_swp_unmap(entry
);
1343 spin_unlock(&info
->lock
);
1344 unlock_page(swappage
);
1345 page_cache_release(swappage
);
1351 shmem_swp_set(info
, entry
, 0);
1352 shmem_swp_unmap(entry
);
1353 delete_from_swap_cache(swappage
);
1354 spin_unlock(&info
->lock
);
1355 copy_highpage(filepage
, swappage
);
1356 unlock_page(swappage
);
1357 page_cache_release(swappage
);
1358 flush_dcache_page(filepage
);
1359 SetPageUptodate(filepage
);
1360 set_page_dirty(filepage
);
1362 } else if (!(error
= add_to_page_cache_locked(swappage
, mapping
,
1363 idx
, GFP_NOWAIT
))) {
1364 info
->flags
|= SHMEM_PAGEIN
;
1365 shmem_swp_set(info
, entry
, 0);
1366 shmem_swp_unmap(entry
);
1367 delete_from_swap_cache(swappage
);
1368 spin_unlock(&info
->lock
);
1369 filepage
= swappage
;
1370 set_page_dirty(filepage
);
1373 shmem_swp_unmap(entry
);
1374 spin_unlock(&info
->lock
);
1375 if (error
== -ENOMEM
) {
1377 * reclaim from proper memory cgroup and
1378 * call memcg's OOM if needed.
1380 error
= mem_cgroup_shmem_charge_fallback(
1385 unlock_page(swappage
);
1386 page_cache_release(swappage
);
1390 unlock_page(swappage
);
1391 page_cache_release(swappage
);
1394 } else if (sgp
== SGP_READ
&& !filepage
) {
1395 shmem_swp_unmap(entry
);
1396 filepage
= find_get_page(mapping
, idx
);
1398 (!PageUptodate(filepage
) || !trylock_page(filepage
))) {
1399 spin_unlock(&info
->lock
);
1400 wait_on_page_locked(filepage
);
1401 page_cache_release(filepage
);
1405 spin_unlock(&info
->lock
);
1407 } else if (prealloc_page
) {
1408 shmem_swp_unmap(entry
);
1409 sbinfo
= SHMEM_SB(inode
->i_sb
);
1410 if (sbinfo
->max_blocks
) {
1411 if (percpu_counter_compare(&sbinfo
->used_blocks
,
1412 sbinfo
->max_blocks
) >= 0 ||
1413 shmem_acct_block(info
->flags
))
1415 percpu_counter_inc(&sbinfo
->used_blocks
);
1416 inode
->i_blocks
+= BLOCKS_PER_PAGE
;
1417 } else if (shmem_acct_block(info
->flags
))
1423 filepage
= prealloc_page
;
1424 prealloc_page
= NULL
;
1426 entry
= shmem_swp_alloc(info
, idx
, sgp
, gfp
);
1428 error
= PTR_ERR(entry
);
1431 shmem_swp_unmap(entry
);
1433 ret
= error
|| swap
.val
;
1435 mem_cgroup_uncharge_cache_page(filepage
);
1437 ret
= add_to_page_cache_lru(filepage
, mapping
,
1440 * At add_to_page_cache_lru() failure, uncharge will
1441 * be done automatically.
1444 shmem_unacct_blocks(info
->flags
, 1);
1445 shmem_free_blocks(inode
, 1);
1446 spin_unlock(&info
->lock
);
1447 page_cache_release(filepage
);
1453 info
->flags
|= SHMEM_PAGEIN
;
1457 spin_unlock(&info
->lock
);
1458 clear_highpage(filepage
);
1459 flush_dcache_page(filepage
);
1460 SetPageUptodate(filepage
);
1461 if (sgp
== SGP_DIRTY
)
1462 set_page_dirty(filepage
);
1464 spin_unlock(&info
->lock
);
1472 if (prealloc_page
) {
1473 mem_cgroup_uncharge_cache_page(prealloc_page
);
1474 page_cache_release(prealloc_page
);
1480 * Perhaps the page was brought in from swap between find_lock_page
1481 * and taking info->lock? We allow for that at add_to_page_cache_lru,
1482 * but must also avoid reporting a spurious ENOSPC while working on a
1486 struct page
*page
= find_get_page(mapping
, idx
);
1488 spin_unlock(&info
->lock
);
1489 page_cache_release(page
);
1493 spin_unlock(&info
->lock
);
1497 unlock_page(filepage
);
1498 page_cache_release(filepage
);
1503 static int shmem_fault(struct vm_area_struct
*vma
, struct vm_fault
*vmf
)
1505 struct inode
*inode
= vma
->vm_file
->f_path
.dentry
->d_inode
;
1507 int ret
= VM_FAULT_LOCKED
;
1509 if (((loff_t
)vmf
->pgoff
<< PAGE_CACHE_SHIFT
) >= i_size_read(inode
))
1510 return VM_FAULT_SIGBUS
;
1512 error
= shmem_getpage(inode
, vmf
->pgoff
, &vmf
->page
, SGP_CACHE
, &ret
);
1514 return ((error
== -ENOMEM
) ? VM_FAULT_OOM
: VM_FAULT_SIGBUS
);
1516 if (ret
& VM_FAULT_MAJOR
) {
1517 count_vm_event(PGMAJFAULT
);
1518 mem_cgroup_count_vm_event(vma
->vm_mm
, PGMAJFAULT
);
1524 static int shmem_set_policy(struct vm_area_struct
*vma
, struct mempolicy
*new)
1526 struct inode
*i
= vma
->vm_file
->f_path
.dentry
->d_inode
;
1527 return mpol_set_shared_policy(&SHMEM_I(i
)->policy
, vma
, new);
1530 static struct mempolicy
*shmem_get_policy(struct vm_area_struct
*vma
,
1533 struct inode
*i
= vma
->vm_file
->f_path
.dentry
->d_inode
;
1536 idx
= ((addr
- vma
->vm_start
) >> PAGE_SHIFT
) + vma
->vm_pgoff
;
1537 return mpol_shared_policy_lookup(&SHMEM_I(i
)->policy
, idx
);
1541 int shmem_lock(struct file
*file
, int lock
, struct user_struct
*user
)
1543 struct inode
*inode
= file
->f_path
.dentry
->d_inode
;
1544 struct shmem_inode_info
*info
= SHMEM_I(inode
);
1545 int retval
= -ENOMEM
;
1547 spin_lock(&info
->lock
);
1548 if (lock
&& !(info
->flags
& VM_LOCKED
)) {
1549 if (!user_shm_lock(inode
->i_size
, user
))
1551 info
->flags
|= VM_LOCKED
;
1552 mapping_set_unevictable(file
->f_mapping
);
1554 if (!lock
&& (info
->flags
& VM_LOCKED
) && user
) {
1555 user_shm_unlock(inode
->i_size
, user
);
1556 info
->flags
&= ~VM_LOCKED
;
1557 mapping_clear_unevictable(file
->f_mapping
);
1558 scan_mapping_unevictable_pages(file
->f_mapping
);
1563 spin_unlock(&info
->lock
);
1567 static int shmem_mmap(struct file
*file
, struct vm_area_struct
*vma
)
1569 file_accessed(file
);
1570 vma
->vm_ops
= &shmem_vm_ops
;
1571 vma
->vm_flags
|= VM_CAN_NONLINEAR
;
1575 static struct inode
*shmem_get_inode(struct super_block
*sb
, const struct inode
*dir
,
1576 int mode
, dev_t dev
, unsigned long flags
)
1578 struct inode
*inode
;
1579 struct shmem_inode_info
*info
;
1580 struct shmem_sb_info
*sbinfo
= SHMEM_SB(sb
);
1582 if (shmem_reserve_inode(sb
))
1585 inode
= new_inode(sb
);
1587 inode
->i_ino
= get_next_ino();
1588 inode_init_owner(inode
, dir
, mode
);
1589 inode
->i_blocks
= 0;
1590 inode
->i_mapping
->backing_dev_info
= &shmem_backing_dev_info
;
1591 inode
->i_atime
= inode
->i_mtime
= inode
->i_ctime
= CURRENT_TIME
;
1592 inode
->i_generation
= get_seconds();
1593 info
= SHMEM_I(inode
);
1594 memset(info
, 0, (char *)inode
- (char *)info
);
1595 spin_lock_init(&info
->lock
);
1596 info
->flags
= flags
& VM_NORESERVE
;
1597 INIT_LIST_HEAD(&info
->swaplist
);
1598 INIT_LIST_HEAD(&info
->xattr_list
);
1599 cache_no_acl(inode
);
1601 switch (mode
& S_IFMT
) {
1603 inode
->i_op
= &shmem_special_inode_operations
;
1604 init_special_inode(inode
, mode
, dev
);
1607 inode
->i_mapping
->a_ops
= &shmem_aops
;
1608 inode
->i_op
= &shmem_inode_operations
;
1609 inode
->i_fop
= &shmem_file_operations
;
1610 mpol_shared_policy_init(&info
->policy
,
1611 shmem_get_sbmpol(sbinfo
));
1615 /* Some things misbehave if size == 0 on a directory */
1616 inode
->i_size
= 2 * BOGO_DIRENT_SIZE
;
1617 inode
->i_op
= &shmem_dir_inode_operations
;
1618 inode
->i_fop
= &simple_dir_operations
;
1622 * Must not load anything in the rbtree,
1623 * mpol_free_shared_policy will not be called.
1625 mpol_shared_policy_init(&info
->policy
, NULL
);
1629 shmem_free_inode(sb
);
1634 static const struct inode_operations shmem_symlink_inode_operations
;
1635 static const struct inode_operations shmem_symlink_inline_operations
;
1638 shmem_write_begin(struct file
*file
, struct address_space
*mapping
,
1639 loff_t pos
, unsigned len
, unsigned flags
,
1640 struct page
**pagep
, void **fsdata
)
1642 struct inode
*inode
= mapping
->host
;
1643 pgoff_t index
= pos
>> PAGE_CACHE_SHIFT
;
1644 return shmem_getpage(inode
, index
, pagep
, SGP_WRITE
, NULL
);
1648 shmem_write_end(struct file
*file
, struct address_space
*mapping
,
1649 loff_t pos
, unsigned len
, unsigned copied
,
1650 struct page
*page
, void *fsdata
)
1652 struct inode
*inode
= mapping
->host
;
1654 if (pos
+ copied
> inode
->i_size
)
1655 i_size_write(inode
, pos
+ copied
);
1657 set_page_dirty(page
);
1659 page_cache_release(page
);
1664 static void do_shmem_file_read(struct file
*filp
, loff_t
*ppos
, read_descriptor_t
*desc
, read_actor_t actor
)
1666 struct inode
*inode
= filp
->f_path
.dentry
->d_inode
;
1667 struct address_space
*mapping
= inode
->i_mapping
;
1668 unsigned long index
, offset
;
1669 enum sgp_type sgp
= SGP_READ
;
1672 * Might this read be for a stacking filesystem? Then when reading
1673 * holes of a sparse file, we actually need to allocate those pages,
1674 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1676 if (segment_eq(get_fs(), KERNEL_DS
))
1679 index
= *ppos
>> PAGE_CACHE_SHIFT
;
1680 offset
= *ppos
& ~PAGE_CACHE_MASK
;
1683 struct page
*page
= NULL
;
1684 unsigned long end_index
, nr
, ret
;
1685 loff_t i_size
= i_size_read(inode
);
1687 end_index
= i_size
>> PAGE_CACHE_SHIFT
;
1688 if (index
> end_index
)
1690 if (index
== end_index
) {
1691 nr
= i_size
& ~PAGE_CACHE_MASK
;
1696 desc
->error
= shmem_getpage(inode
, index
, &page
, sgp
, NULL
);
1698 if (desc
->error
== -EINVAL
)
1706 * We must evaluate after, since reads (unlike writes)
1707 * are called without i_mutex protection against truncate
1709 nr
= PAGE_CACHE_SIZE
;
1710 i_size
= i_size_read(inode
);
1711 end_index
= i_size
>> PAGE_CACHE_SHIFT
;
1712 if (index
== end_index
) {
1713 nr
= i_size
& ~PAGE_CACHE_MASK
;
1716 page_cache_release(page
);
1724 * If users can be writing to this page using arbitrary
1725 * virtual addresses, take care about potential aliasing
1726 * before reading the page on the kernel side.
1728 if (mapping_writably_mapped(mapping
))
1729 flush_dcache_page(page
);
1731 * Mark the page accessed if we read the beginning.
1734 mark_page_accessed(page
);
1736 page
= ZERO_PAGE(0);
1737 page_cache_get(page
);
1741 * Ok, we have the page, and it's up-to-date, so
1742 * now we can copy it to user space...
1744 * The actor routine returns how many bytes were actually used..
1745 * NOTE! This may not be the same as how much of a user buffer
1746 * we filled up (we may be padding etc), so we can only update
1747 * "pos" here (the actor routine has to update the user buffer
1748 * pointers and the remaining count).
1750 ret
= actor(desc
, page
, offset
, nr
);
1752 index
+= offset
>> PAGE_CACHE_SHIFT
;
1753 offset
&= ~PAGE_CACHE_MASK
;
1755 page_cache_release(page
);
1756 if (ret
!= nr
|| !desc
->count
)
1762 *ppos
= ((loff_t
) index
<< PAGE_CACHE_SHIFT
) + offset
;
1763 file_accessed(filp
);
1766 static ssize_t
shmem_file_aio_read(struct kiocb
*iocb
,
1767 const struct iovec
*iov
, unsigned long nr_segs
, loff_t pos
)
1769 struct file
*filp
= iocb
->ki_filp
;
1773 loff_t
*ppos
= &iocb
->ki_pos
;
1775 retval
= generic_segment_checks(iov
, &nr_segs
, &count
, VERIFY_WRITE
);
1779 for (seg
= 0; seg
< nr_segs
; seg
++) {
1780 read_descriptor_t desc
;
1783 desc
.arg
.buf
= iov
[seg
].iov_base
;
1784 desc
.count
= iov
[seg
].iov_len
;
1785 if (desc
.count
== 0)
1788 do_shmem_file_read(filp
, ppos
, &desc
, file_read_actor
);
1789 retval
+= desc
.written
;
1791 retval
= retval
?: desc
.error
;
1800 static ssize_t
shmem_file_splice_read(struct file
*in
, loff_t
*ppos
,
1801 struct pipe_inode_info
*pipe
, size_t len
,
1804 struct address_space
*mapping
= in
->f_mapping
;
1805 struct inode
*inode
= mapping
->host
;
1806 unsigned int loff
, nr_pages
, req_pages
;
1807 struct page
*pages
[PIPE_DEF_BUFFERS
];
1808 struct partial_page partial
[PIPE_DEF_BUFFERS
];
1810 pgoff_t index
, end_index
;
1813 struct splice_pipe_desc spd
= {
1817 .ops
= &page_cache_pipe_buf_ops
,
1818 .spd_release
= spd_release_page
,
1821 isize
= i_size_read(inode
);
1822 if (unlikely(*ppos
>= isize
))
1825 left
= isize
- *ppos
;
1826 if (unlikely(left
< len
))
1829 if (splice_grow_spd(pipe
, &spd
))
1832 index
= *ppos
>> PAGE_CACHE_SHIFT
;
1833 loff
= *ppos
& ~PAGE_CACHE_MASK
;
1834 req_pages
= (len
+ loff
+ PAGE_CACHE_SIZE
- 1) >> PAGE_CACHE_SHIFT
;
1835 nr_pages
= min(req_pages
, pipe
->buffers
);
1837 spd
.nr_pages
= find_get_pages_contig(mapping
, index
,
1838 nr_pages
, spd
.pages
);
1839 index
+= spd
.nr_pages
;
1842 while (spd
.nr_pages
< nr_pages
) {
1843 error
= shmem_getpage(inode
, index
, &page
, SGP_CACHE
, NULL
);
1847 spd
.pages
[spd
.nr_pages
++] = page
;
1851 index
= *ppos
>> PAGE_CACHE_SHIFT
;
1852 nr_pages
= spd
.nr_pages
;
1855 for (page_nr
= 0; page_nr
< nr_pages
; page_nr
++) {
1856 unsigned int this_len
;
1861 this_len
= min_t(unsigned long, len
, PAGE_CACHE_SIZE
- loff
);
1862 page
= spd
.pages
[page_nr
];
1864 if (!PageUptodate(page
) || page
->mapping
!= mapping
) {
1865 error
= shmem_getpage(inode
, index
, &page
,
1870 page_cache_release(spd
.pages
[page_nr
]);
1871 spd
.pages
[page_nr
] = page
;
1874 isize
= i_size_read(inode
);
1875 end_index
= (isize
- 1) >> PAGE_CACHE_SHIFT
;
1876 if (unlikely(!isize
|| index
> end_index
))
1879 if (end_index
== index
) {
1882 plen
= ((isize
- 1) & ~PAGE_CACHE_MASK
) + 1;
1886 this_len
= min(this_len
, plen
- loff
);
1890 spd
.partial
[page_nr
].offset
= loff
;
1891 spd
.partial
[page_nr
].len
= this_len
;
1898 while (page_nr
< nr_pages
)
1899 page_cache_release(spd
.pages
[page_nr
++]);
1902 error
= splice_to_pipe(pipe
, &spd
);
1904 splice_shrink_spd(pipe
, &spd
);
1913 static int shmem_statfs(struct dentry
*dentry
, struct kstatfs
*buf
)
1915 struct shmem_sb_info
*sbinfo
= SHMEM_SB(dentry
->d_sb
);
1917 buf
->f_type
= TMPFS_MAGIC
;
1918 buf
->f_bsize
= PAGE_CACHE_SIZE
;
1919 buf
->f_namelen
= NAME_MAX
;
1920 if (sbinfo
->max_blocks
) {
1921 buf
->f_blocks
= sbinfo
->max_blocks
;
1922 buf
->f_bavail
= buf
->f_bfree
=
1923 sbinfo
->max_blocks
- percpu_counter_sum(&sbinfo
->used_blocks
);
1925 if (sbinfo
->max_inodes
) {
1926 buf
->f_files
= sbinfo
->max_inodes
;
1927 buf
->f_ffree
= sbinfo
->free_inodes
;
1929 /* else leave those fields 0 like simple_statfs */
1934 * File creation. Allocate an inode, and we're done..
1937 shmem_mknod(struct inode
*dir
, struct dentry
*dentry
, int mode
, dev_t dev
)
1939 struct inode
*inode
;
1940 int error
= -ENOSPC
;
1942 inode
= shmem_get_inode(dir
->i_sb
, dir
, mode
, dev
, VM_NORESERVE
);
1944 error
= security_inode_init_security(inode
, dir
,
1945 &dentry
->d_name
, NULL
,
1948 if (error
!= -EOPNOTSUPP
) {
1953 #ifdef CONFIG_TMPFS_POSIX_ACL
1954 error
= generic_acl_init(inode
, dir
);
1962 dir
->i_size
+= BOGO_DIRENT_SIZE
;
1963 dir
->i_ctime
= dir
->i_mtime
= CURRENT_TIME
;
1964 d_instantiate(dentry
, inode
);
1965 dget(dentry
); /* Extra count - pin the dentry in core */
1970 static int shmem_mkdir(struct inode
*dir
, struct dentry
*dentry
, int mode
)
1974 if ((error
= shmem_mknod(dir
, dentry
, mode
| S_IFDIR
, 0)))
1980 static int shmem_create(struct inode
*dir
, struct dentry
*dentry
, int mode
,
1981 struct nameidata
*nd
)
1983 return shmem_mknod(dir
, dentry
, mode
| S_IFREG
, 0);
1989 static int shmem_link(struct dentry
*old_dentry
, struct inode
*dir
, struct dentry
*dentry
)
1991 struct inode
*inode
= old_dentry
->d_inode
;
1995 * No ordinary (disk based) filesystem counts links as inodes;
1996 * but each new link needs a new dentry, pinning lowmem, and
1997 * tmpfs dentries cannot be pruned until they are unlinked.
1999 ret
= shmem_reserve_inode(inode
->i_sb
);
2003 dir
->i_size
+= BOGO_DIRENT_SIZE
;
2004 inode
->i_ctime
= dir
->i_ctime
= dir
->i_mtime
= CURRENT_TIME
;
2006 ihold(inode
); /* New dentry reference */
2007 dget(dentry
); /* Extra pinning count for the created dentry */
2008 d_instantiate(dentry
, inode
);
2013 static int shmem_unlink(struct inode
*dir
, struct dentry
*dentry
)
2015 struct inode
*inode
= dentry
->d_inode
;
2017 if (inode
->i_nlink
> 1 && !S_ISDIR(inode
->i_mode
))
2018 shmem_free_inode(inode
->i_sb
);
2020 dir
->i_size
-= BOGO_DIRENT_SIZE
;
2021 inode
->i_ctime
= dir
->i_ctime
= dir
->i_mtime
= CURRENT_TIME
;
2023 dput(dentry
); /* Undo the count from "create" - this does all the work */
2027 static int shmem_rmdir(struct inode
*dir
, struct dentry
*dentry
)
2029 if (!simple_empty(dentry
))
2032 drop_nlink(dentry
->d_inode
);
2034 return shmem_unlink(dir
, dentry
);
2038 * The VFS layer already does all the dentry stuff for rename,
2039 * we just have to decrement the usage count for the target if
2040 * it exists so that the VFS layer correctly free's it when it
2043 static int shmem_rename(struct inode
*old_dir
, struct dentry
*old_dentry
, struct inode
*new_dir
, struct dentry
*new_dentry
)
2045 struct inode
*inode
= old_dentry
->d_inode
;
2046 int they_are_dirs
= S_ISDIR(inode
->i_mode
);
2048 if (!simple_empty(new_dentry
))
2051 if (new_dentry
->d_inode
) {
2052 (void) shmem_unlink(new_dir
, new_dentry
);
2054 drop_nlink(old_dir
);
2055 } else if (they_are_dirs
) {
2056 drop_nlink(old_dir
);
2060 old_dir
->i_size
-= BOGO_DIRENT_SIZE
;
2061 new_dir
->i_size
+= BOGO_DIRENT_SIZE
;
2062 old_dir
->i_ctime
= old_dir
->i_mtime
=
2063 new_dir
->i_ctime
= new_dir
->i_mtime
=
2064 inode
->i_ctime
= CURRENT_TIME
;
2068 static int shmem_symlink(struct inode
*dir
, struct dentry
*dentry
, const char *symname
)
2072 struct inode
*inode
;
2075 struct shmem_inode_info
*info
;
2077 len
= strlen(symname
) + 1;
2078 if (len
> PAGE_CACHE_SIZE
)
2079 return -ENAMETOOLONG
;
2081 inode
= shmem_get_inode(dir
->i_sb
, dir
, S_IFLNK
|S_IRWXUGO
, 0, VM_NORESERVE
);
2085 error
= security_inode_init_security(inode
, dir
, &dentry
->d_name
, NULL
,
2088 if (error
!= -EOPNOTSUPP
) {
2095 info
= SHMEM_I(inode
);
2096 inode
->i_size
= len
-1;
2097 if (len
<= SHMEM_SYMLINK_INLINE_LEN
) {
2099 memcpy(info
->inline_symlink
, symname
, len
);
2100 inode
->i_op
= &shmem_symlink_inline_operations
;
2102 error
= shmem_getpage(inode
, 0, &page
, SGP_WRITE
, NULL
);
2107 inode
->i_mapping
->a_ops
= &shmem_aops
;
2108 inode
->i_op
= &shmem_symlink_inode_operations
;
2109 kaddr
= kmap_atomic(page
, KM_USER0
);
2110 memcpy(kaddr
, symname
, len
);
2111 kunmap_atomic(kaddr
, KM_USER0
);
2112 set_page_dirty(page
);
2114 page_cache_release(page
);
2116 dir
->i_size
+= BOGO_DIRENT_SIZE
;
2117 dir
->i_ctime
= dir
->i_mtime
= CURRENT_TIME
;
2118 d_instantiate(dentry
, inode
);
2123 static void *shmem_follow_link_inline(struct dentry
*dentry
, struct nameidata
*nd
)
2125 nd_set_link(nd
, SHMEM_I(dentry
->d_inode
)->inline_symlink
);
2129 static void *shmem_follow_link(struct dentry
*dentry
, struct nameidata
*nd
)
2131 struct page
*page
= NULL
;
2132 int res
= shmem_getpage(dentry
->d_inode
, 0, &page
, SGP_READ
, NULL
);
2133 nd_set_link(nd
, res
? ERR_PTR(res
) : kmap(page
));
2139 static void shmem_put_link(struct dentry
*dentry
, struct nameidata
*nd
, void *cookie
)
2141 if (!IS_ERR(nd_get_link(nd
))) {
2142 struct page
*page
= cookie
;
2144 mark_page_accessed(page
);
2145 page_cache_release(page
);
2149 #ifdef CONFIG_TMPFS_XATTR
2151 * Superblocks without xattr inode operations may get some security.* xattr
2152 * support from the LSM "for free". As soon as we have any other xattrs
2153 * like ACLs, we also need to implement the security.* handlers at
2154 * filesystem level, though.
2157 static int shmem_xattr_get(struct dentry
*dentry
, const char *name
,
2158 void *buffer
, size_t size
)
2160 struct shmem_inode_info
*info
;
2161 struct shmem_xattr
*xattr
;
2164 info
= SHMEM_I(dentry
->d_inode
);
2166 spin_lock(&info
->lock
);
2167 list_for_each_entry(xattr
, &info
->xattr_list
, list
) {
2168 if (strcmp(name
, xattr
->name
))
2173 if (size
< xattr
->size
)
2176 memcpy(buffer
, xattr
->value
, xattr
->size
);
2180 spin_unlock(&info
->lock
);
2184 static int shmem_xattr_set(struct dentry
*dentry
, const char *name
,
2185 const void *value
, size_t size
, int flags
)
2187 struct inode
*inode
= dentry
->d_inode
;
2188 struct shmem_inode_info
*info
= SHMEM_I(inode
);
2189 struct shmem_xattr
*xattr
;
2190 struct shmem_xattr
*new_xattr
= NULL
;
2194 /* value == NULL means remove */
2197 len
= sizeof(*new_xattr
) + size
;
2198 if (len
<= sizeof(*new_xattr
))
2201 new_xattr
= kmalloc(len
, GFP_KERNEL
);
2205 new_xattr
->name
= kstrdup(name
, GFP_KERNEL
);
2206 if (!new_xattr
->name
) {
2211 new_xattr
->size
= size
;
2212 memcpy(new_xattr
->value
, value
, size
);
2215 spin_lock(&info
->lock
);
2216 list_for_each_entry(xattr
, &info
->xattr_list
, list
) {
2217 if (!strcmp(name
, xattr
->name
)) {
2218 if (flags
& XATTR_CREATE
) {
2221 } else if (new_xattr
) {
2222 list_replace(&xattr
->list
, &new_xattr
->list
);
2224 list_del(&xattr
->list
);
2229 if (flags
& XATTR_REPLACE
) {
2233 list_add(&new_xattr
->list
, &info
->xattr_list
);
2237 spin_unlock(&info
->lock
);
2245 static const struct xattr_handler
*shmem_xattr_handlers
[] = {
2246 #ifdef CONFIG_TMPFS_POSIX_ACL
2247 &generic_acl_access_handler
,
2248 &generic_acl_default_handler
,
2253 static int shmem_xattr_validate(const char *name
)
2255 struct { const char *prefix
; size_t len
; } arr
[] = {
2256 { XATTR_SECURITY_PREFIX
, XATTR_SECURITY_PREFIX_LEN
},
2257 { XATTR_TRUSTED_PREFIX
, XATTR_TRUSTED_PREFIX_LEN
}
2261 for (i
= 0; i
< ARRAY_SIZE(arr
); i
++) {
2262 size_t preflen
= arr
[i
].len
;
2263 if (strncmp(name
, arr
[i
].prefix
, preflen
) == 0) {
2272 static ssize_t
shmem_getxattr(struct dentry
*dentry
, const char *name
,
2273 void *buffer
, size_t size
)
2278 * If this is a request for a synthetic attribute in the system.*
2279 * namespace use the generic infrastructure to resolve a handler
2280 * for it via sb->s_xattr.
2282 if (!strncmp(name
, XATTR_SYSTEM_PREFIX
, XATTR_SYSTEM_PREFIX_LEN
))
2283 return generic_getxattr(dentry
, name
, buffer
, size
);
2285 err
= shmem_xattr_validate(name
);
2289 return shmem_xattr_get(dentry
, name
, buffer
, size
);
2292 static int shmem_setxattr(struct dentry
*dentry
, const char *name
,
2293 const void *value
, size_t size
, int flags
)
2298 * If this is a request for a synthetic attribute in the system.*
2299 * namespace use the generic infrastructure to resolve a handler
2300 * for it via sb->s_xattr.
2302 if (!strncmp(name
, XATTR_SYSTEM_PREFIX
, XATTR_SYSTEM_PREFIX_LEN
))
2303 return generic_setxattr(dentry
, name
, value
, size
, flags
);
2305 err
= shmem_xattr_validate(name
);
2310 value
= ""; /* empty EA, do not remove */
2312 return shmem_xattr_set(dentry
, name
, value
, size
, flags
);
2316 static int shmem_removexattr(struct dentry
*dentry
, const char *name
)
2321 * If this is a request for a synthetic attribute in the system.*
2322 * namespace use the generic infrastructure to resolve a handler
2323 * for it via sb->s_xattr.
2325 if (!strncmp(name
, XATTR_SYSTEM_PREFIX
, XATTR_SYSTEM_PREFIX_LEN
))
2326 return generic_removexattr(dentry
, name
);
2328 err
= shmem_xattr_validate(name
);
2332 return shmem_xattr_set(dentry
, name
, NULL
, 0, XATTR_REPLACE
);
2335 static bool xattr_is_trusted(const char *name
)
2337 return !strncmp(name
, XATTR_TRUSTED_PREFIX
, XATTR_TRUSTED_PREFIX_LEN
);
static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
        bool trusted = capable(CAP_SYS_ADMIN);
        struct shmem_xattr *xattr;
        struct shmem_inode_info *info;
        size_t used = 0;

        info = SHMEM_I(dentry->d_inode);

        spin_lock(&info->lock);
        list_for_each_entry(xattr, &info->xattr_list, list) {
                size_t len;

                /* skip "trusted." attributes for unprivileged callers */
                if (!trusted && xattr_is_trusted(xattr->name))
                        continue;

                len = strlen(xattr->name) + 1;
                used += len;
                if (buffer) {
                        if (size < used) {
                                used = -ERANGE;
                                break;
                        }
                        memcpy(buffer, xattr->name, len);
                        buffer += len;
                }
        }
        spin_unlock(&info->lock);

        return used;
}
#endif /* CONFIG_TMPFS_XATTR */

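/*
 * Illustrative sketch (not part of the original file): a minimal userspace
 * program exercising the xattr entry points above on a tmpfs file.
 * shmem_xattr_validate() only admits the "security." and "trusted."
 * prefixes (system.* attributes go through sb->s_xattr instead), and the
 * "trusted." namespace needs CAP_SYS_ADMIN.  The path and attribute name
 * below are made up for the example; /dev/shm is assumed to be tmpfs.
 */
#if 0
#include <sys/xattr.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/dev/shm/xattr-demo";   /* any file on a tmpfs mount */
        const char value[] = "hello";
        char buf[64];
        ssize_t n;
        int fd;

        fd = open(path, O_RDWR | O_CREAT, 0600);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* XATTR_CREATE maps to the -EEXIST branch in shmem_xattr_set() */
        if (setxattr(path, "trusted.demo", value, sizeof(value), XATTR_CREATE))
                perror("setxattr");

        n = getxattr(path, "trusted.demo", buf, sizeof(buf));
        if (n >= 0)
                printf("trusted.demo = %.*s\n", (int)n, buf);

        /* removal ends up in shmem_xattr_set(..., NULL, 0, XATTR_REPLACE) */
        if (removexattr(path, "trusted.demo"))
                perror("removexattr");

        close(fd);
        return 0;
}
#endif
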
static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link_inline,
#ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
#endif
};

static const struct inode_operations shmem_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link,
        .put_link       = shmem_put_link,
#ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
#endif
};

static struct dentry *shmem_get_parent(struct dentry *child)
{
        return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
        __u32 *fh = vfh;
        __u64 inum = fh[2];
        inum = (inum << 32) | fh[1];
        return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        if (fh_len < 3)
                return NULL;

        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
        }

        return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                                int connectable)
{
        struct inode *inode = dentry->d_inode;

        if (*len < 3)
                return 255;

        if (inode_unhashed(inode)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
                 * to do it once
                 */
                static DEFINE_SPINLOCK(lock);
                spin_lock(&lock);
                if (inode_unhashed(inode))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
        }

        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
        fh[2] = ((__u64)inode->i_ino) >> 32;

        *len = 3;
        return 1;
}

static const struct export_operations shmem_export_ops = {
        .get_parent     = shmem_get_parent,
        .encode_fh      = shmem_encode_fh,
        .fh_to_dentry   = shmem_fh_to_dentry,
};

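/*
 * Illustrative sketch (not part of the original file): the layout of the
 * three-word NFS file handle produced by shmem_encode_fh() above, and how
 * shmem_match()/shmem_fh_to_dentry() reassemble the 64-bit inode number.
 * The helper name is made up; this is reference-only code.
 */
#if 0
static u64 shmem_fh_to_ino(const __u32 *fh)
{
        /* fh[0] = i_generation, fh[1] = i_ino low 32 bits, fh[2] = high 32 */
        return ((u64)fh[2] << 32) | fh[1];
}
#endif
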
static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                               bool remount)
{
        char *this_char, *value, *rest;

        while (options != NULL) {
                this_char = options;
                for (;;) {
                        /*
                         * NUL-terminate this option: unfortunately,
                         * mount options form a comma-separated list,
                         * but mpol's nodelist may also contain commas.
                         */
                        options = strchr(options, ',');
                        if (options == NULL)
                                break;
                        options++;
                        if (!isdigit(*options)) {
                                options[-1] = '\0';
                                break;
                        }
                }
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char,'=')) != NULL) {
                        *value++ = 0;
                } else {
                        printk(KERN_ERR
                            "tmpfs: No value for mount option '%s'\n",
                            this_char);
                        return 1;
                }

                if (!strcmp(this_char,"size")) {
                        unsigned long long size;
                        size = memparse(value,&rest);
                        if (*rest == '%') {
                                /* "size=N%" means N percent of total RAM */
                                size <<= PAGE_SHIFT;
                                size *= totalram_pages;
                                do_div(size, 100);
                                rest++;
                        }
                        if (*rest)
                                goto bad_val;
                        sbinfo->max_blocks =
                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
                        sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        sbinfo->max_inodes = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (remount)
                                continue;
                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
                        if (remount)
                                continue;
                        sbinfo->uid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
                        if (remount)
                                continue;
                        sbinfo->gid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
                        if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                               this_char);
                        return 1;
                }
        }
        return 0;

bad_val:
        printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
               value, this_char);
        return 1;
}

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct shmem_sb_info config = *sbinfo;
        unsigned long inodes;
        int error = -EINVAL;

        if (shmem_parse_options(data, &config, true))
                return error;

        spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
        if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
                goto out;
        if (config.max_inodes < inodes)
                goto out;
        /*
         * Those tests also disallow limited->unlimited while any are in
         * use, so i_blocks will always be zero when max_blocks is zero;
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
        if (config.max_blocks && !sbinfo->max_blocks)
                goto out;
        if (config.max_inodes && !sbinfo->max_inodes)
                goto out;

        error = 0;
        sbinfo->max_blocks  = config.max_blocks;
        sbinfo->max_inodes  = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;

        mpol_put(sbinfo->mpol);
        sbinfo->mpol        = config.mpol;      /* transfers initial ref */
out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
                seq_printf(seq, ",mode=%03o", sbinfo->mode);
        if (sbinfo->uid != 0)
                seq_printf(seq, ",uid=%u", sbinfo->uid);
        if (sbinfo->gid != 0)
                seq_printf(seq, ",gid=%u", sbinfo->gid);
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
}
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        percpu_counter_destroy(&sbinfo->used_blocks);
        kfree(sbinfo);
        sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        struct shmem_sb_info *sbinfo;
        int err = -ENOMEM;

        /* Round up to L1_CACHE_BYTES to resist false sharing */
        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                                L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;

        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current_fsuid();
        sbinfo->gid = current_fsgid();
        sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
        /*
         * Per default we only allow half of the physical ram per
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
        if (!(sb->s_flags & MS_NOUSER)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
                        err = -EINVAL;
                        goto failed;
                }
        }
        sb->s_export_op = &shmem_export_ops;
#else
        sb->s_flags |= MS_NOUSER;
#endif

        spin_lock_init(&sbinfo->stat_lock);
        if (percpu_counter_init(&sbinfo->used_blocks, 0))
                goto failed;
        sbinfo->free_inodes = sbinfo->max_inodes;

        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_XATTR
        sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_flags |= MS_POSIXACL;
#endif

        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
                goto failed;
        inode->i_uid = sbinfo->uid;
        inode->i_gid = sbinfo->gid;
        root = d_alloc_root(inode);
        if (!root)
                goto failed_iput;
        sb->s_root = root;
        return 0;

failed_iput:
        iput(inode);
failed:
        shmem_put_super(sb);
        return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *p;
        p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!p)
                return NULL;
        return &p->vfs_inode;
}

static void shmem_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
        if ((inode->i_mode & S_IFMT) == S_IFREG) {
                /* only struct inode is valid if it's an inline symlink */
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
        }
        call_rcu(&inode->i_rcu, shmem_i_callback);
}

static void init_once(void *foo)
{
        struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static int init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC, init_once);
        return 0;
}

static void destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
        .write_begin    = shmem_write_begin,
        .write_end      = shmem_write_end,
#endif
        .migratepage    = migrate_page,
        .error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
#ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .fsync          = noop_fsync,
        .splice_read    = shmem_file_splice_read,
        .splice_write   = generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .setattr        = shmem_setattr,
        .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .check_acl      = generic_check_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .create         = shmem_create,
        .lookup         = simple_lookup,
        .link           = shmem_link,
        .unlink         = shmem_unlink,
        .symlink        = shmem_symlink,
        .mkdir          = shmem_mkdir,
        .rmdir          = shmem_rmdir,
        .mknod          = shmem_mknod,
        .rename         = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
        .listxattr      = shmem_listxattr,
        .removexattr    = shmem_removexattr,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_setattr,
        .check_acl      = generic_check_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .destroy_inode  = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs         = shmem_statfs,
        .remount_fs     = shmem_remount_fs,
        .show_options   = shmem_show_options,
#endif
        .evict_inode    = shmem_evict_inode,
        .drop_inode     = generic_delete_inode,
        .put_super      = shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
        .fault          = shmem_fault,
#ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "tmpfs",
        .mount          = shmem_mount,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        int error;

        error = bdi_init(&shmem_backing_dev_info);
        if (error)
                goto out4;

        error = init_inodecache();
        if (error)
                goto out3;

        error = register_filesystem(&tmpfs_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
                                tmpfs_fs_type.name, NULL);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
                goto out1;
        }
        return 0;

out1:
        unregister_filesystem(&tmpfs_fs_type);
out2:
        destroy_inodecache();
out3:
        bdi_destroy(&shmem_backing_dev_info);
out4:
        shm_mnt = ERR_PTR(error);
        return error;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
 * @inode: the inode to be searched
 * @pgoff: the offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @ent: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented. Callers should handle
 * this refcount.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
                                        struct page **pagep, swp_entry_t *ent)
{
        swp_entry_t entry = { .val = 0 }, *ptr;
        struct page *page = NULL;
        struct shmem_inode_info *info = SHMEM_I(inode);

        if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                goto out;

        spin_lock(&info->lock);
        ptr = shmem_swp_entry(info, pgoff, NULL);
#ifdef CONFIG_SWAP
        if (ptr && ptr->val) {
                entry.val = ptr->val;
                page = find_get_page(&swapper_space, entry.val);
        } else
#endif
                page = find_get_page(inode->i_mapping, pgoff);
        if (ptr)
                shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
out:
        *pagep = page;
        *ent = entry;
}
#endif

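/*
 * Illustrative sketch (not part of the original file): how a memcg-side
 * caller might consume the result of mem_cgroup_get_shmem_target() above.
 * Either *pagep holds a page with an elevated refcount (which the caller
 * must drop) or ent->val names the backing swap entry; both may be empty.
 * The function name is made up.
 */
#if 0
static void example_inspect_shmem_target(struct inode *inode, pgoff_t pgoff)
{
        struct page *page;
        swp_entry_t ent;

        mem_cgroup_get_shmem_target(inode, pgoff, &page, &ent);
        if (page) {
                /* data is in page cache (or swap cache); drop the reference */
                put_page(page);
        } else if (ent.val) {
                /* data currently lives at this swap entry */
        }
}
#endif
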
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
        .name           = "tmpfs",
        .mount          = ramfs_mount,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

        shm_mnt = kern_mount(&tmpfs_fs_type);
        BUG_ON(IS_ERR(shm_mnt));

        return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        return 0;
}

void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
        truncate_inode_pages_range(inode->i_mapping, start, end);
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
 * @inode: the inode to be searched
 * @pgoff: the offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @ent: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented. Callers should handle
 * this refcount.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
                                        struct page **pagep, swp_entry_t *ent)
{
        struct page *page = NULL;

        if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                goto out;
        page = find_get_page(inode->i_mapping, pgoff);
out:
        *pagep = page;
        *ent = (swp_entry_t){ .val = 0 };
}
#endif

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)
#define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        int error;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr this;

        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;

        if (size < 0 || size > SHMEM_MAX_BYTES)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        error = -ENOMEM;
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
        root = shm_mnt->mnt_root;
        path.dentry = d_alloc(root, &this);
        if (!path.dentry)
                goto put_memory;
        path.mnt = mntget(shm_mnt);

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
                goto put_dentry;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
#ifndef CONFIG_MMU
        error = ramfs_nommu_expand_for_mapping(inode, size);
        if (error)
                goto put_dentry;
#endif

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (!file)
                goto put_dentry;

        return file;

put_dentry:
        path_put(&path);
put_memory:
        shmem_unacct_size(flags, size);
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);

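/*
 * Illustrative sketch (not part of the original file): a kernel user
 * creating an unlinked tmpfs file to back an object, roughly the way GEM
 * objects or SysV shared memory use shmem_file_setup().  The function name
 * and size are made up; error handling is abbreviated.
 */
#if 0
static struct file *example_create_backing_store(loff_t size)
{
        struct file *filp;

        /*
         * The name only shows up in /proc/<pid>/maps; VM_NORESERVE skips
         * up-front swap accounting of the whole object.
         */
        filp = shmem_file_setup("example-object", size, VM_NORESERVE);
        if (IS_ERR(filp))
                return filp;

        /* the file is already unlinked; fput() releases it when done */
        return filp;
}
#endif
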
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
}

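/*
 * Illustrative sketch (not part of the original file): the kind of
 * userspace mapping that reaches shmem_zero_setup() above, i.e. a writable
 * shared mapping with no file behind it (MAP_SHARED | MAP_ANONYMOUS, or a
 * shared mapping of /dev/zero).  The size is made up for the example.
 */
#if 0
#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
        size_t len = 1 << 20;   /* 1 MiB, rounded to pages by the kernel */
        char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
                perror("mmap");
                return 1;
        }
        p[0] = 1;               /* faults in a tmpfs-backed page */
        munmap(p, len);
        return 0;
}
#endif
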
/**
 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:    the page's address_space
 * @index:      the page index
 * @gfp:        the page allocator flags to use if allocating
 *
 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
 * with any new page allocations done using the specified allocation flags.
 * But read_cache_page_gfp() uses the ->readpage() method: which does not
 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
 *
 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
 */
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                         pgoff_t index, gfp_t gfp)
{
#ifdef CONFIG_SHMEM
        struct inode *inode = mapping->host;
        struct page *page;
        int error;

        BUG_ON(mapping->a_ops != &shmem_aops);
        error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
        if (error)
                page = ERR_PTR(error);
        else
                unlock_page(page);
        return page;
#else
        /*
         * The tiny !SHMEM case uses ramfs without swap
         */
        return read_cache_page_gfp(mapping, index, gfp);
#endif
}
EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
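
/*
 * Illustrative sketch (not part of the original file): how a driver such
 * as i915 might pull pages from a shmem-backed object with relaxed
 * allocation flags, as the comment above describes.  The function name and
 * mapping argument are assumptions for the example.
 */
#if 0
static struct page *example_get_object_page(struct address_space *mapping,
                                            pgoff_t index)
{
        gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;

        /* returns the page with an elevated refcount, or an ERR_PTR() */
        return shmem_read_mapping_page_gfp(mapping, index, gfp);
}
#endif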