/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000-2001 Christoph Rohland
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE	(PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE	((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX	(SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES	(SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES		min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX		((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

#define BLOCKS_PER_PAGE		(PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)		(PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN		VM_READ
#define SHMEM_TRUNCATE		VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT		64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE	20
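
/*
 * A worked example of the limits above (illustrative arithmetic, not
 * part of the original source): with 4kB pages on a 32-bit kernel,
 * sizeof(unsigned long) == 4, so
 *
 *	ENTRIES_PER_PAGE     = 4096/4           = 1024
 *	ENTRIES_PER_PAGEPAGE = 1024*1024        = 1048576
 *	SHMSWP_MAX_INDEX     = 16 + 524288*1025 = 537395216 pages
 *	SHMSWP_MAX_BYTES     = 537395216 << 12  ~ 2.2e12 bytes
 *
 * which is the "just over 2TB" quoted above.  On a 64-bit kernel the
 * 8-byte unsigned long halves ENTRIES_PER_PAGE, quarters
 * ENTRIES_PER_PAGEPAGE, and so cuts the total to roughly one eighth.
 */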
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
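
/*
 * For illustration (example numbers, not from the original source): on
 * a machine with 2GB of RAM and 4kB pages, totalram_pages is roughly
 * 524288, so a tmpfs mount defaults to max_blocks ~ 262144 (1GB of
 * data), and max_inodes is capped at one inode per page of lowmem.
 */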
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}
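
/*
 * To make the split concrete (a worked example, not from the original
 * source): a 10MB shared-anon mapping charges VM_ACCT(10MB) == 2560
 * pages against the overcommit limit up front via shmem_acct_size,
 * whereas a 10MB tmpfs file (created with VM_NORESERVE) with only three
 * pages written so far has charged just three single-page quanta through
 * shmem_acct_block - the rest of the file is still an uncharged hole.
 */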
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
	.unplug_io_fn	= default_unplug_io_fn,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_blocks += pages;
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}
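
/*
 * A worked example of the recalculation above (illustrative numbers):
 * if info->alloced == 100 while info->swapped == 30 and
 * inode->i_mapping->nrpages == 60, the mm has silently dropped
 * 100 - 30 - 60 == 10 undirtied hole pages, so those 10 blocks are
 * unaccounted and handed back to the superblock's free count.
 */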
/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         NULL.
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
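
/*
 * The index arithmetic above can be hard to follow; below is a minimal
 * user-space sketch of the same slot computation, reduced to the
 * artificial ENTRIES_PER_PAGE == 4 layout drawn in the comment.  It is
 * illustrative only - the struct and function names are hypothetical
 * and nothing here is kernel code.
 */
#if 0	/* example only: not compiled */
struct swp_pos {
	int top;	/* slot in the i_indirect page, or -1 if direct */
	int mid;	/* slot in the middle directory page, or -1 if none */
	int entry;	/* entry within the final swp page (or i_direct) */
};

static struct swp_pos locate_example(unsigned long index)
{
	const unsigned long epp = 4, nr_direct = 16;
	struct swp_pos pos = { -1, -1, 0 };

	if (index < nr_direct) {		/* i_direct[index] */
		pos.entry = index;
		return pos;
	}
	index -= nr_direct;
	pos.entry = index % epp;		/* offset in the leaf page */
	index /= epp;				/* which leaf page */
	if (index < epp/2) {			/* doubly indirect half */
		pos.top = index;
	} else {				/* triply indirect half */
		index -= epp/2;
		pos.top = epp/2 + index/epp;	/* middle directory slot */
		pos.mid = index % epp;		/* leaf slot in middle dir */
	}
	return pos;
}
/* e.g. index 16 -> top 0, entry 0; index 24 -> top 2, mid 0, entry 0,
 * matching the "dir2 --> 24-27" slot in the diagram above. */
#endif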
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test free_blocks against 1 not 0, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks <= 1) {
				spin_unlock(&sbinfo->stat_lock);
				return ERR_PTR(-ENOSPC);
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		if (page)
			set_page_private(page, 0);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}
/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}
static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since vmtruncate or
		 * generic_delete_inode did it, before we lowered next_index.
		 * Also, though shmem_getpage checks i_size before adding to
		 * cache, no recheck after: so fix the narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static void shmem_truncate(struct inode *inode)
{
	shmem_truncate_range(inode, inode->i_size, (loff_t)-1);
}
static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	struct page *page = NULL;
	int error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		if (attr->ia_size < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (attr->ia_size & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					attr->ia_size>>PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (attr->ia_size) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
	}

	error = inode_change_ok(inode, attr);
	if (!error)
		error = inode_setattr(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (!error && (attr->ia_valid & ATTR_MODE))
		error = generic_acl_chmod(inode, &shmem_acl_ops);
#endif
	if (page)
		page_cache_release(page);
	return error;
}

static void shmem_delete_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_op->truncate == shmem_truncate) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate(inode);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	clear_inode(inode);
}
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct inode *inode;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0)
		goto found;
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	inode = igrab(&info->vfs_inode);
	spin_unlock(&info->lock);

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_delete_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);
	mutex_unlock(&shmem_swaplist_mutex);

	error = 1;
	if (!inode)
		goto out;
	/*
	 * Charge page using GFP_KERNEL while we can wait.
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	error = radix_tree_preload(GFP_KERNEL);
	if (error) {
		mem_cgroup_uncharge_cache_page(page);
		goto out;
	}
	error = 1;

	spin_lock(&info->lock);
	ptr = shmem_swp_entry(info, idx, NULL);
	if (ptr && ptr->val == entry.val) {
		error = add_to_page_cache_locked(page, inode->i_mapping,
						idx, GFP_NOWAIT);
		/* does mem_cgroup_uncharge_cache_page on error */
	} else	/* we must compensate for our precharge above */
		mem_cgroup_uncharge_cache_page(page);

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(inode->i_mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	if (ptr)
		shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	radix_tree_preload_end();
out:
	unlock_page(page);
	page_cache_release(page);
	iput(inode);		/* allows for NULL */
	return error;
}
/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			goto out;
	}
	mutex_unlock(&shmem_swaplist_mutex);
	/*
	 * Can some race bring us here?  We've been holding page lock,
	 * so I think not; but would rather try again later than BUG()
	 */
	unlock_page(page);
	page_cache_release(page);
out:
	return (found < 0) ? found : 0;
}
/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	spin_lock(&info->lock);
	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		if (list_empty(&info->swaplist))
			inode = igrab(inode);
		else
			inode = NULL;
		spin_unlock(&info->lock);
		swap_shmem_alloc(swap);
		BUG_ON(page_mapped(page));
		page_cache_release(page);	/* pagecache ref */
		swap_writepage(page, wbc);
		if (inode) {
			mutex_lock(&shmem_swaplist_mutex);
			/* move instead of add in case we're racing */
			list_move_tail(&info->swaplist, &shmem_swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
			iput(inode);
		}
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}
#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	error = 0;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
						swappage, current->mm, gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			spin_lock(&sbinfo->stat_lock);
			if (sbinfo->free_blocks == 0 ||
			    shmem_acct_block(info->flags)) {
				spin_unlock(&sbinfo->stat_lock);
				spin_unlock(&info->lock);
				error = -ENOSPC;
				goto failed;
			}
			sbinfo->free_blocks--;
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&sbinfo->stat_lock);
		} else if (shmem_acct_block(info->flags)) {
			spin_unlock(&info->lock);
			error = -ENOSPC;
			goto failed;
		}

		if (!filepage) {
			int ret;

			spin_unlock(&info->lock);
			filepage = shmem_alloc_page(gfp, info, idx);
			if (!filepage) {
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				error = -ENOMEM;
				goto failed;
			}
			SetPageSwapBacked(filepage);

			/* Precharge page while we can wait, compensate after */
			error = mem_cgroup_cache_charge(filepage, current->mm,
					GFP_KERNEL);
			if (error) {
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				goto failed;
			}

			spin_lock(&info->lock);
			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	return 0;

failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
	return error;
}
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}
static struct inode *shmem_get_inode(struct super_block *sb, int mode,
					dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current_fsuid();
		inode->i_gid = current_fsgid();
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}
static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	spin_lock(&sbinfo->stat_lock);
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	spin_unlock(&sbinfo->stat_lock);
	return 0;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir, NULL, NULL,
						     NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
		error = shmem_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
		if (dir->i_mode & S_ISGID) {
			inode->i_gid = dir->i_gid;
			if (S_ISDIR(mode))
				inode->i_mode |= S_ISGID;
		}
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	atomic_inc(&inode->i_count);	/* New dentry reference */
	dget(dentry);		/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}
static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page = NULL;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, NULL, NULL,
					     NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= (char *)inode - (char *)info) {
		/* do it inline */
		memcpy(info, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	if (dir->i_mode & S_ISGID)
		inode->i_gid = dir->i_gid;
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

static const struct inode_operations shmem_symlink_inline_operations = {
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
	.truncate	= shmem_truncate,
	.readlink	= generic_readlink,
	.follow_link	= shmem_follow_link,
	.put_link	= shmem_put_link,
};
#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct inode *inode, char *list,
					size_t list_len, const char *name,
					size_t name_len)
{
	return security_inode_listsecurity(inode, list, list_len);
}

static int shmem_xattr_security_get(struct inode *inode, const char *name,
				    void *buffer, size_t size)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return xattr_getsecurity(inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct inode *inode, const char *name,
				    const void *value, size_t size, int flags)
{
	if (strcmp(name, "") == 0)
		return -EINVAL;
	return security_inode_setsecurity(inode, name, value, size, flags);
}

static struct xattr_handler shmem_xattr_security_handler = {
	.prefix	= XATTR_SECURITY_PREFIX,
	.list	= shmem_xattr_security_list,
	.get	= shmem_xattr_security_get,
	.set	= shmem_xattr_security_set,
};

static struct xattr_handler *shmem_xattr_handlers[] = {
	&shmem_xattr_acl_access_handler,
	&shmem_xattr_acl_default_handler,
	&shmem_xattr_security_handler,
	NULL
};
#endif
static struct dentry *shmem_get_parent(struct dentry *child)
{
	return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
	__u32 *fh = vfh;
	__u64 inum = fh[2];
	inum = (inum << 32) | fh[1];
	return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
		struct fid *fid, int fh_len, int fh_type)
{
	struct inode *inode;
	struct dentry *dentry = NULL;
	u64 inum = fid->raw[2];
	inum = (inum << 32) | fid->raw[1];

	if (fh_len < 3)
		return NULL;

	inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
			shmem_match, fid->raw);
	if (inode) {
		dentry = d_find_alias(inode);
		iput(inode);
	}

	return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
				int connectable)
{
	struct inode *inode = dentry->d_inode;

	if (*len < 3)
		return 255;

	if (hlist_unhashed(&inode->i_hash)) {
		/* Unfortunately insert_inode_hash is not idempotent,
		 * so as we hash inodes here rather than at creation
		 * time, we need a lock to ensure we only try
		 * to do it once
		 */
		static DEFINE_SPINLOCK(lock);
		spin_lock(&lock);
		if (hlist_unhashed(&inode->i_hash))
			__insert_inode_hash(inode,
					    inode->i_ino + inode->i_generation);
		spin_unlock(&lock);
	}

	fh[0] = inode->i_generation;
	fh[1] = inode->i_ino;
	fh[2] = ((__u64)inode->i_ino) >> 32;
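
/*
 * An illustrative round-trip of the file-handle encoding above (a
 * user-space sketch, not part of the original source): the 64-bit
 * inode number is split across fh[1] (low half) and fh[2] (high half),
 * with fh[0] carrying i_generation, and shmem_match() reassembles it.
 */
#if 0	/* example only: not compiled */
#include <stdint.h>
#include <assert.h>

int main(void)
{
	uint64_t ino = 0x123456789abcdef0ULL;	/* arbitrary example i_ino */
	uint32_t generation = 42;
	uint32_t fh[3];

	fh[0] = generation;			/* as in shmem_encode_fh() */
	fh[1] = (uint32_t)ino;			/* low 32 bits of i_ino */
	fh[2] = (uint32_t)(ino >> 32);		/* high 32 bits of i_ino */

	uint64_t inum = fh[2];			/* as in shmem_match() */
	inum = (inum << 32) | fh[1];
	assert(inum == ino && fh[0] == generation);
	return 0;
}
#endif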
static const struct export_operations shmem_export_ops = {
	.get_parent	= shmem_get_parent,
	.encode_fh	= shmem_encode_fh,
	.fh_to_dentry	= shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
			       bool remount)
{
	char *this_char, *value, *rest;

	while (options != NULL) {
		this_char = options;
		for (;;) {
			/*
			 * NUL-terminate this option: unfortunately,
			 * mount options form a comma-separated list,
			 * but mpol's nodelist may also contain commas.
			 */
			options = strchr(options, ',');
			if (options == NULL)
				break;
			options++;
			if (!isdigit(*options)) {
				options[-1] = '\0';
				break;
			}
		}
		if (!*this_char)
			continue;
		if ((value = strchr(this_char,'=')) != NULL) {
			*value++ = 0;
		} else {
			printk(KERN_ERR
			    "tmpfs: No value for mount option '%s'\n",
			    this_char);
			return 1;
		}

		if (!strcmp(this_char,"size")) {
			unsigned long long size;
			size = memparse(value,&rest);
			if (*rest == '%') {
				size <<= PAGE_SHIFT;
				size *= totalram_pages;
				do_div(size, 100);
				rest++;
			}
			if (*rest)
				goto bad_val;
			sbinfo->max_blocks =
				DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
		} else if (!strcmp(this_char,"nr_blocks")) {
			sbinfo->max_blocks = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"nr_inodes")) {
			sbinfo->max_inodes = memparse(value, &rest);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mode")) {
			if (remount)
				continue;
			sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"uid")) {
			if (remount)
				continue;
			sbinfo->uid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"gid")) {
			if (remount)
				continue;
			sbinfo->gid = simple_strtoul(value, &rest, 0);
			if (*rest)
				goto bad_val;
		} else if (!strcmp(this_char,"mpol")) {
			if (mpol_parse_str(value, &sbinfo->mpol, 1))
				goto bad_val;
		} else {
			printk(KERN_ERR "tmpfs: Bad mount option %s\n",
			       this_char);
			return 1;
		}
	}
	return 0;

bad_val:
	printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
	       value, this_char);
	return 1;
}

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	struct shmem_sb_info config = *sbinfo;
	unsigned long blocks;
	unsigned long inodes;
	int error = -EINVAL;

	if (shmem_parse_options(data, &config, true))
		return error;

	spin_lock(&sbinfo->stat_lock);
	blocks = sbinfo->max_blocks - sbinfo->free_blocks;
	inodes = sbinfo->max_inodes - sbinfo->free_inodes;
	if (config.max_blocks < blocks)
		goto out;
	if (config.max_inodes < inodes)
		goto out;
	/*
	 * Those tests also disallow limited->unlimited while any are in
	 * use, so i_blocks will always be zero when max_blocks is zero;
	 * but we must separately disallow unlimited->limited, because
	 * in that case we have no record of how much is already in use.
	 */
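	/*
	 * For example (illustrative numbers, not from the original source):
	 * with max_blocks 1000 and free_blocks 400, 600 blocks are in use,
	 * so a remount may not shrink max_blocks below 600; and since an
	 * unlimited instance (max_blocks == 0) keeps no in-use count at
	 * all, it can never be remounted with a limit.
	 */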
	if (config.max_blocks && !sbinfo->max_blocks)
		goto out;
	if (config.max_inodes && !sbinfo->max_inodes)
		goto out;

	error = 0;
	sbinfo->max_blocks  = config.max_blocks;
	sbinfo->free_blocks = config.max_blocks - blocks;
	sbinfo->max_inodes  = config.max_inodes;
	sbinfo->free_inodes = config.max_inodes - inodes;

	mpol_put(sbinfo->mpol);
	sbinfo->mpol        = config.mpol;	/* transfers initial ref */
out:
	spin_unlock(&sbinfo->stat_lock);
	return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

	if (sbinfo->max_blocks != shmem_default_max_blocks())
		seq_printf(seq, ",size=%luk",
			sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
	if (sbinfo->max_inodes != shmem_default_max_inodes())
		seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
	if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
		seq_printf(seq, ",mode=%03o", sbinfo->mode);
	if (sbinfo->uid != 0)
		seq_printf(seq, ",uid=%u", sbinfo->uid);
	if (sbinfo->gid != 0)
		seq_printf(seq, ",gid=%u", sbinfo->gid);
	shmem_show_mpol(seq, sbinfo->mpol);
	return 0;
}
#endif /* CONFIG_TMPFS */
2305 static void shmem_put_super(struct super_block
*sb
)
2307 kfree(sb
->s_fs_info
);
2308 sb
->s_fs_info
= NULL
;
int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;
	struct shmem_sb_info *sbinfo;
	int err = -ENOMEM;

	/* Round up to L1_CACHE_BYTES to resist false sharing */
	sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
				L1_CACHE_BYTES), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;

	sbinfo->mode = S_IRWXUGO | S_ISVTX;
	sbinfo->uid = current_fsuid();
	sbinfo->gid = current_fsgid();
	sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
	/*
	 * Per default we only allow half of the physical ram per
	 * tmpfs instance, limiting inodes to one per page of lowmem;
	 * but the internal instance is left unlimited.
	 */
	if (!(sb->s_flags & MS_NOUSER)) {
		sbinfo->max_blocks = shmem_default_max_blocks();
		sbinfo->max_inodes = shmem_default_max_inodes();
		if (shmem_parse_options(data, sbinfo, false)) {
			err = -EINVAL;
			goto failed;
		}
	}
	sb->s_export_op = &shmem_export_ops;
#else
	sb->s_flags |= MS_NOUSER;
#endif

	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->free_blocks = sbinfo->max_blocks;
	sbinfo->free_inodes = sbinfo->max_inodes;

	sb->s_maxbytes = SHMEM_MAX_BYTES;
	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = TMPFS_MAGIC;
	sb->s_op = &shmem_ops;
	sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
	sb->s_xattr = shmem_xattr_handlers;
	sb->s_flags |= MS_POSIXACL;
#endif

	inode = shmem_get_inode(sb, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
	if (!inode)
		goto failed;
	inode->i_uid = sbinfo->uid;
	inode->i_gid = sbinfo->gid;
	root = d_alloc_root(inode);
	if (!root)
		goto failed_iput;
	sb->s_root = root;
	return 0;

failed_iput:
	iput(inode);
failed:
	shmem_put_super(sb);
	return err;
}
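/*
 * Note: the internal mount created with MS_NOUSER in init_tmpfs() skips
 * the default limits above, so kernel users of shmem are never capped
 * by the tmpfs size= accounting.
 */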
static struct kmem_cache *shmem_inode_cachep;
static struct inode *shmem_alloc_inode(struct super_block *sb)
{
	struct shmem_inode_info *p;
	p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
	if (!p)
		return NULL;
	return &p->vfs_inode;
}
static void shmem_destroy_inode(struct inode *inode)
{
	if ((inode->i_mode & S_IFMT) == S_IFREG) {
		/* only struct inode is valid if it's an inline symlink */
		mpol_free_shared_policy(&SHMEM_I(inode)->policy);
	}
	kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}
static void init_once(void *foo)
{
	struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}
static int init_inodecache(void)
{
	shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
				sizeof(struct shmem_inode_info),
				0, SLAB_PANIC, init_once);
	return 0;
}
static void destroy_inodecache(void)
{
	kmem_cache_destroy(shmem_inode_cachep);
}
static const struct address_space_operations shmem_aops = {
	.writepage	= shmem_writepage,
	.set_page_dirty	= __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
	.readpage	= shmem_readpage,
	.write_begin	= shmem_write_begin,
	.write_end	= shmem_write_end,
#endif
	.migratepage	= migrate_page,
	.error_remove_page = generic_error_remove_page,
};
static const struct file_operations shmem_file_operations = {
	.mmap		= shmem_mmap,
#ifdef CONFIG_TMPFS
	.llseek		= generic_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= shmem_file_aio_read,
	.aio_write	= generic_file_aio_write,
	.fsync		= simple_sync_file,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
#endif
};
static const struct inode_operations shmem_inode_operations = {
	.truncate	= shmem_truncate,
	.setattr	= shmem_notify_change,
	.truncate_range	= shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
	.create		= shmem_create,
	.lookup		= simple_lookup,
	.link		= shmem_link,
	.unlink		= shmem_unlink,
	.symlink	= shmem_symlink,
	.mkdir		= shmem_mkdir,
	.rmdir		= shmem_rmdir,
	.mknod		= shmem_mknod,
	.rename		= shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
	.setattr	= shmem_notify_change,
	.setxattr	= generic_setxattr,
	.getxattr	= generic_getxattr,
	.listxattr	= generic_listxattr,
	.removexattr	= generic_removexattr,
	.check_acl	= shmem_check_acl,
#endif
};
static const struct super_operations shmem_ops = {
	.alloc_inode	= shmem_alloc_inode,
	.destroy_inode	= shmem_destroy_inode,
#ifdef CONFIG_TMPFS
	.statfs		= shmem_statfs,
	.remount_fs	= shmem_remount_fs,
	.show_options	= shmem_show_options,
#endif
	.delete_inode	= shmem_delete_inode,
	.drop_inode	= generic_delete_inode,
	.put_super	= shmem_put_super,
};
static const struct vm_operations_struct shmem_vm_ops = {
	.fault		= shmem_fault,
#ifdef CONFIG_NUMA
	.set_policy	= shmem_set_policy,
	.get_policy	= shmem_get_policy,
#endif
};
static int shmem_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_nodev(fs_type, flags, data, shmem_fill_super, mnt);
}
static struct file_system_type tmpfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "tmpfs",
	.get_sb		= shmem_get_sb,
	.kill_sb	= kill_litter_super,
};
int __init init_tmpfs(void)
{
	int error;

	error = bdi_init(&shmem_backing_dev_info);
	if (error)
		goto out4;

	error = init_inodecache();
	if (error)
		goto out3;

	error = register_filesystem(&tmpfs_fs_type);
	if (error) {
		printk(KERN_ERR "Could not register tmpfs\n");
		goto out2;
	}

	shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
				tmpfs_fs_type.name, NULL);
	if (IS_ERR(shm_mnt)) {
		error = PTR_ERR(shm_mnt);
		printk(KERN_ERR "Could not kern_mount tmpfs\n");
		goto out1;
	}
	return 0;

out1:
	unregister_filesystem(&tmpfs_fs_type);
out2:
	destroy_inodecache();
out3:
	bdi_destroy(&shmem_backing_dev_info);
out4:
	shm_mnt = ERR_PTR(error);
	return error;
}
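/*
 * The out1..out4 labels above unwind in strict reverse order of setup,
 * so a failure at any stage releases only what was already initialized,
 * and shm_mnt is left carrying the error for shmem_file_setup() to see.
 */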
#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */
#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
	.name		= "tmpfs",
	.get_sb		= ramfs_get_sb,
	.kill_sb	= kill_litter_super,
};

int __init init_tmpfs(void)
{
	BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

	shm_mnt = kern_mount(&tmpfs_fs_type);
	BUG_ON(IS_ERR(shm_mnt));

	return 0;
}
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	return 0;
}

#define shmem_vm_ops				generic_file_vm_ops
#define shmem_file_operations			ramfs_file_operations
#define shmem_get_inode(sb, mode, dev, flags)	ramfs_get_inode(sb, mode, dev)
#define shmem_acct_size(flags, size)		0
#define shmem_unacct_size(flags, size)		do {} while (0)
#define SHMEM_MAX_BYTES				MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */
/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
	int error;
	struct file *file;
	struct inode *inode;
	struct path path;
	struct dentry *root;
	struct qstr this;

	if (IS_ERR(shm_mnt))
		return (void *)shm_mnt;

	if (size < 0 || size > SHMEM_MAX_BYTES)
		return ERR_PTR(-EINVAL);

	if (shmem_acct_size(flags, size))
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	this.name = name;
	this.len = strlen(name);
	this.hash = 0; /* will go */
	root = shm_mnt->mnt_root;
	path.dentry = d_alloc(root, &this);
	if (!path.dentry)
		goto put_memory;
	path.mnt = mntget(shm_mnt);

	error = -ENOSPC;
	inode = shmem_get_inode(root->d_sb, S_IFREG | S_IRWXUGO, 0, flags);
	if (!inode)
		goto put_dentry;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	inode->i_nlink = 0;	/* It is unlinked */
#ifndef CONFIG_MMU
	error = ramfs_nommu_expand_for_mapping(inode, size);
	if (error)
		goto put_dentry;
#endif

	error = -ENFILE;
	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
		  &shmem_file_operations);
	if (!file)
		goto put_dentry;

	return file;

put_dentry:
	path_put(&path);
put_memory:
	shmem_unacct_size(flags, size);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
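/*
 * Usage sketch (illustrative, in-kernel caller):
 *
 *	struct file *file = shmem_file_setup("dev/my-buf", 65536, 0);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *	...use file as a swap-backed scratch object...
 *	fput(file);
 *
 * This is the same path SYSV shared memory and shared anonymous
 * mappings use to obtain their backing files.
 */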
/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
	struct file *file;
	loff_t size = vma->vm_end - vma->vm_start;

	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
	if (IS_ERR(file))
		return PTR_ERR(file);

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = file;
	vma->vm_ops = &shmem_vm_ops;
	return 0;
}
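/*
 * Illustrative path: a userspace mmap(NULL, len, PROT_READ|PROT_WRITE,
 * MAP_SHARED|MAP_ANONYMOUS, -1, 0) reaches shmem_zero_setup() via
 * do_mmap_pgoff(), which swaps the vma's backing file for an unlinked
 * tmpfs file so the shared pages can be swapped.
 */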