/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

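/*
 * Worked example for the limits above (illustrative only, assuming 4kB
 * pages and SHMEM_NR_DIRECT == 16): on a 32-bit kernel,
 * sizeof(unsigned long) is 4, so ENTRIES_PER_PAGE = 4096/4 = 1024 and
 * ENTRIES_PER_PAGEPAGE = 1024*1024.  Then SHMSWP_MAX_INDEX =
 * 16 + (1024*1024/2)*1025 = 537395216 pages, which at 4kB per page is
 * just over 2TB.  On a 64-bit kernel sizeof(unsigned long) is 8,
 * halving ENTRIES_PER_PAGE and so dividing the triple-indirect
 * capacity by 2*2*2 = 8, to roughly 256GB.
 */
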
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

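/*
 * For instance (illustrative): with 4kB pages, BLOCKS_PER_PAGE is
 * 4096/512 = 8, because inode->i_blocks is kept in 512-byte units;
 * and VM_ACCT(10000) rounds up to 3 pages, the unit charged by the
 * vm_enough_memory accounting helpers below.
 */
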
/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

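/*
 * Sizing example (illustrative): with 4kB pages on a machine with 1GB
 * of RAM and no highmem, totalram_pages is about 262144, so a tmpfs
 * mount without a size= or nr_blocks= option defaults to 131072
 * blocks (512MB) and to at most 131072 inodes; subtracting
 * totalhigh_pages keeps the inode default within lowmem on highmem
 * machines, since inodes cannot live in highmem.
 */
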
static int shmem_getpage(struct inode *inode, unsigned long idx,
			 struct page **pagep, enum sgp_type sgp, int *type);

static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

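/*
 * Accounting example (illustrative): a 16MB SysV shm segment set up
 * without MAP_NORESERVE is charged VM_ACCT(16MB) = 4096 pages against
 * the overcommit limit up front by shmem_acct_size(); a sparse 16MB
 * tmpfs file (created with VM_NORESERVE in info->flags) is instead
 * charged one page at a time by shmem_acct_block(), only as pages are
 * actually instantiated.
 */
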
static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		spin_lock(&inode->i_lock);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
		spin_unlock(&inode->i_lock);
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}

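/*
 * Lookup example (illustrative, using the artificial ENTRIES_PER_PAGE
 * = 4 and SHMEM_NR_DIRECT == 16 of the diagram above): for index 29,
 * index - 16 = 13 gives offset = 13 % 4 = 1 and index = 13 / 4 = 3.
 * Since 3 >= 4/2, this is a triple-indirect lookup: dir lands on slot
 * 2 of i_indirect (dir2), subdir becomes dir2's second block (pages
 * 28-31), and the entry returned is at offset 1 within it, i.e. 29.
 */
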
static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

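/*
 * Note on the page_private update above: each indirect vector page
 * keeps a running count of its nonzero swap entries in page_private,
 * so that truncation can tell at a glance whether a subdir page still
 * holds any swap entries; entries in i_direct are not counted this way.
 */
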
/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @sgp:   check and recheck i_size? skip allocation?
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info, unsigned long index, enum sgp_type sgp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test used_blocks against 1 less than max_blocks, since we
		 * have 1 data page (and perhaps indirect index pages) yet to
		 * allocate: a waste to allocate index if we cannot allocate
		 * data.
		 */
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks - 1) >= 0)
				return ERR_PTR(-ENOSPC);
			percpu_counter_inc(&sbinfo->used_blocks);
			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&inode->i_lock);
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(mapping_gfp_mask(inode->i_mapping));
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

static void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since
		 * truncate_pagecache or generic_delete_inode did it, before we
		 * lowered next_index.  Also, though shmem_getpage checks
		 * i_size before adding to cache, no recheck after: so fix the
		 * narrow window there too.
		 *
		 * Recalling truncate_inode_pages_range and unmap_mapping_range
		 * every time for punch_hole (which never got a chance to clear
		 * SHMEM_PAGEIN at the start of vmtruncate_range) is expensive,
		 * yet hardly ever necessary: try to optimize them out later.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
		if (punch_hole)
			unmap_mapping_range(inode->i_mapping, start,
							end - start, 1);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}

static int shmem_notify_change(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	loff_t newsize = attr->ia_size;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)
					&& newsize != inode->i_size) {
		struct page *page = NULL;

		if (newsize < inode->i_size) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (newsize & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					newsize >> PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (newsize) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}

		/* XXX(truncate): truncate_setsize should be called last */
		truncate_setsize(inode, newsize);
		if (page)
			page_cache_release(page);
		shmem_truncate_range(inode, newsize, (loff_t)-1);
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (inode->i_mapping->a_ops == &shmem_aops) {
		truncate_inode_pages(inode->i_mapping, 0);
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct address_space *mapping;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				ptr = shmem_swp_map(subdir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	ptr += offset;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	mapping = info->vfs_inode.i_mapping;
	error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error == -EEXIST) {
		struct page *filepage = find_get_page(mapping, idx);
		error = 1;
		if (filepage) {
			/*
			 * There might be a more uptodate page coming down
			 * from a stacked writepage: forget our swappage if so.
			 */
			if (PageUptodate(filepage))
				error = 0;
			page_cache_release(filepage);
		}
	}
	if (!error) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	return error;
}

/*
 * shmem_unuse() searches the swaplist for the shmem inode holding this
 * swap entry, and moves the page back into that file's page cache.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu,
	 * it's okay if sometimes we get rescheduled after this.
	 */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		goto uncharge;
	radix_tree_preload_end();

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

uncharge:
	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * may use the ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.  However, in those cases,
	 * we do still want to check if there's a redundant swappage to be
	 * discarded.
	 */
	if (wbc->for_reclaim)
		swap = get_swap_page();
	else
		swap.val = 0;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	if (swap.val) {
		mutex_lock(&shmem_swaplist_mutex);
		if (list_empty(&info->swaplist))
			list_add_tail(&info->swaplist, &shmem_swaplist);
	}

	spin_lock(&info->lock);
	if (swap.val)
		mutex_unlock(&shmem_swaplist_mutex);

	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		/*
		 * The more uptodate page coming down from a stacked
		 * writepage should replace our old swappage.
		 */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (swap.val && add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		spin_unlock(&info->lock);
		swap_shmem_alloc(swap);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

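/*
 * Example (illustrative): a tmpfs mount with -o mpol=interleave makes
 * shmem_get_sbmpol() hand that policy to each new inode's shared
 * policy, and shmem_show_mpol() then reports ",mpol=interleave" when
 * the mount options are shown in /proc/mounts.
 */
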
/*
 * shmem_getpage - either get the page from swap or allocate a new one
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and also free the swap entry,
 * since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage(struct inode *inode, unsigned long idx,
			struct page **pagep, enum sgp_type sgp, int *type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *filepage = *pagep;
	struct page *swappage;
	struct page *prealloc_page = NULL;
	swp_entry_t *entry;
	swp_entry_t swap;
	gfp_t gfp;
	int error;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;

	if (type)
		*type = 0;

	/*
	 * Normally, filepage is NULL on entry, and either found
	 * uptodate immediately, or allocated and zeroed, or read
	 * in under swappage, which is then assigned to filepage.
	 * But shmem_readpage (required for splice) passes in a locked
	 * filepage, which may be found not uptodate by other callers
	 * too, and may need to be copied from the swappage read in.
	 */
repeat:
	if (!filepage)
		filepage = find_lock_page(mapping, idx);
	if (filepage && PageUptodate(filepage))
		goto done;
	gfp = mapping_gfp_mask(mapping);
	if (!filepage) {
		/*
		 * Try to preload while we can wait, to not make a habit of
		 * draining atomic reserves; but don't latch on to this cpu.
		 */
		error = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
		if (error)
			goto failed;
		radix_tree_preload_end();
		if (sgp != SGP_READ && !prealloc_page) {
			/* We don't care if this fails */
			prealloc_page = shmem_alloc_page(gfp, info, idx);
			if (prealloc_page) {
				if (mem_cgroup_cache_charge(prealloc_page,
						current->mm, GFP_KERNEL)) {
					page_cache_release(prealloc_page);
					prealloc_page = NULL;
				}
			}
		}
	}
	error = 0;

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto failed;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		swappage = lookup_swap_cache(swap);
		if (!swappage) {
			shmem_swp_unmap(entry);
			/* here we actually do the io */
			if (type && !(*type & VM_FAULT_MAJOR)) {
				__count_vm_event(PGMAJFAULT);
				*type |= VM_FAULT_MAJOR;
			}
			spin_unlock(&info->lock);
			swappage = shmem_swapin(swap, gfp, info, idx);
			if (!swappage) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto failed;
				goto repeat;
			}
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (PageWriteback(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
		if (!PageUptodate(swappage)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(swappage);
			page_cache_release(swappage);
			error = -EIO;
			goto failed;
		}

		if (filepage) {
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			copy_highpage(filepage, swappage);
			unlock_page(swappage);
			page_cache_release(swappage);
			flush_dcache_page(filepage);
			SetPageUptodate(filepage);
			set_page_dirty(filepage);
			swap_free(swap);
		} else if (!(error = add_to_page_cache_locked(swappage, mapping,
					idx, GFP_NOWAIT))) {
			info->flags |= SHMEM_PAGEIN;
			shmem_swp_set(info, entry, 0);
			shmem_swp_unmap(entry);
			delete_from_swap_cache(swappage);
			spin_unlock(&info->lock);
			filepage = swappage;
			set_page_dirty(filepage);
			swap_free(swap);
		} else {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
								swappage,
								current->mm,
								gfp);
				if (error) {
					unlock_page(swappage);
					page_cache_release(swappage);
					goto failed;
				}
			}
			unlock_page(swappage);
			page_cache_release(swappage);
			goto repeat;
		}
	} else if (sgp == SGP_READ && !filepage) {
		shmem_swp_unmap(entry);
		filepage = find_get_page(mapping, idx);
		if (filepage &&
		    (!PageUptodate(filepage) || !trylock_page(filepage))) {
			spin_unlock(&info->lock);
			wait_on_page_locked(filepage);
			page_cache_release(filepage);
			filepage = NULL;
			goto repeat;
		}
		spin_unlock(&info->lock);
	} else {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0 ||
			    shmem_acct_block(info->flags))
				goto nospace;
			percpu_counter_inc(&sbinfo->used_blocks);
			spin_lock(&inode->i_lock);
			inode->i_blocks += BLOCKS_PER_PAGE;
			spin_unlock(&inode->i_lock);
		} else if (shmem_acct_block(info->flags))
			goto nospace;

		if (!filepage) {
			int ret;

			if (!prealloc_page) {
				spin_unlock(&info->lock);
				filepage = shmem_alloc_page(gfp, info, idx);
				if (!filepage) {
					shmem_unacct_blocks(info->flags, 1);
					shmem_free_blocks(inode, 1);
					error = -ENOMEM;
					goto failed;
				}
				SetPageSwapBacked(filepage);

				/*
				 * Precharge page while we can wait, compensate
				 * after
				 */
				error = mem_cgroup_cache_charge(filepage,
					current->mm, GFP_KERNEL);
				if (error) {
					page_cache_release(filepage);
					shmem_unacct_blocks(info->flags, 1);
					shmem_free_blocks(inode, 1);
					filepage = NULL;
					goto failed;
				}

				spin_lock(&info->lock);
			} else {
				filepage = prealloc_page;
				prealloc_page = NULL;
				SetPageSwapBacked(filepage);
			}

			entry = shmem_swp_alloc(info, idx, sgp);
			if (IS_ERR(entry))
				error = PTR_ERR(entry);
			else {
				swap = *entry;
				shmem_swp_unmap(entry);
			}
			ret = error || swap.val;
			if (ret)
				mem_cgroup_uncharge_cache_page(filepage);
			else
				ret = add_to_page_cache_lru(filepage, mapping,
						idx, GFP_NOWAIT);
			/*
			 * At add_to_page_cache_lru() failure, uncharge will
			 * be done automatically.
			 */
			if (ret) {
				spin_unlock(&info->lock);
				page_cache_release(filepage);
				shmem_unacct_blocks(info->flags, 1);
				shmem_free_blocks(inode, 1);
				filepage = NULL;
				if (error)
					goto failed;
				goto repeat;
			}
			info->flags |= SHMEM_PAGEIN;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(filepage);
		flush_dcache_page(filepage);
		SetPageUptodate(filepage);
		if (sgp == SGP_DIRTY)
			set_page_dirty(filepage);
	}
done:
	*pagep = filepage;
	error = 0;
	goto out;

nospace:
	/*
	 * Perhaps the page was brought in from swap between find_lock_page
	 * and taking info->lock?  We allow for that at add_to_page_cache_lru,
	 * but must also avoid reporting a spurious ENOSPC while working on a
	 * full tmpfs.  (When filepage has been passed in to shmem_getpage, it
	 * is already in page cache, which prevents this race from occurring.)
	 */
	if (!filepage) {
		struct page *page = find_get_page(mapping, idx);
		if (page) {
			spin_unlock(&info->lock);
			page_cache_release(page);
			goto repeat;
		}
	}
	spin_unlock(&info->lock);
	error = -ENOSPC;
failed:
	if (*pagep != filepage) {
		unlock_page(filepage);
		page_cache_release(filepage);
	}
out:
	if (prealloc_page) {
		mem_cgroup_uncharge_cache_page(prealloc_page);
		page_cache_release(prealloc_page);
	}
	return error;
}

static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	return ret | VM_FAULT_LOCKED;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

/*
 * Normally tmpfs avoids the use of shmem_readpage and shmem_write_begin;
 * but providing them allows a tmpfs file to be used for splice, sendfile, and
 * below the loop driver, in the generic fashion that many filesystems support.
 */
static int shmem_readpage(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int error = shmem_getpage(inode, page->index, &page, SGP_CACHE, NULL);
	unlock_page(page);
	return error;
}

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	*pagep = NULL;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

1da177e4
LT
1697static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
1698{
d3ac7f89 1699 struct inode *inode = filp->f_path.dentry->d_inode;
1da177e4
LT
1700 struct address_space *mapping = inode->i_mapping;
1701 unsigned long index, offset;
a0ee5ec5
HD
1702 enum sgp_type sgp = SGP_READ;
1703
1704 /*
1705 * Might this read be for a stacking filesystem? Then when reading
1706 * holes of a sparse file, we actually need to allocate those pages,
1707 * and even mark them dirty, so it cannot exceed the max_blocks limit.
1708 */
1709 if (segment_eq(get_fs(), KERNEL_DS))
1710 sgp = SGP_DIRTY;
1da177e4
LT
1711
1712 index = *ppos >> PAGE_CACHE_SHIFT;
1713 offset = *ppos & ~PAGE_CACHE_MASK;
1714
1715 for (;;) {
1716 struct page *page = NULL;
1717 unsigned long end_index, nr, ret;
1718 loff_t i_size = i_size_read(inode);
1719
1720 end_index = i_size >> PAGE_CACHE_SHIFT;
1721 if (index > end_index)
1722 break;
1723 if (index == end_index) {
1724 nr = i_size & ~PAGE_CACHE_MASK;
1725 if (nr <= offset)
1726 break;
1727 }
1728
a0ee5ec5 1729 desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
1da177e4
LT
1730 if (desc->error) {
1731 if (desc->error == -EINVAL)
1732 desc->error = 0;
1733 break;
1734 }
d3602444
HD
1735 if (page)
1736 unlock_page(page);
1da177e4
LT
1737
1738 /*
1739 * We must evaluate after, since reads (unlike writes)
1b1dcc1b 1740 * are called without i_mutex protection against truncate
1da177e4
LT
1741 */
1742 nr = PAGE_CACHE_SIZE;
1743 i_size = i_size_read(inode);
1744 end_index = i_size >> PAGE_CACHE_SHIFT;
1745 if (index == end_index) {
1746 nr = i_size & ~PAGE_CACHE_MASK;
1747 if (nr <= offset) {
1748 if (page)
1749 page_cache_release(page);
1750 break;
1751 }
1752 }
1753 nr -= offset;
1754
1755 if (page) {
1756 /*
1757 * If users can be writing to this page using arbitrary
1758 * virtual addresses, take care about potential aliasing
1759 * before reading the page on the kernel side.
1760 */
1761 if (mapping_writably_mapped(mapping))
1762 flush_dcache_page(page);
1763 /*
1764 * Mark the page accessed if we read the beginning.
1765 */
1766 if (!offset)
1767 mark_page_accessed(page);
b5810039 1768 } else {
1da177e4 1769 page = ZERO_PAGE(0);
b5810039
NP
1770 page_cache_get(page);
1771 }
1da177e4
LT
1772
1773 /*
1774 * Ok, we have the page, and it's up-to-date, so
1775 * now we can copy it to user space...
1776 *
1777 * The actor routine returns how many bytes were actually used..
1778 * NOTE! This may not be the same as how much of a user buffer
1779 * we filled up (we may be padding etc), so we can only update
1780 * "pos" here (the actor routine has to update the user buffer
1781 * pointers and the remaining count).
1782 */
1783 ret = actor(desc, page, offset, nr);
1784 offset += ret;
1785 index += offset >> PAGE_CACHE_SHIFT;
1786 offset &= ~PAGE_CACHE_MASK;
1787
1788 page_cache_release(page);
1789 if (ret != nr || !desc->count)
1790 break;
1791
1792 cond_resched();
1793 }
1794
1795 *ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
1796 file_accessed(filp);
1797}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
                const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
        struct file *filp = iocb->ki_filp;
        ssize_t retval;
        unsigned long seg;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;

        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;

        for (seg = 0; seg < nr_segs; seg++) {
                read_descriptor_t desc;

                desc.written = 0;
                desc.arg.buf = iov[seg].iov_base;
                desc.count = iov[seg].iov_len;
                if (desc.count == 0)
                        continue;
                desc.error = 0;
                do_shmem_file_read(filp, ppos, &desc, file_read_actor);
                retval += desc.written;
                if (desc.error) {
                        retval = retval ?: desc.error;
                        break;
                }
                if (desc.count > 0)
                        break;
        }
        return retval;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

        buf->f_type = TMPFS_MAGIC;
        buf->f_bsize = PAGE_CACHE_SIZE;
        buf->f_namelen = NAME_MAX;
        if (sbinfo->max_blocks) {
                buf->f_blocks = sbinfo->max_blocks;
                buf->f_bavail = buf->f_bfree =
                        sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
        }
        if (sbinfo->max_inodes) {
                buf->f_files = sbinfo->max_inodes;
                buf->f_ffree = sbinfo->free_inodes;
        }
        /* else leave those fields 0 like simple_statfs */
        return 0;
}
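
/*
 * For illustration (an assumption, not from the original source): these
 * statfs fields are what "df" reports for a limited tmpfs mount, e.g. a
 * size=512m instance might show up roughly as:
 *
 *      Filesystem      Size  Used Avail Use% Mounted on
 *      tmpfs           512M  4.0K  512M   1% /mnt/tmp
 *
 * An unlimited (internal) instance leaves the block and inode fields
 * zero, as simple_statfs does.
 */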

/*
 * File creation. Allocate an inode, and we're done.
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
        if (inode) {
                error = security_inode_init_security(inode, dir,
                                                     &dentry->d_name, NULL,
                                                     NULL, NULL);
                if (error) {
                        if (error != -EOPNOTSUPP) {
                                iput(inode);
                                return error;
                        }
                }
#ifdef CONFIG_TMPFS_POSIX_ACL
                error = generic_acl_init(inode, dir);
                if (error) {
                        iput(inode);
                        return error;
                }
#else
                error = 0;
#endif
                dir->i_size += BOGO_DIRENT_SIZE;
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry); /* Extra count - pin the dentry in core */
        }
        return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
        int error;

        if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
                return error;
        inc_nlink(dir);
        return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
                struct nameidata *nd)
{
        return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file.
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int ret;

        /*
         * No ordinary (disk based) filesystem counts links as inodes;
         * but each new link needs a new dentry, pinning lowmem, and
         * tmpfs dentries cannot be pruned until they are unlinked.
         */
        ret = shmem_reserve_inode(inode->i_sb);
        if (ret)
                goto out;

        dir->i_size += BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        inc_nlink(inode);
        ihold(inode);   /* New dentry reference */
        dget(dentry);   /* Extra pinning count for the created dentry */
        d_instantiate(dentry, inode);
out:
        return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = dentry->d_inode;

        if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
                shmem_free_inode(inode->i_sb);

        dir->i_size -= BOGO_DIRENT_SIZE;
        inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        drop_nlink(inode);
        dput(dentry);   /* Undo the count from "create" - this does all the work */
        return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
        if (!simple_empty(dentry))
                return -ENOTEMPTY;

        drop_nlink(dentry->d_inode);
        drop_nlink(dir);
        return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename;
 * we just have to decrement the usage count for the target if
 * it exists, so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
        struct inode *inode = old_dentry->d_inode;
        int they_are_dirs = S_ISDIR(inode->i_mode);

        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;

        if (new_dentry->d_inode) {
                (void) shmem_unlink(new_dir, new_dentry);
                if (they_are_dirs)
                        drop_nlink(old_dir);
        } else if (they_are_dirs) {
                drop_nlink(old_dir);
                inc_nlink(new_dir);
        }

        old_dir->i_size -= BOGO_DIRENT_SIZE;
        new_dir->i_size += BOGO_DIRENT_SIZE;
        old_dir->i_ctime = old_dir->i_mtime =
        new_dir->i_ctime = new_dir->i_mtime =
        inode->i_ctime = CURRENT_TIME;
        return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
        int error;
        int len;
        struct inode *inode;
        struct page *page = NULL;
        char *kaddr;
        struct shmem_inode_info *info;

        len = strlen(symname) + 1;
        if (len > PAGE_CACHE_SIZE)
                return -ENAMETOOLONG;

        inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
        if (!inode)
                return -ENOSPC;

        error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
                                             NULL, NULL);
        if (error) {
                if (error != -EOPNOTSUPP) {
                        iput(inode);
                        return error;
                }
                error = 0;
        }

        info = SHMEM_I(inode);
        inode->i_size = len-1;
        if (len <= (char *)inode - (char *)info) {
                /* do it inline */
                memcpy(info, symname, len);
                inode->i_op = &shmem_symlink_inline_operations;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
                        iput(inode);
                        return error;
                }
                inode->i_mapping->a_ops = &shmem_aops;
                inode->i_op = &shmem_symlink_inode_operations;
                kaddr = kmap_atomic(page, KM_USER0);
                memcpy(kaddr, symname, len);
                kunmap_atomic(kaddr, KM_USER0);
                set_page_dirty(page);
                unlock_page(page);
                page_cache_release(page);
        }
        dir->i_size += BOGO_DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;
        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
}
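
/*
 * Note (added commentary, not in the original): a short symlink target
 * is stored "inline", overlaying the start of shmem_inode_info ahead of
 * its embedded vfs_inode; (char *)inode - (char *)info is exactly the
 * room available for that, so no data page is needed. Longer targets
 * fall back to a regular page written through shmem_getpage() above,
 * which is why the two inode_operations variants below differ in how
 * they follow the link.
 */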

static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
        nd_set_link(nd, (char *)SHMEM_I(dentry->d_inode));
        return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
        struct page *page = NULL;
        int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
        nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
        if (page)
                unlock_page(page);
        return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
        if (!IS_ERR(nd_get_link(nd))) {
                struct page *page = cookie;
                kunmap(page);
                mark_page_accessed(page);
                page_cache_release(page);
        }
}

static const struct inode_operations shmem_symlink_inline_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link_inline,
};

static const struct inode_operations shmem_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = shmem_follow_link,
        .put_link       = shmem_put_link,
};

#ifdef CONFIG_TMPFS_POSIX_ACL
/*
 * Superblocks without xattr inode operations will get security.* xattr
 * support from the VFS "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

static size_t shmem_xattr_security_list(struct dentry *dentry, char *list,
                                        size_t list_len, const char *name,
                                        size_t name_len, int handler_flags)
{
        return security_inode_listsecurity(dentry->d_inode, list, list_len);
}

static int shmem_xattr_security_get(struct dentry *dentry, const char *name,
                void *buffer, size_t size, int handler_flags)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return xattr_getsecurity(dentry->d_inode, name, buffer, size);
}

static int shmem_xattr_security_set(struct dentry *dentry, const char *name,
                const void *value, size_t size, int flags, int handler_flags)
{
        if (strcmp(name, "") == 0)
                return -EINVAL;
        return security_inode_setsecurity(dentry->d_inode, name, value,
                                          size, flags);
}

static const struct xattr_handler shmem_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .list   = shmem_xattr_security_list,
        .get    = shmem_xattr_security_get,
        .set    = shmem_xattr_security_set,
};

static const struct xattr_handler *shmem_xattr_handlers[] = {
        &generic_acl_access_handler,
        &generic_acl_default_handler,
        &shmem_xattr_security_handler,
        NULL
};
#endif

static struct dentry *shmem_get_parent(struct dentry *child)
{
        return ERR_PTR(-ESTALE);
}

static int shmem_match(struct inode *ino, void *vfh)
{
        __u32 *fh = vfh;
        __u64 inum = fh[2];
        inum = (inum << 32) | fh[1];
        return ino->i_ino == inum && fh[0] == ino->i_generation;
}

static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
                struct fid *fid, int fh_len, int fh_type)
{
        struct inode *inode;
        struct dentry *dentry = NULL;
        u64 inum = fid->raw[2];
        inum = (inum << 32) | fid->raw[1];

        if (fh_len < 3)
                return NULL;

        inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
                        shmem_match, fid->raw);
        if (inode) {
                dentry = d_find_alias(inode);
                iput(inode);
        }

        return dentry;
}

static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
                                int connectable)
{
        struct inode *inode = dentry->d_inode;

        if (*len < 3) {
                *len = 3;
                return 255;
        }

        if (inode_unhashed(inode)) {
                /* Unfortunately insert_inode_hash is not idempotent,
                 * so as we hash inodes here rather than at creation
                 * time, we need a lock to ensure we only try
                 * to do it once
                 */
                static DEFINE_SPINLOCK(lock);
                spin_lock(&lock);
                if (inode_unhashed(inode))
                        __insert_inode_hash(inode,
                                            inode->i_ino + inode->i_generation);
                spin_unlock(&lock);
        }

        fh[0] = inode->i_generation;
        fh[1] = inode->i_ino;
        fh[2] = ((__u64)inode->i_ino) >> 32;

        *len = 3;
        return 1;
}
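
/*
 * Added commentary (not in the original): the file handle produced
 * above is three 32-bit words:
 *
 *      fh[0] = i_generation
 *      fh[1] = low 32 bits of i_ino
 *      fh[2] = high 32 bits of i_ino
 *
 * shmem_fh_to_dentry() reassembles the 64-bit inode number from
 * fh[1]/fh[2], and shmem_match() uses fh[0] to reject stale handles
 * after an inode number has been reused.
 */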

static const struct export_operations shmem_export_ops = {
        .get_parent     = shmem_get_parent,
        .encode_fh      = shmem_encode_fh,
        .fh_to_dentry   = shmem_fh_to_dentry,
};

static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
                               bool remount)
{
        char *this_char, *value, *rest;

        while (options != NULL) {
                this_char = options;
                for (;;) {
                        /*
                         * NUL-terminate this option: unfortunately,
                         * mount options form a comma-separated list,
                         * but mpol's nodelist may also contain commas.
                         */
                        options = strchr(options, ',');
                        if (options == NULL)
                                break;
                        options++;
                        if (!isdigit(*options)) {
                                options[-1] = '\0';
                                break;
                        }
                }
                if (!*this_char)
                        continue;
                if ((value = strchr(this_char,'=')) != NULL) {
                        *value++ = 0;
                } else {
                        printk(KERN_ERR
                            "tmpfs: No value for mount option '%s'\n",
                            this_char);
                        return 1;
                }

                if (!strcmp(this_char,"size")) {
                        unsigned long long size;
                        size = memparse(value,&rest);
                        if (*rest == '%') {
                                size <<= PAGE_SHIFT;
                                size *= totalram_pages;
                                do_div(size, 100);
                                rest++;
                        }
                        if (*rest)
                                goto bad_val;
                        sbinfo->max_blocks =
                                DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
                } else if (!strcmp(this_char,"nr_blocks")) {
                        sbinfo->max_blocks = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"nr_inodes")) {
                        sbinfo->max_inodes = memparse(value, &rest);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mode")) {
                        if (remount)
                                continue;
                        sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"uid")) {
                        if (remount)
                                continue;
                        sbinfo->uid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"gid")) {
                        if (remount)
                                continue;
                        sbinfo->gid = simple_strtoul(value, &rest, 0);
                        if (*rest)
                                goto bad_val;
                } else if (!strcmp(this_char,"mpol")) {
                        if (mpol_parse_str(value, &sbinfo->mpol, 1))
                                goto bad_val;
                } else {
                        printk(KERN_ERR "tmpfs: Bad mount option %s\n",
                               this_char);
                        return 1;
                }
        }
        return 0;

bad_val:
        printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
               value, this_char);
        return 1;

}
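
/*
 * Illustrative usage (added commentary, not from the original source):
 * the parser above handles option strings such as
 *
 *      mount -t tmpfs -o size=50%,nr_inodes=10k,mode=1777 tmpfs /mnt/tmp
 *      mount -t tmpfs -o mpol=bind:0,2 tmpfs /mnt/numa
 *
 * The isdigit() check after each comma is what keeps the nodelist in
 * "mpol=bind:0,2" from being split as if it were two separate options.
 */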

static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
        struct shmem_sb_info config = *sbinfo;
        unsigned long inodes;
        int error = -EINVAL;

        if (shmem_parse_options(data, &config, true))
                return error;

        spin_lock(&sbinfo->stat_lock);
        inodes = sbinfo->max_inodes - sbinfo->free_inodes;
        if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
                goto out;
        if (config.max_inodes < inodes)
                goto out;
        /*
         * Those tests also disallow limited->unlimited while any are in
         * use, so i_blocks will always be zero when max_blocks is zero;
         * but we must separately disallow unlimited->limited, because
         * in that case we have no record of how much is already in use.
         */
        if (config.max_blocks && !sbinfo->max_blocks)
                goto out;
        if (config.max_inodes && !sbinfo->max_inodes)
                goto out;

        error = 0;
        sbinfo->max_blocks  = config.max_blocks;
        sbinfo->max_inodes  = config.max_inodes;
        sbinfo->free_inodes = config.max_inodes - inodes;

        mpol_put(sbinfo->mpol);
        sbinfo->mpol        = config.mpol;      /* transfers initial ref */
out:
        spin_unlock(&sbinfo->stat_lock);
        return error;
}

static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);

        if (sbinfo->max_blocks != shmem_default_max_blocks())
                seq_printf(seq, ",size=%luk",
                        sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
        if (sbinfo->max_inodes != shmem_default_max_inodes())
                seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
        if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
                seq_printf(seq, ",mode=%03o", sbinfo->mode);
        if (sbinfo->uid != 0)
                seq_printf(seq, ",uid=%u", sbinfo->uid);
        if (sbinfo->gid != 0)
                seq_printf(seq, ",gid=%u", sbinfo->gid);
        shmem_show_mpol(seq, sbinfo->mpol);
        return 0;
}
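
/*
 * Example (added for illustration; the mount point and values are
 * hypothetical): together with shmem_parse_options(), this is what makes
 * a tmpfs line in /proc/mounts round-trip, e.g.:
 *
 *      tmpfs /mnt/tmp tmpfs rw,size=524288k,nr_inodes=10240,mode=700 0 0
 *
 * Only options differing from the defaults are emitted.
 */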
#endif /* CONFIG_TMPFS */

static void shmem_put_super(struct super_block *sb)
{
        struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

        percpu_counter_destroy(&sbinfo->used_blocks);
        kfree(sbinfo);
        sb->s_fs_info = NULL;
}

int shmem_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct dentry *root;
        struct shmem_sb_info *sbinfo;
        int err = -ENOMEM;

        /* Round up to L1_CACHE_BYTES to resist false sharing */
        sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
                                L1_CACHE_BYTES), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;

        sbinfo->mode = S_IRWXUGO | S_ISVTX;
        sbinfo->uid = current_fsuid();
        sbinfo->gid = current_fsgid();
        sb->s_fs_info = sbinfo;

#ifdef CONFIG_TMPFS
        /*
         * Per default we only allow half of the physical ram per
         * tmpfs instance, limiting inodes to one per page of lowmem;
         * but the internal instance is left unlimited.
         */
        if (!(sb->s_flags & MS_NOUSER)) {
                sbinfo->max_blocks = shmem_default_max_blocks();
                sbinfo->max_inodes = shmem_default_max_inodes();
                if (shmem_parse_options(data, sbinfo, false)) {
                        err = -EINVAL;
                        goto failed;
                }
        }
        sb->s_export_op = &shmem_export_ops;
#else
        sb->s_flags |= MS_NOUSER;
#endif

        spin_lock_init(&sbinfo->stat_lock);
        if (percpu_counter_init(&sbinfo->used_blocks, 0))
                goto failed;
        sbinfo->free_inodes = sbinfo->max_inodes;

        sb->s_maxbytes = SHMEM_MAX_BYTES;
        sb->s_blocksize = PAGE_CACHE_SIZE;
        sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
        sb->s_magic = TMPFS_MAGIC;
        sb->s_op = &shmem_ops;
        sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
        sb->s_xattr = shmem_xattr_handlers;
        sb->s_flags |= MS_POSIXACL;
#endif

        inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
        if (!inode)
                goto failed;
        inode->i_uid = sbinfo->uid;
        inode->i_gid = sbinfo->gid;
        root = d_alloc_root(inode);
        if (!root)
                goto failed_iput;
        sb->s_root = root;
        return 0;

failed_iput:
        iput(inode);
failed:
        shmem_put_super(sb);
        return err;
}

static struct kmem_cache *shmem_inode_cachep;

static struct inode *shmem_alloc_inode(struct super_block *sb)
{
        struct shmem_inode_info *p;
        p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
        if (!p)
                return NULL;
        return &p->vfs_inode;
}

static void shmem_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        INIT_LIST_HEAD(&inode->i_dentry);
        kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
}

static void shmem_destroy_inode(struct inode *inode)
{
        if ((inode->i_mode & S_IFMT) == S_IFREG) {
                /* only struct inode is valid if it's an inline symlink */
                mpol_free_shared_policy(&SHMEM_I(inode)->policy);
        }
        call_rcu(&inode->i_rcu, shmem_i_callback);
}

static void init_once(void *foo)
{
        struct shmem_inode_info *p = (struct shmem_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static int init_inodecache(void)
{
        shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
                                sizeof(struct shmem_inode_info),
                                0, SLAB_PANIC, init_once);
        return 0;
}

static void destroy_inodecache(void)
{
        kmem_cache_destroy(shmem_inode_cachep);
}

static const struct address_space_operations shmem_aops = {
        .writepage      = shmem_writepage,
        .set_page_dirty = __set_page_dirty_no_writeback,
#ifdef CONFIG_TMPFS
        .readpage       = shmem_readpage,
        .write_begin    = shmem_write_begin,
        .write_end      = shmem_write_end,
#endif
        .migratepage    = migrate_page,
        .error_remove_page = generic_error_remove_page,
};

static const struct file_operations shmem_file_operations = {
        .mmap           = shmem_mmap,
#ifdef CONFIG_TMPFS
        .llseek         = generic_file_llseek,
        .read           = do_sync_read,
        .write          = do_sync_write,
        .aio_read       = shmem_file_aio_read,
        .aio_write      = generic_file_aio_write,
        .fsync          = noop_fsync,
        .splice_read    = generic_file_splice_read,
        .splice_write   = generic_file_splice_write,
#endif
};

static const struct inode_operations shmem_inode_operations = {
        .setattr        = shmem_notify_change,
        .truncate_range = shmem_truncate_range,
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = generic_check_acl,
#endif
};

static const struct inode_operations shmem_dir_inode_operations = {
#ifdef CONFIG_TMPFS
        .create         = shmem_create,
        .lookup         = simple_lookup,
        .link           = shmem_link,
        .unlink         = shmem_unlink,
        .symlink        = shmem_symlink,
        .mkdir          = shmem_mkdir,
        .rmdir          = shmem_rmdir,
        .mknod          = shmem_mknod,
        .rename         = shmem_rename,
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = generic_check_acl,
#endif
};

static const struct inode_operations shmem_special_inode_operations = {
#ifdef CONFIG_TMPFS_POSIX_ACL
        .setattr        = shmem_notify_change,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
        .listxattr      = generic_listxattr,
        .removexattr    = generic_removexattr,
        .check_acl      = generic_check_acl,
#endif
};

static const struct super_operations shmem_ops = {
        .alloc_inode    = shmem_alloc_inode,
        .destroy_inode  = shmem_destroy_inode,
#ifdef CONFIG_TMPFS
        .statfs         = shmem_statfs,
        .remount_fs     = shmem_remount_fs,
        .show_options   = shmem_show_options,
#endif
        .evict_inode    = shmem_evict_inode,
        .drop_inode     = generic_delete_inode,
        .put_super      = shmem_put_super,
};

static const struct vm_operations_struct shmem_vm_ops = {
        .fault          = shmem_fault,
#ifdef CONFIG_NUMA
        .set_policy     = shmem_set_policy,
        .get_policy     = shmem_get_policy,
#endif
};

static struct dentry *shmem_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, shmem_fill_super);
}

static struct file_system_type tmpfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "tmpfs",
        .mount          = shmem_mount,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        int error;

        error = bdi_init(&shmem_backing_dev_info);
        if (error)
                goto out4;

        error = init_inodecache();
        if (error)
                goto out3;

        error = register_filesystem(&tmpfs_fs_type);
        if (error) {
                printk(KERN_ERR "Could not register tmpfs\n");
                goto out2;
        }

        shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
                                tmpfs_fs_type.name, NULL);
        if (IS_ERR(shm_mnt)) {
                error = PTR_ERR(shm_mnt);
                printk(KERN_ERR "Could not kern_mount tmpfs\n");
                goto out1;
        }
        return 0;

out1:
        unregister_filesystem(&tmpfs_fs_type);
out2:
        destroy_inodecache();
out3:
        bdi_destroy(&shmem_backing_dev_info);
out4:
        shm_mnt = ERR_PTR(error);
        return error;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
 * @inode: the inode to be searched
 * @pgoff: the offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @ent: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented; callers must handle
 * that reference.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
                                        struct page **pagep, swp_entry_t *ent)
{
        swp_entry_t entry = { .val = 0 }, *ptr;
        struct page *page = NULL;
        struct shmem_inode_info *info = SHMEM_I(inode);

        if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                goto out;

        spin_lock(&info->lock);
        ptr = shmem_swp_entry(info, pgoff, NULL);
#ifdef CONFIG_SWAP
        if (ptr && ptr->val) {
                entry.val = ptr->val;
                page = find_get_page(&swapper_space, entry.val);
        } else
#endif
                page = find_get_page(inode->i_mapping, pgoff);
        if (ptr)
                shmem_swp_unmap(ptr);
        spin_unlock(&info->lock);
out:
        *pagep = page;
        *ent = entry;
}
#endif

#else /* !CONFIG_SHMEM */

/*
 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
 *
 * This is intended for small systems where the benefits of the full
 * shmem code (swap-backed and resource-limited) are outweighed by
 * their complexity. On systems without swap this code should be
 * effectively equivalent, but much lighter weight.
 */

#include <linux/ramfs.h>

static struct file_system_type tmpfs_fs_type = {
        .name           = "tmpfs",
        .mount          = ramfs_mount,
        .kill_sb        = kill_litter_super,
};

int __init init_tmpfs(void)
{
        BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);

        shm_mnt = kern_mount(&tmpfs_fs_type);
        BUG_ON(IS_ERR(shm_mnt));

        return 0;
}

int shmem_unuse(swp_entry_t entry, struct page *page)
{
        return 0;
}

int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
        return 0;
}

#ifdef CONFIG_CGROUP_MEM_RES_CTLR
/**
 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
 * @inode: the inode to be searched
 * @pgoff: the offset to be searched
 * @pagep: the pointer for the found page to be stored
 * @ent: the pointer for the found swap entry to be stored
 *
 * If a page is found, its refcount is incremented; callers must handle
 * that reference.
 */
void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
                                        struct page **pagep, swp_entry_t *ent)
{
        struct page *page = NULL;

        if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
                goto out;
        page = find_get_page(inode->i_mapping, pgoff);
out:
        *pagep = page;
        *ent = (swp_entry_t){ .val = 0 };
}
#endif

#define shmem_vm_ops                            generic_file_vm_ops
#define shmem_file_operations                   ramfs_file_operations
#define shmem_get_inode(sb, dir, mode, dev, flags)      ramfs_get_inode(sb, dir, mode, dev)
#define shmem_acct_size(flags, size)            0
#define shmem_unacct_size(flags, size)          do {} while (0)
#define SHMEM_MAX_BYTES                         MAX_LFS_FILESIZE

#endif /* CONFIG_SHMEM */

/* common code */

/**
 * shmem_file_setup - get an unlinked file living in tmpfs
 * @name: name for dentry (to be seen in /proc/<pid>/maps)
 * @size: size to be set for the file
 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
 */
struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
{
        int error;
        struct file *file;
        struct inode *inode;
        struct path path;
        struct dentry *root;
        struct qstr this;

        if (IS_ERR(shm_mnt))
                return (void *)shm_mnt;

        if (size < 0 || size > SHMEM_MAX_BYTES)
                return ERR_PTR(-EINVAL);

        if (shmem_acct_size(flags, size))
                return ERR_PTR(-ENOMEM);

        error = -ENOMEM;
        this.name = name;
        this.len = strlen(name);
        this.hash = 0; /* will go */
        root = shm_mnt->mnt_root;
        path.dentry = d_alloc(root, &this);
        if (!path.dentry)
                goto put_memory;
        path.mnt = mntget(shm_mnt);

        error = -ENOSPC;
        inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
        if (!inode)
                goto put_dentry;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        inode->i_nlink = 0;     /* It is unlinked */
#ifndef CONFIG_MMU
        error = ramfs_nommu_expand_for_mapping(inode, size);
        if (error)
                goto put_dentry;
#endif

        error = -ENFILE;
        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                  &shmem_file_operations);
        if (!file)
                goto put_dentry;

        return file;

put_dentry:
        path_put(&path);
put_memory:
        shmem_unacct_size(flags, size);
        return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(shmem_file_setup);
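
/*
 * Minimal usage sketch (added commentary; "my_buffer" and the size are
 * hypothetical, not from this file):
 *
 *      struct file *filp = shmem_file_setup("my_buffer", 64 * 1024, 0);
 *      if (IS_ERR(filp))
 *              return PTR_ERR(filp);
 *
 * The returned file is unlinked, so the object disappears once the last
 * reference is dropped with fput(). This is essentially how SysV shared
 * memory and shmem_zero_setup() below obtain their backing store.
 */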

/**
 * shmem_zero_setup - setup a shared anonymous mapping
 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
 */
int shmem_zero_setup(struct vm_area_struct *vma)
{
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;

        file = shmem_file_setup("dev/zero", size, vma->vm_flags);
        if (IS_ERR(file))
                return PTR_ERR(file);

        if (vma->vm_file)
                fput(vma->vm_file);
        vma->vm_file = file;
        vma->vm_ops = &shmem_vm_ops;
        vma->vm_flags |= VM_CAN_NONLINEAR;
        return 0;
}
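
/*
 * Added illustration (an assumption, not from the original source): the
 * setup above is what gives an anonymous MAP_SHARED mapping its backing
 * object; e.g. after
 *
 *      void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * the vma is backed by an unlinked "dev/zero" tmpfs file, so its pages
 * are swappable and remain shared across fork().
 */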