/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu_counter.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/div64.h>
#include <asm/pgtable.h>

/*
 * The maximum size of a shmem/tmpfs file is limited by the maximum size of
 * its triple-indirect swap vector - see illustration at shmem_swp_entry().
 *
 * With 4kB page size, maximum file size is just over 2TB on a 32-bit kernel,
 * but one eighth of that on a 64-bit kernel.  With 8kB page size, maximum
 * file size is just over 4TB on a 64-bit kernel, but 16TB on a 32-bit kernel,
 * MAX_LFS_FILESIZE being then more restrictive than swap vector layout.
 *
 * We use / and * instead of shifts in the definitions below, so that the swap
 * vector can be tested with small even values (e.g. 20) for ENTRIES_PER_PAGE.
 */
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE ((unsigned long long)ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)

#define SHMSWP_MAX_INDEX (SHMEM_NR_DIRECT + (ENTRIES_PER_PAGEPAGE/2) * (ENTRIES_PER_PAGE+1))
#define SHMSWP_MAX_BYTES (SHMSWP_MAX_INDEX << PAGE_CACHE_SHIFT)

#define SHMEM_MAX_BYTES  min_t(unsigned long long, SHMSWP_MAX_BYTES, MAX_LFS_FILESIZE)
#define SHMEM_MAX_INDEX  ((unsigned long)((SHMEM_MAX_BYTES+1) >> PAGE_CACHE_SHIFT))

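/*
 * A worked check of the limits above (not in the original source): with
 * 4kB pages and 4-byte longs, ENTRIES_PER_PAGE is 1024, so SHMSWP_MAX_INDEX
 * is roughly (1024*1024/2) * 1025 ~= 2^29 pages (SHMEM_NR_DIRECT is
 * negligible), i.e. just over 2TB.  With 8-byte longs it is 512 entries
 * per page, (512*512/2) * 513 ~= 2^26 pages, one eighth of that.
 */
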
#define BLOCKS_PER_PAGE  (PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)    (PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* info->flags needs VM_flags to handle pagein/truncate races efficiently */
#define SHMEM_PAGEIN	 VM_READ
#define SHMEM_TRUNCATE	 VM_WRITE

/* Definition to limit shmem_truncate's steps between cond_rescheds */
#define LATENCY_LIMIT	 64

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage and shmem_swp_alloc */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

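/*
 * The "dir" and "swp" helpers below manage the index ("directory") pages
 * of the swap vector: whole pages holding arrays of struct page * (upper
 * levels) or swp_entry_t (leaf level), mapped via kmap_atomic.
 */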
static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
{
	/*
	 * The above definition of ENTRIES_PER_PAGE, and the use of
	 * BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
	 * might be reconsidered if it ever diverges from PAGE_SIZE.
	 *
	 * Mobility flags are masked out as swap vectors cannot move
	 */
	return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
				PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static inline void shmem_dir_free(struct page *page)
{
	__free_pages(page, PAGE_CACHE_SHIFT-PAGE_SHIFT);
}

static struct page **shmem_dir_map(struct page *page)
{
	return (struct page **)kmap_atomic(page, KM_USER0);
}

static inline void shmem_dir_unmap(struct page **dir)
{
	kunmap_atomic(dir, KM_USER0);
}

static swp_entry_t *shmem_swp_map(struct page *page)
{
	return (swp_entry_t *)kmap_atomic(page, KM_USER1);
}

static inline void shmem_swp_balance_unmap(void)
{
	/*
	 * When passing a pointer to an i_direct entry, to code which
	 * also handles indirect entries and so will shmem_swp_unmap,
	 * we must arrange for the preempt count to remain in balance.
	 * What kmap_atomic of a lowmem page does depends on config
	 * and architecture, so pretend to kmap_atomic some lowmem page.
	 */
	(void) kmap_atomic(ZERO_PAGE(0), KM_USER1);
}

static inline void shmem_swp_unmap(swp_entry_t *entry)
{
	kunmap_atomic(entry, KM_USER1);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info  __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

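/*
 * Give back @pages worth of block accounting: callers use this when an
 * allocated page (data or index) is freed, or turned out not to be needed.
 */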
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
	}
}

static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the size of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

/**
 * shmem_swp_entry - find the swap vector position in the info structure
 * @info:  info structure for the inode
 * @index: index of the page to find
 * @page:  optional page to add to the structure. Has to be preset to
 *         all zeros
 *
 * If there is no space allocated yet it will return NULL when
 * page is NULL, else it will use the page for the needed block,
 * setting it to NULL on return to indicate that it has been used.
 *
 * The swap vector is organized the following way:
 *
 * There are SHMEM_NR_DIRECT entries directly stored in the
 * shmem_inode_info structure. So small files do not need an additional
 * allocation.
 *
 * For pages with index > SHMEM_NR_DIRECT there is the pointer
 * i_indirect which points to a page which holds in the first half
 * doubly indirect blocks, in the second half triple indirect blocks:
 *
 * For an artificial ENTRIES_PER_PAGE = 4 this would lead to the
 * following layout (for SHMEM_NR_DIRECT == 16):
 *
 * i_indirect -> dir --> 16-19
 *            |      +-> 20-23
 *            |
 *            +-->dir2 --> 24-27
 *            |        +-> 28-31
 *            |        +-> 32-35
 *            |        +-> 36-39
 *            |
 *            +-->dir3 --> 40-43
 *                     +-> 44-47
 *                     +-> 48-51
 *                     +-> 52-55
 */
static swp_entry_t *shmem_swp_entry(struct shmem_inode_info *info, unsigned long index, struct page **page)
{
	unsigned long offset;
	struct page **dir;
	struct page *subdir;

	if (index < SHMEM_NR_DIRECT) {
		shmem_swp_balance_unmap();
		return info->i_direct+index;
	}
	if (!info->i_indirect) {
		if (page) {
			info->i_indirect = *page;
			*page = NULL;
		}
		return NULL;			/* need another page */
	}

	index -= SHMEM_NR_DIRECT;
	offset = index % ENTRIES_PER_PAGE;
	index /= ENTRIES_PER_PAGE;
	dir = shmem_dir_map(info->i_indirect);

	if (index >= ENTRIES_PER_PAGE/2) {
		index -= ENTRIES_PER_PAGE/2;
		dir += ENTRIES_PER_PAGE/2 + index/ENTRIES_PER_PAGE;
		index %= ENTRIES_PER_PAGE;
		subdir = *dir;
		if (!subdir) {
			if (page) {
				*dir = *page;
				*page = NULL;
			}
			shmem_dir_unmap(dir);
			return NULL;		/* need another page */
		}
		shmem_dir_unmap(dir);
		dir = shmem_dir_map(subdir);
	}

	dir += index;
	subdir = *dir;
	if (!subdir) {
		if (!page || !(subdir = *page)) {
			shmem_dir_unmap(dir);
			return NULL;		/* need a page */
		}
		*dir = subdir;
		*page = NULL;
	}
	shmem_dir_unmap(dir);
	return shmem_swp_map(subdir) + offset;
}
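
/*
 * A worked example of the lookup above, using the artificial layout in
 * the comment (ENTRIES_PER_PAGE == 4, SHMEM_NR_DIRECT == 16): for page
 * index 30, index becomes 30 - 16 = 14, offset = 14 % 4 = 2, index =
 * 14 / 4 = 3.  Since 3 >= 2 we are in the triple-indirect half, so
 * index -= 2 leaves 1, dir += 2 + 1/4 = 2 selects dir2, and index %= 4
 * leaves 1: the second leaf of dir2 covers 28-31, where offset 2 is
 * exactly page 30.
 */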

static void shmem_swp_set(struct shmem_inode_info *info, swp_entry_t *entry, unsigned long value)
{
	long incdec = value? 1: -1;

	entry->val = value;
	info->swapped += incdec;
	if ((unsigned long)(entry - info->i_direct) >= SHMEM_NR_DIRECT) {
		struct page *page = kmap_atomic_to_page(entry);
		set_page_private(page, page_private(page) + incdec);
	}
}

/**
 * shmem_swp_alloc - get the position of the swap entry for the page.
 * @info:	info structure for the inode
 * @index:	index of the page to find
 * @sgp:	check and recheck i_size? skip allocation?
 * @gfp:	gfp mask to use for any page allocation
 *
 * If the entry does not exist, allocate it.
 */
static swp_entry_t *shmem_swp_alloc(struct shmem_inode_info *info,
			unsigned long index, enum sgp_type sgp, gfp_t gfp)
{
	struct inode *inode = &info->vfs_inode;
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	struct page *page = NULL;
	swp_entry_t *entry;

	if (sgp != SGP_WRITE &&
	    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return ERR_PTR(-EINVAL);

	while (!(entry = shmem_swp_entry(info, index, &page))) {
		if (sgp == SGP_READ)
			return shmem_swp_map(ZERO_PAGE(0));
		/*
		 * Test used_blocks against 1 less max_blocks, since we have 1 data
		 * page (and perhaps indirect index pages) yet to allocate:
		 * a waste to allocate index if we cannot allocate data.
		 */
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks - 1) >= 0)
				return ERR_PTR(-ENOSPC);
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		}

		spin_unlock(&info->lock);
		page = shmem_dir_alloc(gfp);
		spin_lock(&info->lock);

		if (!page) {
			shmem_free_blocks(inode, 1);
			return ERR_PTR(-ENOMEM);
		}
		if (sgp != SGP_WRITE &&
		    ((loff_t) index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
			entry = ERR_PTR(-EINVAL);
			break;
		}
		if (info->next_index <= index)
			info->next_index = index + 1;
	}
	if (page) {
		/* another task gave its page, or truncated the file */
		shmem_free_blocks(inode, 1);
		shmem_dir_free(page);
	}
	if (info->next_index <= index && !IS_ERR(entry))
		info->next_index = index + 1;
	return entry;
}

/**
 * shmem_free_swp - free some swap entries in a directory
 * @dir:        pointer to the directory
 * @edir:       pointer after last entry of the directory
 * @punch_lock: pointer to spinlock when needed for the holepunch case
 */
static int shmem_free_swp(swp_entry_t *dir, swp_entry_t *edir,
						spinlock_t *punch_lock)
{
	spinlock_t *punch_unlock = NULL;
	swp_entry_t *ptr;
	int freed = 0;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val) {
			if (unlikely(punch_lock)) {
				punch_unlock = punch_lock;
				punch_lock = NULL;
				spin_lock(punch_unlock);
				if (!ptr->val)
					continue;
			}
			free_swap_and_cache(*ptr);
			*ptr = (swp_entry_t){0};
			freed++;
		}
	}
	if (punch_unlock)
		spin_unlock(punch_unlock);
	return freed;
}

static int shmem_map_and_free_swp(struct page *subdir, int offset,
		int limit, struct page ***dir, spinlock_t *punch_lock)
{
	swp_entry_t *ptr;
	int freed = 0;

	ptr = shmem_swp_map(subdir);
	for (; offset < limit; offset += LATENCY_LIMIT) {
		int size = limit - offset;
		if (size > LATENCY_LIMIT)
			size = LATENCY_LIMIT;
		freed += shmem_free_swp(ptr+offset, ptr+offset+size,
							punch_lock);
		if (need_resched()) {
			shmem_swp_unmap(ptr);
			if (*dir) {
				shmem_dir_unmap(*dir);
				*dir = NULL;
			}
			cond_resched();
			ptr = shmem_swp_map(subdir);
		}
	}
	shmem_swp_unmap(ptr);
	return freed;
}

static void shmem_free_pages(struct list_head *next)
{
	struct page *page;
	int freed = 0;

	do {
		page = container_of(next, struct page, lru);
		next = next->next;
		shmem_dir_free(page);
		freed++;
		if (freed >= LATENCY_LIMIT) {
			cond_resched();
			freed = 0;
		}
	} while (next);
}

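/*
 * Free the pages and swap entries backing the range [start, end] of the
 * file, for full truncation or hole-punching.  The full truncation case
 * (end == -1) can walk the swap vector without info->lock once next_index
 * has been lowered; the holepunch case must keep taking the lock, as the
 * long comment inside explains.
 */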
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	unsigned long diroff;
	struct page **dir;
	struct page *topdir;
	struct page *middir;
	struct page *subdir;
	swp_entry_t *ptr;
	LIST_HEAD(pages_to_free);
	long nr_pages_to_free = 0;
	long nr_swaps_freed = 0;
	int offset;
	int freed;
	int punch_hole;
	spinlock_t *needs_lock;
	spinlock_t *punch_lock;
	unsigned long upper_limit;

	truncate_inode_pages_range(inode->i_mapping, start, end);

	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
	idx = (start + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (idx >= info->next_index)
		return;

	spin_lock(&info->lock);
	info->flags |= SHMEM_TRUNCATE;
	if (likely(end == (loff_t) -1)) {
		limit = info->next_index;
		upper_limit = SHMEM_MAX_INDEX;
		info->next_index = idx;
		needs_lock = NULL;
		punch_hole = 0;
	} else {
		if (end + 1 >= inode->i_size) {	/* we may free a little more */
			limit = (inode->i_size + PAGE_CACHE_SIZE - 1) >>
							PAGE_CACHE_SHIFT;
			upper_limit = SHMEM_MAX_INDEX;
		} else {
			limit = (end + 1) >> PAGE_CACHE_SHIFT;
			upper_limit = limit;
		}
		needs_lock = &info->lock;
		punch_hole = 1;
	}

	topdir = info->i_indirect;
	if (topdir && idx <= SHMEM_NR_DIRECT && !punch_hole) {
		info->i_indirect = NULL;
		nr_pages_to_free++;
		list_add(&topdir->lru, &pages_to_free);
	}
	spin_unlock(&info->lock);

	if (info->swapped && idx < SHMEM_NR_DIRECT) {
		ptr = info->i_direct;
		size = limit;
		if (size > SHMEM_NR_DIRECT)
			size = SHMEM_NR_DIRECT;
		nr_swaps_freed = shmem_free_swp(ptr+idx, ptr+size, needs_lock);
	}

	/*
	 * If there are no indirect blocks or we are punching a hole
	 * below indirect blocks, nothing to be done.
	 */
	if (!topdir || limit <= SHMEM_NR_DIRECT)
		goto done2;

	/*
	 * The truncation case has already dropped info->lock, and we're safe
	 * because i_size and next_index have already been lowered, preventing
	 * access beyond.  But in the punch_hole case, we still need to take
	 * the lock when updating the swap directory, because there might be
	 * racing accesses by shmem_getpage(SGP_CACHE), shmem_unuse_inode or
	 * shmem_writepage.  However, whenever we find we can remove a whole
	 * directory page (not at the misaligned start or end of the range),
	 * we first NULLify its pointer in the level above, and then have no
	 * need to take the lock when updating its contents: needs_lock and
	 * punch_lock (either pointing to info->lock or NULL) manage this.
	 */

	upper_limit -= SHMEM_NR_DIRECT;
	limit -= SHMEM_NR_DIRECT;
	idx = (idx > SHMEM_NR_DIRECT)? (idx - SHMEM_NR_DIRECT): 0;
	offset = idx % ENTRIES_PER_PAGE;
	idx -= offset;

	dir = shmem_dir_map(topdir);
	stage = ENTRIES_PER_PAGEPAGE/2;
	if (idx < ENTRIES_PER_PAGEPAGE/2) {
		middir = topdir;
		diroff = idx/ENTRIES_PER_PAGE;
	} else {
		dir += ENTRIES_PER_PAGE/2;
		dir += (idx - ENTRIES_PER_PAGEPAGE/2)/ENTRIES_PER_PAGEPAGE;
		while (stage <= idx)
			stage += ENTRIES_PER_PAGEPAGE;
		middir = *dir;
		if (*dir) {
			diroff = ((idx - ENTRIES_PER_PAGEPAGE/2) %
				ENTRIES_PER_PAGEPAGE) / ENTRIES_PER_PAGE;
			if (!diroff && !offset && upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(middir);
		} else {
			diroff = 0;
			offset = 0;
			idx = stage;
		}
	}

	for (; idx < limit; idx += ENTRIES_PER_PAGE, diroff++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(topdir) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto done1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			middir = *dir;
			if (punch_hole)
				needs_lock = &info->lock;
			if (upper_limit >= stage) {
				if (needs_lock) {
					spin_lock(needs_lock);
					*dir = NULL;
					spin_unlock(needs_lock);
					needs_lock = NULL;
				} else
					*dir = NULL;
				nr_pages_to_free++;
				list_add(&middir->lru, &pages_to_free);
			}
			shmem_dir_unmap(dir);
			cond_resched();
			dir = shmem_dir_map(middir);
			diroff = 0;
		}
		punch_lock = needs_lock;
		subdir = dir[diroff];
		if (subdir && !offset && upper_limit-idx >= ENTRIES_PER_PAGE) {
			if (needs_lock) {
				spin_lock(needs_lock);
				dir[diroff] = NULL;
				spin_unlock(needs_lock);
				punch_lock = NULL;
			} else
				dir[diroff] = NULL;
			nr_pages_to_free++;
			list_add(&subdir->lru, &pages_to_free);
		}
		if (subdir && page_private(subdir) /* has swap entries */) {
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			freed = shmem_map_and_free_swp(subdir,
					offset, size, &dir, punch_lock);
			if (!dir)
				dir = shmem_dir_map(middir);
			nr_swaps_freed += freed;
			if (offset || punch_lock) {
				spin_lock(&info->lock);
				set_page_private(subdir,
					page_private(subdir) - freed);
				spin_unlock(&info->lock);
			} else
				BUG_ON(page_private(subdir) != freed);
		}
		offset = 0;
	}
done1:
	shmem_dir_unmap(dir);
done2:
	if (inode->i_mapping->nrpages && (info->flags & SHMEM_PAGEIN)) {
		/*
		 * Call truncate_inode_pages again: racing shmem_unuse_inode
		 * may have swizzled a page in from swap since
		 * truncate_pagecache or generic_delete_inode did it, before we
		 * lowered next_index.  Also, though shmem_getpage checks
		 * i_size before adding to cache, no recheck after: so fix the
		 * narrow window there too.
		 */
		truncate_inode_pages_range(inode->i_mapping, start, end);
	}

	spin_lock(&info->lock);
	info->flags &= ~SHMEM_TRUNCATE;
	info->swapped -= nr_swaps_freed;
	if (nr_pages_to_free)
		shmem_free_blocks(inode, nr_pages_to_free);
	shmem_recalc_inode(inode);
	spin_unlock(&info->lock);

	/*
	 * Empty swap vector directory pages to be freed?
	 */
	if (!list_empty(&pages_to_free)) {
		pages_to_free.prev->next = NULL;
		shmem_free_pages(pages_to_free.next);
	}
}
EXPORT_SYMBOL_GPL(shmem_truncate_range);

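/*
 * shmem handles size changes itself rather than using the generic
 * truncate path: it holds any partially truncated page in memory across
 * the truncation, unmaps mappings beyond the new size, and unmaps again
 * after shmem_truncate_range to catch racily COWed private pages.
 */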
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;
		struct page *page = NULL;

		if (newsize < oldsize) {
			/*
			 * If truncating down to a partial page, then
			 * if that page is already allocated, hold it
			 * in memory until the truncation is over, so
			 * truncate_partial_page cannot miss it were
			 * it assigned to swap.
			 */
			if (newsize & (PAGE_CACHE_SIZE-1)) {
				(void) shmem_getpage(inode,
					newsize >> PAGE_CACHE_SHIFT,
						&page, SGP_READ, NULL);
				if (page)
					unlock_page(page);
			}
			/*
			 * Reset SHMEM_PAGEIN flag so that shmem_truncate can
			 * detect if any pages might have been added to cache
			 * after truncate_inode_pages.  But we needn't bother
			 * if it's being fully truncated to zero-length: the
			 * nrpages check is efficient enough in that case.
			 */
			if (newsize) {
				struct shmem_inode_info *info = SHMEM_I(inode);
				spin_lock(&info->lock);
				info->flags &= ~SHMEM_PAGEIN;
				spin_unlock(&info->lock);
			}
		}
		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
		if (page)
			page_cache_release(page);
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

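/*
 * Linear scan for @entry in the block of swap entries [dir, edir):
 * returns its offset within the block, or -1 if it is not there.
 */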
static inline int shmem_find_swp(swp_entry_t entry, swp_entry_t *dir, swp_entry_t *edir)
{
	swp_entry_t *ptr;

	for (ptr = dir; ptr < edir; ptr++) {
		if (ptr->val == entry.val)
			return ptr - dir;
	}
	return -1;
}

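/*
 * Search one inode's swap vector for @entry; on success move @page back
 * into the page cache in its place.  Called under shmem_swaplist_mutex.
 * Returns 0 if the entry was not found, 1 once it has been found and
 * replaced, or -ENOMEM if adding to the page cache failed for lack of
 * memory.
 */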
static int shmem_unuse_inode(struct shmem_inode_info *info, swp_entry_t entry, struct page *page)
{
	struct address_space *mapping;
	unsigned long idx;
	unsigned long size;
	unsigned long limit;
	unsigned long stage;
	struct page **dir;
	struct page *subdir;
	swp_entry_t *ptr;
	int offset;
	int error;

	idx = 0;
	ptr = info->i_direct;
	spin_lock(&info->lock);
	if (!info->swapped) {
		list_del_init(&info->swaplist);
		goto lost2;
	}
	limit = info->next_index;
	size = limit;
	if (size > SHMEM_NR_DIRECT)
		size = SHMEM_NR_DIRECT;
	offset = shmem_find_swp(entry, ptr, ptr+size);
	if (offset >= 0) {
		shmem_swp_balance_unmap();
		goto found;
	}
	if (!info->i_indirect)
		goto lost2;

	dir = shmem_dir_map(info->i_indirect);
	stage = SHMEM_NR_DIRECT + ENTRIES_PER_PAGEPAGE/2;

	for (idx = SHMEM_NR_DIRECT; idx < limit; idx += ENTRIES_PER_PAGE, dir++) {
		if (unlikely(idx == stage)) {
			shmem_dir_unmap(dir-1);
			if (cond_resched_lock(&info->lock)) {
				/* check it has not been truncated */
				if (limit > info->next_index) {
					limit = info->next_index;
					if (idx >= limit)
						goto lost2;
				}
			}
			dir = shmem_dir_map(info->i_indirect) +
			    ENTRIES_PER_PAGE/2 + idx/ENTRIES_PER_PAGEPAGE;
			while (!*dir) {
				dir++;
				idx += ENTRIES_PER_PAGEPAGE;
				if (idx >= limit)
					goto lost1;
			}
			stage = idx + ENTRIES_PER_PAGEPAGE;
			subdir = *dir;
			shmem_dir_unmap(dir);
			dir = shmem_dir_map(subdir);
		}
		subdir = *dir;
		if (subdir && page_private(subdir)) {
			ptr = shmem_swp_map(subdir);
			size = limit - idx;
			if (size > ENTRIES_PER_PAGE)
				size = ENTRIES_PER_PAGE;
			offset = shmem_find_swp(entry, ptr, ptr+size);
			shmem_swp_unmap(ptr);
			if (offset >= 0) {
				shmem_dir_unmap(dir);
				ptr = shmem_swp_map(subdir);
				goto found;
			}
		}
	}
lost1:
	shmem_dir_unmap(dir-1);
lost2:
	spin_unlock(&info->lock);
	return 0;
found:
	idx += offset;
	ptr += offset;

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.  We
	 * could avoid doing it if inode NULL; or use this minor optimization.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	mapping = info->vfs_inode.i_mapping;
	error = add_to_page_cache_locked(page, mapping, idx, GFP_NOWAIT);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, ptr, 0);
		swap_free(entry);
		error = 1;	/* not an error, but entry was found */
	}
	shmem_swp_unmap(ptr);
	spin_unlock(&info->lock);
	return error;
}

/*
 * shmem_unuse() searches for a possibly swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t entry, struct page *page)
{
	struct list_head *p, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu,
	 * it's okay if sometimes we get rescheduled after this.
	 */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		goto uncharge;
	radix_tree_preload_end();

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(p, next, &shmem_swaplist) {
		info = list_entry(p, struct shmem_inode_info, swaplist);
		found = shmem_unuse_inode(info, entry, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

uncharge:
	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t *entry, swap;
	struct address_space *mapping;
	unsigned long index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}
	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	spin_lock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);

	if (index >= info->next_index) {
		BUG_ON(!(info->flags & SHMEM_TRUNCATE));
		goto unlock;
	}
	entry = shmem_swp_entry(info, index, NULL);
	if (entry->val) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		free_swap_and_cache(*entry);
		shmem_swp_set(info, entry, 0);
	}
	shmem_recalc_inode(inode);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_swp_set(info, entry, swap.val);
		shmem_swp_unmap(entry);
		swap_shmem_alloc(swap);
		spin_unlock(&info->lock);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	shmem_swp_unmap(entry);
unlock:
	spin_unlock(&info->lock);
	/*
	 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
	 * clear SWAP_HAS_CACHE flag.
	 */
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;
	struct page *page;

	spol = mpol_cond_copy(&mpol,
				mpol_shared_policy_lookup(&info->policy, idx));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	page = swapin_readahead(entry, gfp, &pvma, 0);
	return page;
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = idx;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, idx);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *p)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return swapin_readahead(entry, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, unsigned long idx)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty since we also free the swap
 * entry since a page cannot live in both the swap and page cache
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t idx,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *page;
	struct page *prealloc_page = NULL;
	swp_entry_t *entry;
	swp_entry_t swap;
	int error;
	int ret;

	if (idx >= SHMEM_MAX_INDEX)
		return -EFBIG;
repeat:
	page = find_lock_page(mapping, idx);
	if (page) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(!PageUptodate(page));
		goto done;
	}

	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu.
	 */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	radix_tree_preload_end();

	if (sgp != SGP_READ && !prealloc_page) {
		prealloc_page = shmem_alloc_page(gfp, info, idx);
		if (prealloc_page) {
			SetPageSwapBacked(prealloc_page);
			if (mem_cgroup_cache_charge(prealloc_page,
					current->mm, GFP_KERNEL)) {
				page_cache_release(prealloc_page);
				prealloc_page = NULL;
			}
		}
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	entry = shmem_swp_alloc(info, idx, sgp, gfp);
	if (IS_ERR(entry)) {
		spin_unlock(&info->lock);
		error = PTR_ERR(entry);
		goto out;
	}
	swap = *entry;

	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, idx);
			if (!page) {
				spin_lock(&info->lock);
				entry = shmem_swp_alloc(info, idx, sgp, gfp);
				if (IS_ERR(entry))
					error = PTR_ERR(entry);
				else {
					if (entry->val == swap.val)
						error = -ENOMEM;
					shmem_swp_unmap(entry);
				}
				spin_unlock(&info->lock);
				if (error)
					goto out;
				goto repeat;
			}
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		if (PageWriteback(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			wait_on_page_writeback(page);
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		if (!PageUptodate(page)) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			unlock_page(page);
			page_cache_release(page);
			error = -EIO;
			goto out;
		}

		error = add_to_page_cache_locked(page, mapping,
						 idx, GFP_NOWAIT);
		if (error) {
			shmem_swp_unmap(entry);
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
						page, current->mm, gfp);
				if (error) {
					unlock_page(page);
					page_cache_release(page);
					goto out;
				}
			}
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}

		info->flags |= SHMEM_PAGEIN;
		shmem_swp_set(info, entry, 0);
		shmem_swp_unmap(entry);
		delete_from_swap_cache(page);
		spin_unlock(&info->lock);
		set_page_dirty(page);
		swap_free(swap);

	} else if (sgp == SGP_READ) {
		shmem_swp_unmap(entry);
		page = find_get_page(mapping, idx);
		if (page && !trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		spin_unlock(&info->lock);

	} else if (prealloc_page) {
		shmem_swp_unmap(entry);
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0 ||
			    shmem_acct_block(info->flags))
				goto nospace;
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		} else if (shmem_acct_block(info->flags))
			goto nospace;

		page = prealloc_page;
		prealloc_page = NULL;

		entry = shmem_swp_alloc(info, idx, sgp, gfp);
		if (IS_ERR(entry))
			error = PTR_ERR(entry);
		else {
			swap = *entry;
			shmem_swp_unmap(entry);
		}
		ret = error || swap.val;
		if (ret)
			mem_cgroup_uncharge_cache_page(page);
		else
			ret = add_to_page_cache_lru(page, mapping,
						idx, GFP_NOWAIT);
		/*
		 * At add_to_page_cache_lru() failure,
		 * uncharge will be done automatically.
		 */
		if (ret) {
			shmem_unacct_blocks(info->flags, 1);
			shmem_free_blocks(inode, 1);
			spin_unlock(&info->lock);
			page_cache_release(page);
			if (error)
				goto out;
			goto repeat;
		}

		info->flags |= SHMEM_PAGEIN;
		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);

	} else {
		spin_unlock(&info->lock);
		error = -ENOMEM;
		goto out;
	}
done:
	*pagep = page;
	error = 0;
out:
	if (prealloc_page) {
		mem_cgroup_uncharge_cache_page(prealloc_page);
		page_cache_release(prealloc_page);
	}
	return error;

nospace:
	/*
	 * Perhaps the page was brought in from swap between find_lock_page
	 * and taking info->lock?  We allow for that at add_to_page_cache_lru,
	 * but must also avoid reporting a spurious ENOSPC while working on a
	 * full tmpfs.
	 */
	page = find_get_page(mapping, idx);
	spin_unlock(&info->lock);
	if (page) {
		page_cache_release(page);
		goto repeat;
	}
	error = -ENOSPC;
	goto out;
}

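/*
 * Fault handler for shmem mappings: looks the page up (or allocates it)
 * via shmem_getpage(SGP_CACHE) and returns it still locked, hence the
 * VM_FAULT_LOCKED preset in ret below.
 */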
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(i)->policy, vma, new);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *i = vma->vm_file->f_path.dentry->d_inode;
	unsigned long idx;

	idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(i)->policy, idx);
}
#endif

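/*
 * Backs the SHM_LOCK/SHM_UNLOCK ipc calls: locked shmem pages are not
 * mlocked into the caller's mm, they are instead marked unevictable so
 * that vmscan skips them while the lock is held.
 */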
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

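/*
 * Writes come in through the generic file write path, which calls the
 * ->write_begin/->write_end pair above around each page-sized copy:
 * write_begin returns the target page locked via shmem_getpage
 * (SGP_WRITE may extend i_size), and write_end dirties and releases it.
 */
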
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long index, offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		unsigned long end_index, nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

bcd78e49
HD
1727static ssize_t shmem_file_aio_read(struct kiocb *iocb,
1728 const struct iovec *iov, unsigned long nr_segs, loff_t pos)
1729{
1730 struct file *filp = iocb->ki_filp;
1731 ssize_t retval;
1732 unsigned long seg;
1733 size_t count;
1734 loff_t *ppos = &iocb->ki_pos;
1735
1736 retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
1737 if (retval)
1738 return retval;
1739
1740 for (seg = 0; seg < nr_segs; seg++) {
1741 read_descriptor_t desc;
1742
1743 desc.written = 0;
1744 desc.arg.buf = iov[seg].iov_base;
1745 desc.count = iov[seg].iov_len;
1746 if (desc.count == 0)
1747 continue;
1748 desc.error = 0;
1749 do_shmem_file_read(filp, ppos, &desc, file_read_actor);
1750 retval += desc.written;
1751 if (desc.error) {
1752 retval = retval ?: desc.error;
1753 break;
1754 }
1755 if (desc.count > 0)
1756 break;
1757 }
1758 return retval;
1da177e4
LT
1759}
1760
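/*
 * Editor's sketch (illustrative userspace; the path and buffers are
 * hypothetical): a readv(2) on a tmpfs file reaches shmem_file_aio_read()
 * via do_sync_read() and walks the segment loop above once per iovec,
 * each segment with its own read_descriptor_t.
 */
#if 0
#include <fcntl.h>
#include <sys/uio.h>

static ssize_t tmpfs_readv_example(void)
{
	int fd = open("/dev/shm/f", O_RDONLY);	/* hypothetical file */
	char buf0[100], buf1[4096];
	struct iovec iov[2] = {
		{ .iov_base = buf0, .iov_len = sizeof(buf0) },
		{ .iov_base = buf1, .iov_len = sizeof(buf1) },
	};

	return readv(fd, iov, 2);
}
#endif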
708e3508
HD
1761static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1762 struct pipe_inode_info *pipe, size_t len,
1763 unsigned int flags)
1764{
1765 struct address_space *mapping = in->f_mapping;
71f0e07a 1766 struct inode *inode = mapping->host;
708e3508
HD
1767 unsigned int loff, nr_pages, req_pages;
1768 struct page *pages[PIPE_DEF_BUFFERS];
1769 struct partial_page partial[PIPE_DEF_BUFFERS];
1770 struct page *page;
1771 pgoff_t index, end_index;
1772 loff_t isize, left;
1773 int error, page_nr;
1774 struct splice_pipe_desc spd = {
1775 .pages = pages,
1776 .partial = partial,
1777 .flags = flags,
1778 .ops = &page_cache_pipe_buf_ops,
1779 .spd_release = spd_release_page,
1780 };
1781
71f0e07a 1782 isize = i_size_read(inode);
708e3508
HD
1783 if (unlikely(*ppos >= isize))
1784 return 0;
1785
1786 left = isize - *ppos;
1787 if (unlikely(left < len))
1788 len = left;
1789
1790 if (splice_grow_spd(pipe, &spd))
1791 return -ENOMEM;
1792
1793 index = *ppos >> PAGE_CACHE_SHIFT;
1794 loff = *ppos & ~PAGE_CACHE_MASK;
1795 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1796 nr_pages = min(req_pages, pipe->buffers);
1797
708e3508
HD
1798 spd.nr_pages = find_get_pages_contig(mapping, index,
1799 nr_pages, spd.pages);
1800 index += spd.nr_pages;
708e3508 1801 error = 0;
708e3508 1802
71f0e07a 1803 while (spd.nr_pages < nr_pages) {
71f0e07a
HD
1804 error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
1805 if (error)
1806 break;
1807 unlock_page(page);
708e3508
HD
1808 spd.pages[spd.nr_pages++] = page;
1809 index++;
1810 }
1811
708e3508
HD
1812 index = *ppos >> PAGE_CACHE_SHIFT;
1813 nr_pages = spd.nr_pages;
1814 spd.nr_pages = 0;
71f0e07a 1815
708e3508
HD
1816 for (page_nr = 0; page_nr < nr_pages; page_nr++) {
1817 unsigned int this_len;
1818
1819 if (!len)
1820 break;
1821
708e3508
HD
1822 this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
1823 page = spd.pages[page_nr];
1824
71f0e07a 1825 if (!PageUptodate(page) || page->mapping != mapping) {
71f0e07a
HD
1826 error = shmem_getpage(inode, index, &page,
1827 SGP_CACHE, NULL);
1828 if (error)
708e3508 1829 break;
71f0e07a
HD
1830 unlock_page(page);
1831 page_cache_release(spd.pages[page_nr]);
1832 spd.pages[page_nr] = page;
708e3508 1833 }
71f0e07a
HD
1834
1835 isize = i_size_read(inode);
708e3508
HD
1836 end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
1837 if (unlikely(!isize || index > end_index))
1838 break;
1839
708e3508
HD
1840 if (end_index == index) {
1841 unsigned int plen;
1842
708e3508
HD
1843 plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
1844 if (plen <= loff)
1845 break;
1846
708e3508
HD
1847 this_len = min(this_len, plen - loff);
1848 len = this_len;
1849 }
1850
1851 spd.partial[page_nr].offset = loff;
1852 spd.partial[page_nr].len = this_len;
1853 len -= this_len;
1854 loff = 0;
1855 spd.nr_pages++;
1856 index++;
1857 }
1858
708e3508
HD
1859 while (page_nr < nr_pages)
1860 page_cache_release(spd.pages[page_nr++]);
708e3508
HD
1861
1862 if (spd.nr_pages)
1863 error = splice_to_pipe(pipe, &spd);
1864
1865 splice_shrink_spd(pipe, &spd);
1866
1867 if (error > 0) {
1868 *ppos += error;
1869 file_accessed(in);
1870 }
1871 return error;
1872}
1873
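/*
 * Editor's sketch (illustrative userspace; path is hypothetical):
 * shmem_file_splice_read() above is what services a splice(2) from a
 * tmpfs file into a pipe, filling pipe buffers straight from the page
 * cache.
 */
#if 0
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static ssize_t tmpfs_splice_example(void)
{
	int fd = open("/dev/shm/data", O_RDONLY);
	int pfd[2];

	pipe(pfd);
	/* move up to 64kB into the pipe without copying to userspace */
	return splice(fd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
}
#endif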
726c3342 1874static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1da177e4 1875{
726c3342 1876 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
1da177e4
LT
1877
1878 buf->f_type = TMPFS_MAGIC;
1879 buf->f_bsize = PAGE_CACHE_SIZE;
1880 buf->f_namelen = NAME_MAX;
0edd73b3 1881 if (sbinfo->max_blocks) {
1da177e4 1882 buf->f_blocks = sbinfo->max_blocks;
7e496299
TC
1883 buf->f_bavail = buf->f_bfree =
1884 sbinfo->max_blocks - percpu_counter_sum(&sbinfo->used_blocks);
0edd73b3
HD
1885 }
1886 if (sbinfo->max_inodes) {
1da177e4
LT
1887 buf->f_files = sbinfo->max_inodes;
1888 buf->f_ffree = sbinfo->free_inodes;
1da177e4
LT
1889 }
1890 /* else leave those fields 0 like simple_statfs */
1891 return 0;
1892}
1893
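/*
 * Editor's sketch (illustrative userspace counterpart): the fields set
 * above are what statfs(2) reports for a tmpfs mount.
 */
#if 0
#include <sys/vfs.h>

static void tmpfs_statfs_example(void)
{
	struct statfs st;

	statfs("/dev/shm", &st);	/* st.f_type == TMPFS_MAGIC */
	/* f_blocks/f_bfree reflect max_blocks minus used_blocks, and are
	 * left zero on an instance mounted without a size limit */
}
#endif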
1894/*
 1895 * File creation. Allocate an inode, and we're done.
1896 */
1897static int
1898shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
1899{
0b0a0806 1900 struct inode *inode;
1da177e4
LT
1901 int error = -ENOSPC;
1902
454abafe 1903 inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
1da177e4 1904 if (inode) {
2a7dba39
EP
1905 error = security_inode_init_security(inode, dir,
1906 &dentry->d_name, NULL,
1907 NULL, NULL);
570bc1c2
SS
1908 if (error) {
1909 if (error != -EOPNOTSUPP) {
1910 iput(inode);
1911 return error;
1912 }
39f0247d 1913 }
1c7c474c
CH
1914#ifdef CONFIG_TMPFS_POSIX_ACL
1915 error = generic_acl_init(inode, dir);
39f0247d
AG
1916 if (error) {
1917 iput(inode);
1918 return error;
570bc1c2 1919 }
718deb6b
AV
1920#else
1921 error = 0;
1c7c474c 1922#endif
1da177e4
LT
1923 dir->i_size += BOGO_DIRENT_SIZE;
1924 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
1925 d_instantiate(dentry, inode);
1926 dget(dentry); /* Extra count - pin the dentry in core */
1da177e4
LT
1927 }
1928 return error;
1929}
1930
1931static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
1932{
1933 int error;
1934
1935 if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
1936 return error;
d8c76e6f 1937 inc_nlink(dir);
1da177e4
LT
1938 return 0;
1939}
1940
1941static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
1942 struct nameidata *nd)
1943{
1944 return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
1945}
1946
1947/*
 1948 * Link a file.
1949 */
1950static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
1951{
1952 struct inode *inode = old_dentry->d_inode;
5b04c689 1953 int ret;
1da177e4
LT
1954
1955 /*
1956 * No ordinary (disk based) filesystem counts links as inodes;
1957 * but each new link needs a new dentry, pinning lowmem, and
1958 * tmpfs dentries cannot be pruned until they are unlinked.
1959 */
5b04c689
PE
1960 ret = shmem_reserve_inode(inode->i_sb);
1961 if (ret)
1962 goto out;
1da177e4
LT
1963
1964 dir->i_size += BOGO_DIRENT_SIZE;
1965 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
d8c76e6f 1966 inc_nlink(inode);
7de9c6ee 1967 ihold(inode); /* New dentry reference */
1da177e4
LT
1968 dget(dentry); /* Extra pinning count for the created dentry */
1969 d_instantiate(dentry, inode);
5b04c689
PE
1970out:
1971 return ret;
1da177e4
LT
1972}
1973
1974static int shmem_unlink(struct inode *dir, struct dentry *dentry)
1975{
1976 struct inode *inode = dentry->d_inode;
1977
5b04c689
PE
1978 if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
1979 shmem_free_inode(inode->i_sb);
1da177e4
LT
1980
1981 dir->i_size -= BOGO_DIRENT_SIZE;
1982 inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
9a53c3a7 1983 drop_nlink(inode);
1da177e4
LT
1984 dput(dentry); /* Undo the count from "create" - this does all the work */
1985 return 0;
1986}
1987
1988static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
1989{
1990 if (!simple_empty(dentry))
1991 return -ENOTEMPTY;
1992
9a53c3a7
DH
1993 drop_nlink(dentry->d_inode);
1994 drop_nlink(dir);
1da177e4
LT
1995 return shmem_unlink(dir, dentry);
1996}
1997
1998/*
 2000 * The VFS layer already does all the dentry stuff for rename;
 2001 * we just have to decrement the usage count for the target if
 2002 * it exists, so that the VFS layer correctly frees it when it
 2003 * gets overwritten.
2003 */
2004static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
2005{
2006 struct inode *inode = old_dentry->d_inode;
2007 int they_are_dirs = S_ISDIR(inode->i_mode);
2008
2009 if (!simple_empty(new_dentry))
2010 return -ENOTEMPTY;
2011
2012 if (new_dentry->d_inode) {
2013 (void) shmem_unlink(new_dir, new_dentry);
2014 if (they_are_dirs)
9a53c3a7 2015 drop_nlink(old_dir);
1da177e4 2016 } else if (they_are_dirs) {
9a53c3a7 2017 drop_nlink(old_dir);
d8c76e6f 2018 inc_nlink(new_dir);
1da177e4
LT
2019 }
2020
2021 old_dir->i_size -= BOGO_DIRENT_SIZE;
2022 new_dir->i_size += BOGO_DIRENT_SIZE;
2023 old_dir->i_ctime = old_dir->i_mtime =
2024 new_dir->i_ctime = new_dir->i_mtime =
2025 inode->i_ctime = CURRENT_TIME;
2026 return 0;
2027}
2028
2029static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
2030{
2031 int error;
2032 int len;
2033 struct inode *inode;
9276aad6 2034 struct page *page;
1da177e4
LT
2035 char *kaddr;
2036 struct shmem_inode_info *info;
2037
2038 len = strlen(symname) + 1;
2039 if (len > PAGE_CACHE_SIZE)
2040 return -ENAMETOOLONG;
2041
454abafe 2042 inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
1da177e4
LT
2043 if (!inode)
2044 return -ENOSPC;
2045
2a7dba39
EP
2046 error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
2047 NULL, NULL);
570bc1c2
SS
2048 if (error) {
2049 if (error != -EOPNOTSUPP) {
2050 iput(inode);
2051 return error;
2052 }
2053 error = 0;
2054 }
2055
1da177e4
LT
2056 info = SHMEM_I(inode);
2057 inode->i_size = len-1;
b09e0fa4 2058 if (len <= SHMEM_SYMLINK_INLINE_LEN) {
1da177e4 2059 /* do it inline */
b09e0fa4 2060 memcpy(info->inline_symlink, symname, len);
1da177e4
LT
2061 inode->i_op = &shmem_symlink_inline_operations;
2062 } else {
2063 error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
2064 if (error) {
2065 iput(inode);
2066 return error;
2067 }
14fcc23f 2068 inode->i_mapping->a_ops = &shmem_aops;
1da177e4
LT
2069 inode->i_op = &shmem_symlink_inode_operations;
2070 kaddr = kmap_atomic(page, KM_USER0);
2071 memcpy(kaddr, symname, len);
2072 kunmap_atomic(kaddr, KM_USER0);
2073 set_page_dirty(page);
6746aff7 2074 unlock_page(page);
1da177e4
LT
2075 page_cache_release(page);
2076 }
1da177e4
LT
2077 dir->i_size += BOGO_DIRENT_SIZE;
2078 dir->i_ctime = dir->i_mtime = CURRENT_TIME;
2079 d_instantiate(dentry, inode);
2080 dget(dentry);
2081 return 0;
2082}
2083
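/*
 * Editor's note, in userspace terms (hypothetical paths): for
 * "ln -s ab /dev/shm/x" the short target is stored inline in
 * info->inline_symlink, while a target longer than
 * SHMEM_SYMLINK_INLINE_LEN is written into page 0 of the inode's
 * mapping; that is why the paged flavour below needs
 * follow_link/put_link to kmap and later release that page, and the
 * inline flavour does not.
 */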
cc314eef 2084static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
1da177e4 2085{
b09e0fa4 2086 nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
cc314eef 2087 return NULL;
1da177e4
LT
2088}
2089
cc314eef 2090static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
1da177e4
LT
2091{
2092 struct page *page = NULL;
2093 int res = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
2094 nd_set_link(nd, res ? ERR_PTR(res) : kmap(page));
d3602444
HD
2095 if (page)
2096 unlock_page(page);
cc314eef 2097 return page;
1da177e4
LT
2098}
2099
cc314eef 2100static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
1da177e4
LT
2101{
2102 if (!IS_ERR(nd_get_link(nd))) {
cc314eef 2103 struct page *page = cookie;
1da177e4
LT
2104 kunmap(page);
2105 mark_page_accessed(page);
2106 page_cache_release(page);
1da177e4
LT
2107 }
2108}
2109
b09e0fa4 2110#ifdef CONFIG_TMPFS_XATTR
46711810 2111/*
b09e0fa4
EP
2112 * Superblocks without xattr inode operations may get some security.* xattr
2113 * support from the LSM "for free". As soon as we have any other xattrs
39f0247d
AG
2114 * like ACLs, we also need to implement the security.* handlers at
2115 * filesystem level, though.
2116 */
2117
b09e0fa4
EP
2118static int shmem_xattr_get(struct dentry *dentry, const char *name,
2119 void *buffer, size_t size)
39f0247d 2120{
b09e0fa4
EP
2121 struct shmem_inode_info *info;
2122 struct shmem_xattr *xattr;
2123 int ret = -ENODATA;
39f0247d 2124
b09e0fa4
EP
2125 info = SHMEM_I(dentry->d_inode);
2126
2127 spin_lock(&info->lock);
2128 list_for_each_entry(xattr, &info->xattr_list, list) {
2129 if (strcmp(name, xattr->name))
2130 continue;
2131
2132 ret = xattr->size;
2133 if (buffer) {
2134 if (size < xattr->size)
2135 ret = -ERANGE;
2136 else
2137 memcpy(buffer, xattr->value, xattr->size);
2138 }
2139 break;
2140 }
2141 spin_unlock(&info->lock);
2142 return ret;
39f0247d
AG
2143}
2144
b09e0fa4
EP
2145static int shmem_xattr_set(struct dentry *dentry, const char *name,
2146 const void *value, size_t size, int flags)
39f0247d 2147{
b09e0fa4
EP
2148 struct inode *inode = dentry->d_inode;
2149 struct shmem_inode_info *info = SHMEM_I(inode);
2150 struct shmem_xattr *xattr;
2151 struct shmem_xattr *new_xattr = NULL;
2152 size_t len;
2153 int err = 0;
2154
2155 /* value == NULL means remove */
2156 if (value) {
2157 /* wrap around? */
2158 len = sizeof(*new_xattr) + size;
2159 if (len <= sizeof(*new_xattr))
2160 return -ENOMEM;
2161
2162 new_xattr = kmalloc(len, GFP_KERNEL);
2163 if (!new_xattr)
2164 return -ENOMEM;
2165
2166 new_xattr->name = kstrdup(name, GFP_KERNEL);
2167 if (!new_xattr->name) {
2168 kfree(new_xattr);
2169 return -ENOMEM;
2170 }
2171
2172 new_xattr->size = size;
2173 memcpy(new_xattr->value, value, size);
2174 }
2175
2176 spin_lock(&info->lock);
2177 list_for_each_entry(xattr, &info->xattr_list, list) {
2178 if (!strcmp(name, xattr->name)) {
2179 if (flags & XATTR_CREATE) {
2180 xattr = new_xattr;
2181 err = -EEXIST;
2182 } else if (new_xattr) {
2183 list_replace(&xattr->list, &new_xattr->list);
2184 } else {
2185 list_del(&xattr->list);
2186 }
2187 goto out;
2188 }
2189 }
2190 if (flags & XATTR_REPLACE) {
2191 xattr = new_xattr;
2192 err = -ENODATA;
2193 } else {
2194 list_add(&new_xattr->list, &info->xattr_list);
2195 xattr = NULL;
2196 }
2197out:
2198 spin_unlock(&info->lock);
2199 if (xattr)
2200 kfree(xattr->name);
2201 kfree(xattr);
2202 return err;
39f0247d
AG
2203}
2204
39f0247d 2205
bb435453 2206static const struct xattr_handler *shmem_xattr_handlers[] = {
b09e0fa4 2207#ifdef CONFIG_TMPFS_POSIX_ACL
1c7c474c
CH
2208 &generic_acl_access_handler,
2209 &generic_acl_default_handler,
b09e0fa4 2210#endif
39f0247d
AG
2211 NULL
2212};
b09e0fa4
EP
2213
2214static int shmem_xattr_validate(const char *name)
2215{
2216 struct { const char *prefix; size_t len; } arr[] = {
2217 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
2218 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
2219 };
2220 int i;
2221
2222 for (i = 0; i < ARRAY_SIZE(arr); i++) {
2223 size_t preflen = arr[i].len;
2224 if (strncmp(name, arr[i].prefix, preflen) == 0) {
2225 if (!name[preflen])
2226 return -EINVAL;
2227 return 0;
2228 }
2229 }
2230 return -EOPNOTSUPP;
2231}
2232
2233static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
2234 void *buffer, size_t size)
2235{
2236 int err;
2237
2238 /*
2239 * If this is a request for a synthetic attribute in the system.*
 2240 * namespace, use the generic infrastructure to resolve a handler
2241 * for it via sb->s_xattr.
2242 */
2243 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2244 return generic_getxattr(dentry, name, buffer, size);
2245
2246 err = shmem_xattr_validate(name);
2247 if (err)
2248 return err;
2249
2250 return shmem_xattr_get(dentry, name, buffer, size);
2251}
2252
2253static int shmem_setxattr(struct dentry *dentry, const char *name,
2254 const void *value, size_t size, int flags)
2255{
2256 int err;
2257
2258 /*
2259 * If this is a request for a synthetic attribute in the system.*
 2260 * namespace, use the generic infrastructure to resolve a handler
2261 * for it via sb->s_xattr.
2262 */
2263 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2264 return generic_setxattr(dentry, name, value, size, flags);
2265
2266 err = shmem_xattr_validate(name);
2267 if (err)
2268 return err;
2269
2270 if (size == 0)
2271 value = ""; /* empty EA, do not remove */
2272
2273 return shmem_xattr_set(dentry, name, value, size, flags);
2274
2275}
2276
2277static int shmem_removexattr(struct dentry *dentry, const char *name)
2278{
2279 int err;
2280
2281 /*
2282 * If this is a request for a synthetic attribute in the system.*
 2283 * namespace, use the generic infrastructure to resolve a handler
2284 * for it via sb->s_xattr.
2285 */
2286 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
2287 return generic_removexattr(dentry, name);
2288
2289 err = shmem_xattr_validate(name);
2290 if (err)
2291 return err;
2292
2293 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
2294}
2295
2296static bool xattr_is_trusted(const char *name)
2297{
2298 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
2299}
2300
2301static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
2302{
2303 bool trusted = capable(CAP_SYS_ADMIN);
2304 struct shmem_xattr *xattr;
2305 struct shmem_inode_info *info;
2306 size_t used = 0;
2307
2308 info = SHMEM_I(dentry->d_inode);
2309
2310 spin_lock(&info->lock);
2311 list_for_each_entry(xattr, &info->xattr_list, list) {
2312 size_t len;
2313
2314 /* skip "trusted." attributes for unprivileged callers */
2315 if (!trusted && xattr_is_trusted(xattr->name))
2316 continue;
2317
2318 len = strlen(xattr->name) + 1;
2319 used += len;
2320 if (buffer) {
2321 if (size < used) {
2322 used = -ERANGE;
2323 break;
2324 }
2325 memcpy(buffer, xattr->name, len);
2326 buffer += len;
2327 }
2328 }
2329 spin_unlock(&info->lock);
2330
2331 return used;
2332}
2333#endif /* CONFIG_TMPFS_XATTR */
2334
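/*
 * Editor's sketch (illustrative userspace; path is hypothetical): only
 * "trusted.*" and "security.*" names pass shmem_xattr_validate() above;
 * "user.*" fails with -EOPNOTSUPP, and "system.*" (e.g. POSIX ACLs) is
 * routed to the generic sb->s_xattr handlers instead.
 */
#if 0
#include <sys/xattr.h>

static ssize_t tmpfs_xattr_example(void)
{
	char buf[16];

	setxattr("/dev/shm/f", "trusted.note", "hi", 2, XATTR_CREATE);
	return getxattr("/dev/shm/f", "trusted.note", buf, sizeof(buf));
}
#endif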
2335static const struct inode_operations shmem_symlink_inline_operations = {
2336 .readlink = generic_readlink,
2337 .follow_link = shmem_follow_link_inline,
2338#ifdef CONFIG_TMPFS_XATTR
2339 .setxattr = shmem_setxattr,
2340 .getxattr = shmem_getxattr,
2341 .listxattr = shmem_listxattr,
2342 .removexattr = shmem_removexattr,
2343#endif
2344};
2345
2346static const struct inode_operations shmem_symlink_inode_operations = {
2347 .readlink = generic_readlink,
2348 .follow_link = shmem_follow_link,
2349 .put_link = shmem_put_link,
2350#ifdef CONFIG_TMPFS_XATTR
2351 .setxattr = shmem_setxattr,
2352 .getxattr = shmem_getxattr,
2353 .listxattr = shmem_listxattr,
2354 .removexattr = shmem_removexattr,
39f0247d 2355#endif
b09e0fa4 2356};
39f0247d 2357
91828a40
DG
2358static struct dentry *shmem_get_parent(struct dentry *child)
2359{
2360 return ERR_PTR(-ESTALE);
2361}
2362
2363static int shmem_match(struct inode *ino, void *vfh)
2364{
2365 __u32 *fh = vfh;
2366 __u64 inum = fh[2];
2367 inum = (inum << 32) | fh[1];
2368 return ino->i_ino == inum && fh[0] == ino->i_generation;
2369}
2370
480b116c
CH
2371static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2372 struct fid *fid, int fh_len, int fh_type)
91828a40 2373{
91828a40 2374 struct inode *inode;
480b116c
CH
2375 struct dentry *dentry = NULL;
2376 u64 inum = fid->raw[2];
2377 inum = (inum << 32) | fid->raw[1];
2378
2379 if (fh_len < 3)
2380 return NULL;
91828a40 2381
480b116c
CH
2382 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
2383 shmem_match, fid->raw);
91828a40 2384 if (inode) {
480b116c 2385 dentry = d_find_alias(inode);
91828a40
DG
2386 iput(inode);
2387 }
2388
480b116c 2389 return dentry;
91828a40
DG
2390}
2391
2392static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
2393 int connectable)
2394{
2395 struct inode *inode = dentry->d_inode;
2396
5fe0c237
AK
2397 if (*len < 3) {
2398 *len = 3;
91828a40 2399 return 255;
5fe0c237 2400 }
91828a40 2401
1d3382cb 2402 if (inode_unhashed(inode)) {
91828a40
DG
2403 /* Unfortunately insert_inode_hash is not idempotent,
2404 * so as we hash inodes here rather than at creation
2405 * time, we need a lock to ensure we only try
 2406 * to do it once.
2407 */
2408 static DEFINE_SPINLOCK(lock);
2409 spin_lock(&lock);
1d3382cb 2410 if (inode_unhashed(inode))
91828a40
DG
2411 __insert_inode_hash(inode,
2412 inode->i_ino + inode->i_generation);
2413 spin_unlock(&lock);
2414 }
2415
2416 fh[0] = inode->i_generation;
2417 fh[1] = inode->i_ino;
2418 fh[2] = ((__u64)inode->i_ino) >> 32;
2419
2420 *len = 3;
2421 return 1;
2422}
2423
39655164 2424static const struct export_operations shmem_export_ops = {
91828a40 2425 .get_parent = shmem_get_parent,
91828a40 2426 .encode_fh = shmem_encode_fh,
480b116c 2427 .fh_to_dentry = shmem_fh_to_dentry,
91828a40
DG
2428};
2429
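/*
 * Editor's note on the NFS file handle layout shared by
 * shmem_encode_fh() and shmem_fh_to_dentry() above:
 *
 *	fh[0] = i_generation
 *	fh[1] = i_ino, low 32 bits
 *	fh[2] = i_ino, high 32 bits
 *
 * shmem_match() rebuilds inum = ((__u64)fh[2] << 32) | fh[1] and also
 * compares the generation, so a handle to a recycled inode number is
 * rejected as stale.
 */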
680d794b
AM
2430static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
2431 bool remount)
1da177e4
LT
2432{
2433 char *this_char, *value, *rest;
2434
b00dc3ad
HD
2435 while (options != NULL) {
2436 this_char = options;
2437 for (;;) {
2438 /*
2439 * NUL-terminate this option: unfortunately,
2440 * mount options form a comma-separated list,
2441 * but mpol's nodelist may also contain commas.
2442 */
2443 options = strchr(options, ',');
2444 if (options == NULL)
2445 break;
2446 options++;
2447 if (!isdigit(*options)) {
2448 options[-1] = '\0';
2449 break;
2450 }
2451 }
1da177e4
LT
2452 if (!*this_char)
2453 continue;
2454 if ((value = strchr(this_char,'=')) != NULL) {
2455 *value++ = 0;
2456 } else {
2457 printk(KERN_ERR
2458 "tmpfs: No value for mount option '%s'\n",
2459 this_char);
2460 return 1;
2461 }
2462
2463 if (!strcmp(this_char,"size")) {
2464 unsigned long long size;
2465 size = memparse(value,&rest);
2466 if (*rest == '%') {
2467 size <<= PAGE_SHIFT;
2468 size *= totalram_pages;
2469 do_div(size, 100);
2470 rest++;
2471 }
2472 if (*rest)
2473 goto bad_val;
680d794b
AM
2474 sbinfo->max_blocks =
2475 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
1da177e4 2476 } else if (!strcmp(this_char,"nr_blocks")) {
680d794b 2477 sbinfo->max_blocks = memparse(value, &rest);
1da177e4
LT
2478 if (*rest)
2479 goto bad_val;
2480 } else if (!strcmp(this_char,"nr_inodes")) {
680d794b 2481 sbinfo->max_inodes = memparse(value, &rest);
1da177e4
LT
2482 if (*rest)
2483 goto bad_val;
2484 } else if (!strcmp(this_char,"mode")) {
680d794b 2485 if (remount)
1da177e4 2486 continue;
680d794b 2487 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
1da177e4
LT
2488 if (*rest)
2489 goto bad_val;
2490 } else if (!strcmp(this_char,"uid")) {
680d794b 2491 if (remount)
1da177e4 2492 continue;
680d794b 2493 sbinfo->uid = simple_strtoul(value, &rest, 0);
1da177e4
LT
2494 if (*rest)
2495 goto bad_val;
2496 } else if (!strcmp(this_char,"gid")) {
680d794b 2497 if (remount)
1da177e4 2498 continue;
680d794b 2499 sbinfo->gid = simple_strtoul(value, &rest, 0);
1da177e4
LT
2500 if (*rest)
2501 goto bad_val;
7339ff83 2502 } else if (!strcmp(this_char,"mpol")) {
71fe804b 2503 if (mpol_parse_str(value, &sbinfo->mpol, 1))
7339ff83 2504 goto bad_val;
1da177e4
LT
2505 } else {
2506 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2507 this_char);
2508 return 1;
2509 }
2510 }
2511 return 0;
2512
2513bad_val:
2514 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2515 value, this_char);
2516 return 1;
2517
2518}
2519
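/*
 * Editor's note, a worked example of the parser above (hypothetical
 * mount line):
 *
 *	mount -t tmpfs -o size=50%,nr_inodes=10k,mpol=interleave:0-2,5 tmpfs /mnt
 *
 * "size" goes through memparse() (k/m/g suffixes), a trailing '%'
 * scaling it by totalram_pages; and "interleave:0-2,5" shows why the
 * splitter must not cut at a comma followed by a digit: the mpol
 * nodelist itself contains commas.
 */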
2520static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2521{
2522 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
680d794b 2523 struct shmem_sb_info config = *sbinfo;
0edd73b3
HD
2524 unsigned long inodes;
2525 int error = -EINVAL;
2526
680d794b 2527 if (shmem_parse_options(data, &config, true))
0edd73b3 2528 return error;
1da177e4 2529
0edd73b3 2530 spin_lock(&sbinfo->stat_lock);
0edd73b3 2531 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
7e496299 2532 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
0edd73b3 2533 goto out;
680d794b 2534 if (config.max_inodes < inodes)
0edd73b3
HD
2535 goto out;
2536 /*
2537 * Those tests also disallow limited->unlimited while any are in
2538 * use, so i_blocks will always be zero when max_blocks is zero;
2539 * but we must separately disallow unlimited->limited, because
2540 * in that case we have no record of how much is already in use.
2541 */
680d794b 2542 if (config.max_blocks && !sbinfo->max_blocks)
0edd73b3 2543 goto out;
680d794b 2544 if (config.max_inodes && !sbinfo->max_inodes)
0edd73b3
HD
2545 goto out;
2546
2547 error = 0;
680d794b 2548 sbinfo->max_blocks = config.max_blocks;
680d794b
AM
2549 sbinfo->max_inodes = config.max_inodes;
2550 sbinfo->free_inodes = config.max_inodes - inodes;
71fe804b
LS
2551
2552 mpol_put(sbinfo->mpol);
2553 sbinfo->mpol = config.mpol; /* transfers initial ref */
0edd73b3
HD
2554out:
2555 spin_unlock(&sbinfo->stat_lock);
2556 return error;
1da177e4 2557}
680d794b
AM
2558
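/*
 * Editor's note, a worked remount example (hypothetical shell): growing
 * or shrinking an existing limit is fine,
 *
 *	mount -o remount,size=2g /mnt
 *
 * but remounting an unlimited (size=0) instance with a finite size is
 * rejected with -EINVAL by the unlimited->limited test above, since
 * there is no record of how much is already in use.
 */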
2559static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2560{
2561 struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2562
2563 if (sbinfo->max_blocks != shmem_default_max_blocks())
2564 seq_printf(seq, ",size=%luk",
2565 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2566 if (sbinfo->max_inodes != shmem_default_max_inodes())
2567 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2568 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2569 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2570 if (sbinfo->uid != 0)
2571 seq_printf(seq, ",uid=%u", sbinfo->uid);
2572 if (sbinfo->gid != 0)
2573 seq_printf(seq, ",gid=%u", sbinfo->gid);
71fe804b 2574 shmem_show_mpol(seq, sbinfo->mpol);
680d794b
AM
2575 return 0;
2576}
2577#endif /* CONFIG_TMPFS */
1da177e4
LT
2578
2579static void shmem_put_super(struct super_block *sb)
2580{
602586a8
HD
2581 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2582
2583 percpu_counter_destroy(&sbinfo->used_blocks);
2584 kfree(sbinfo);
1da177e4
LT
2585 sb->s_fs_info = NULL;
2586}
2587
2b2af54a 2588int shmem_fill_super(struct super_block *sb, void *data, int silent)
1da177e4
LT
2589{
2590 struct inode *inode;
2591 struct dentry *root;
0edd73b3 2592 struct shmem_sb_info *sbinfo;
680d794b
AM
2593 int err = -ENOMEM;
2594
2595 /* Round up to L1_CACHE_BYTES to resist false sharing */
425fbf04 2596 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
680d794b
AM
2597 L1_CACHE_BYTES), GFP_KERNEL);
2598 if (!sbinfo)
2599 return -ENOMEM;
2600
680d794b 2601 sbinfo->mode = S_IRWXUGO | S_ISVTX;
76aac0e9
DH
2602 sbinfo->uid = current_fsuid();
2603 sbinfo->gid = current_fsgid();
680d794b 2604 sb->s_fs_info = sbinfo;
1da177e4 2605
0edd73b3 2606#ifdef CONFIG_TMPFS
1da177e4
LT
2607 /*
2608 * Per default we only allow half of the physical ram per
2609 * tmpfs instance, limiting inodes to one per page of lowmem;
2610 * but the internal instance is left unlimited.
2611 */
2612 if (!(sb->s_flags & MS_NOUSER)) {
680d794b
AM
2613 sbinfo->max_blocks = shmem_default_max_blocks();
2614 sbinfo->max_inodes = shmem_default_max_inodes();
2615 if (shmem_parse_options(data, sbinfo, false)) {
2616 err = -EINVAL;
2617 goto failed;
2618 }
1da177e4 2619 }
91828a40 2620 sb->s_export_op = &shmem_export_ops;
1da177e4
LT
2621#else
2622 sb->s_flags |= MS_NOUSER;
2623#endif
2624
0edd73b3 2625 spin_lock_init(&sbinfo->stat_lock);
602586a8
HD
2626 if (percpu_counter_init(&sbinfo->used_blocks, 0))
2627 goto failed;
680d794b 2628 sbinfo->free_inodes = sbinfo->max_inodes;
0edd73b3 2629
1da177e4
LT
2630 sb->s_maxbytes = SHMEM_MAX_BYTES;
2631 sb->s_blocksize = PAGE_CACHE_SIZE;
2632 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2633 sb->s_magic = TMPFS_MAGIC;
2634 sb->s_op = &shmem_ops;
cfd95a9c 2635 sb->s_time_gran = 1;
b09e0fa4 2636#ifdef CONFIG_TMPFS_XATTR
39f0247d 2637 sb->s_xattr = shmem_xattr_handlers;
b09e0fa4
EP
2638#endif
2639#ifdef CONFIG_TMPFS_POSIX_ACL
39f0247d
AG
2640 sb->s_flags |= MS_POSIXACL;
2641#endif
0edd73b3 2642
454abafe 2643 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
1da177e4
LT
2644 if (!inode)
2645 goto failed;
680d794b
AM
2646 inode->i_uid = sbinfo->uid;
2647 inode->i_gid = sbinfo->gid;
1da177e4
LT
2648 root = d_alloc_root(inode);
2649 if (!root)
2650 goto failed_iput;
2651 sb->s_root = root;
2652 return 0;
2653
2654failed_iput:
2655 iput(inode);
2656failed:
2657 shmem_put_super(sb);
2658 return err;
2659}
2660
fcc234f8 2661static struct kmem_cache *shmem_inode_cachep;
1da177e4
LT
2662
2663static struct inode *shmem_alloc_inode(struct super_block *sb)
2664{
2665 struct shmem_inode_info *p;
e94b1766 2666 p = (struct shmem_inode_info *)kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
1da177e4
LT
2667 if (!p)
2668 return NULL;
2669 return &p->vfs_inode;
2670}
2671
fa0d7e3d
NP
2672static void shmem_i_callback(struct rcu_head *head)
2673{
2674 struct inode *inode = container_of(head, struct inode, i_rcu);
2675 INIT_LIST_HEAD(&inode->i_dentry);
2676 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2677}
2678
1da177e4
LT
2679static void shmem_destroy_inode(struct inode *inode)
2680{
2681 if ((inode->i_mode & S_IFMT) == S_IFREG) {
2682 /* only struct inode is valid if it's an inline symlink */
2683 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2684 }
fa0d7e3d 2685 call_rcu(&inode->i_rcu, shmem_i_callback);
1da177e4
LT
2686}
2687
51cc5068 2688static void init_once(void *foo)
1da177e4
LT
2689{
2690 struct shmem_inode_info *p = (struct shmem_inode_info *) foo;
2691
a35afb83 2692 inode_init_once(&p->vfs_inode);
1da177e4
LT
2693}
2694
2695static int init_inodecache(void)
2696{
2697 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2698 sizeof(struct shmem_inode_info),
040b5c6f 2699 0, SLAB_PANIC, init_once);
1da177e4
LT
2700 return 0;
2701}
2702
2703static void destroy_inodecache(void)
2704{
1a1d92c1 2705 kmem_cache_destroy(shmem_inode_cachep);
1da177e4
LT
2706}
2707
f5e54d6e 2708static const struct address_space_operations shmem_aops = {
1da177e4 2709 .writepage = shmem_writepage,
76719325 2710 .set_page_dirty = __set_page_dirty_no_writeback,
1da177e4 2711#ifdef CONFIG_TMPFS
800d15a5
NP
2712 .write_begin = shmem_write_begin,
2713 .write_end = shmem_write_end,
1da177e4 2714#endif
304dbdb7 2715 .migratepage = migrate_page,
aa261f54 2716 .error_remove_page = generic_error_remove_page,
1da177e4
LT
2717};
2718
15ad7cdc 2719static const struct file_operations shmem_file_operations = {
1da177e4
LT
2720 .mmap = shmem_mmap,
2721#ifdef CONFIG_TMPFS
2722 .llseek = generic_file_llseek,
bcd78e49 2723 .read = do_sync_read,
5402b976 2724 .write = do_sync_write,
bcd78e49 2725 .aio_read = shmem_file_aio_read,
5402b976 2726 .aio_write = generic_file_aio_write,
1b061d92 2727 .fsync = noop_fsync,
708e3508 2728 .splice_read = shmem_file_splice_read,
ae976416 2729 .splice_write = generic_file_splice_write,
1da177e4
LT
2730#endif
2731};
2732
92e1d5be 2733static const struct inode_operations shmem_inode_operations = {
94c1e62d 2734 .setattr = shmem_setattr,
f6b3ec23 2735 .truncate_range = shmem_truncate_range,
b09e0fa4
EP
2736#ifdef CONFIG_TMPFS_XATTR
2737 .setxattr = shmem_setxattr,
2738 .getxattr = shmem_getxattr,
2739 .listxattr = shmem_listxattr,
2740 .removexattr = shmem_removexattr,
2741#endif
1da177e4
LT
2742};
2743
92e1d5be 2744static const struct inode_operations shmem_dir_inode_operations = {
1da177e4
LT
2745#ifdef CONFIG_TMPFS
2746 .create = shmem_create,
2747 .lookup = simple_lookup,
2748 .link = shmem_link,
2749 .unlink = shmem_unlink,
2750 .symlink = shmem_symlink,
2751 .mkdir = shmem_mkdir,
2752 .rmdir = shmem_rmdir,
2753 .mknod = shmem_mknod,
2754 .rename = shmem_rename,
1da177e4 2755#endif
b09e0fa4
EP
2756#ifdef CONFIG_TMPFS_XATTR
2757 .setxattr = shmem_setxattr,
2758 .getxattr = shmem_getxattr,
2759 .listxattr = shmem_listxattr,
2760 .removexattr = shmem_removexattr,
2761#endif
39f0247d 2762#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 2763 .setattr = shmem_setattr,
39f0247d
AG
2764#endif
2765};
2766
92e1d5be 2767static const struct inode_operations shmem_special_inode_operations = {
b09e0fa4
EP
2768#ifdef CONFIG_TMPFS_XATTR
2769 .setxattr = shmem_setxattr,
2770 .getxattr = shmem_getxattr,
2771 .listxattr = shmem_listxattr,
2772 .removexattr = shmem_removexattr,
2773#endif
39f0247d 2774#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 2775 .setattr = shmem_setattr,
39f0247d 2776#endif
1da177e4
LT
2777};
2778
759b9775 2779static const struct super_operations shmem_ops = {
1da177e4
LT
2780 .alloc_inode = shmem_alloc_inode,
2781 .destroy_inode = shmem_destroy_inode,
2782#ifdef CONFIG_TMPFS
2783 .statfs = shmem_statfs,
2784 .remount_fs = shmem_remount_fs,
680d794b 2785 .show_options = shmem_show_options,
1da177e4 2786#endif
1f895f75 2787 .evict_inode = shmem_evict_inode,
1da177e4
LT
2788 .drop_inode = generic_delete_inode,
2789 .put_super = shmem_put_super,
2790};
2791
f0f37e2f 2792static const struct vm_operations_struct shmem_vm_ops = {
54cb8821 2793 .fault = shmem_fault,
1da177e4
LT
2794#ifdef CONFIG_NUMA
2795 .set_policy = shmem_set_policy,
2796 .get_policy = shmem_get_policy,
2797#endif
2798};
2799
2800
3c26ff6e
AV
2801static struct dentry *shmem_mount(struct file_system_type *fs_type,
2802 int flags, const char *dev_name, void *data)
1da177e4 2803{
3c26ff6e 2804 return mount_nodev(fs_type, flags, data, shmem_fill_super);
1da177e4
LT
2805}
2806
2807static struct file_system_type tmpfs_fs_type = {
2808 .owner = THIS_MODULE,
2809 .name = "tmpfs",
3c26ff6e 2810 .mount = shmem_mount,
1da177e4
LT
2811 .kill_sb = kill_litter_super,
2812};
1da177e4 2813
2b2af54a 2814int __init init_tmpfs(void)
1da177e4
LT
2815{
2816 int error;
2817
e0bf68dd
PZ
2818 error = bdi_init(&shmem_backing_dev_info);
2819 if (error)
2820 goto out4;
2821
1da177e4
LT
2822 error = init_inodecache();
2823 if (error)
2824 goto out3;
2825
2826 error = register_filesystem(&tmpfs_fs_type);
2827 if (error) {
2828 printk(KERN_ERR "Could not register tmpfs\n");
2829 goto out2;
2830 }
95dc112a 2831
1f5ce9e9 2832 shm_mnt = vfs_kern_mount(&tmpfs_fs_type, MS_NOUSER,
1da177e4
LT
2833 tmpfs_fs_type.name, NULL);
2834 if (IS_ERR(shm_mnt)) {
2835 error = PTR_ERR(shm_mnt);
2836 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2837 goto out1;
2838 }
2839 return 0;
2840
2841out1:
2842 unregister_filesystem(&tmpfs_fs_type);
2843out2:
2844 destroy_inodecache();
2845out3:
e0bf68dd
PZ
2846 bdi_destroy(&shmem_backing_dev_info);
2847out4:
1da177e4
LT
2848 shm_mnt = ERR_PTR(error);
2849 return error;
2850}
853ac43a 2851
87946a72
DN
2852#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2853/**
2854 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2855 * @inode: the inode to be searched
2856 * @pgoff: the offset to be searched
2857 * @pagep: the pointer for the found page to be stored
2858 * @ent: the pointer for the found swap entry to be stored
2859 *
 2860 * If a page is found, its refcount is incremented. Callers must
 2861 * handle that refcount.
2862 */
2863void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2864 struct page **pagep, swp_entry_t *ent)
2865{
2866 swp_entry_t entry = { .val = 0 }, *ptr;
2867 struct page *page = NULL;
2868 struct shmem_inode_info *info = SHMEM_I(inode);
2869
2870 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2871 goto out;
2872
2873 spin_lock(&info->lock);
2874 ptr = shmem_swp_entry(info, pgoff, NULL);
2875#ifdef CONFIG_SWAP
2876 if (ptr && ptr->val) {
2877 entry.val = ptr->val;
2878 page = find_get_page(&swapper_space, entry.val);
2879 } else
2880#endif
2881 page = find_get_page(inode->i_mapping, pgoff);
2882 if (ptr)
2883 shmem_swp_unmap(ptr);
2884 spin_unlock(&info->lock);
2885out:
2886 *pagep = page;
2887 *ent = entry;
2888}
2889#endif
2890
853ac43a
MM
2891#else /* !CONFIG_SHMEM */
2892
2893/*
2894 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2895 *
 2896 * This is intended for small systems where the benefits of the full
 2897 * shmem code (swap-backed and resource-limited) are outweighed by
 2898 * its complexity. On systems without swap this code should be
2899 * effectively equivalent, but much lighter weight.
2900 */
2901
2902#include <linux/ramfs.h>
2903
2904static struct file_system_type tmpfs_fs_type = {
2905 .name = "tmpfs",
3c26ff6e 2906 .mount = ramfs_mount,
853ac43a
MM
2907 .kill_sb = kill_litter_super,
2908};
2909
2b2af54a 2910int __init init_tmpfs(void)
853ac43a
MM
2911{
2912 BUG_ON(register_filesystem(&tmpfs_fs_type) != 0);
2913
2914 shm_mnt = kern_mount(&tmpfs_fs_type);
2915 BUG_ON(IS_ERR(shm_mnt));
2916
2917 return 0;
2918}
2919
2920int shmem_unuse(swp_entry_t entry, struct page *page)
2921{
2922 return 0;
2923}
2924
3f96b79a
HD
2925int shmem_lock(struct file *file, int lock, struct user_struct *user)
2926{
2927 return 0;
2928}
2929
94c1e62d
HD
2930void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end)
2931{
2932 truncate_inode_pages_range(inode->i_mapping, start, end);
2933}
2934EXPORT_SYMBOL_GPL(shmem_truncate_range);
2935
87946a72
DN
2936#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2937/**
2938 * mem_cgroup_get_shmem_target - find a page or entry assigned to the shmem file
2939 * @inode: the inode to be searched
2940 * @pgoff: the offset to be searched
2941 * @pagep: the pointer for the found page to be stored
2942 * @ent: the pointer for the found swap entry to be stored
2943 *
 2944 * If a page is found, its refcount is incremented. Callers must
 2945 * handle that refcount.
2946 */
2947void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t pgoff,
2948 struct page **pagep, swp_entry_t *ent)
2949{
2950 struct page *page = NULL;
2951
2952 if ((pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
2953 goto out;
2954 page = find_get_page(inode->i_mapping, pgoff);
2955out:
2956 *pagep = page;
2957 *ent = (swp_entry_t){ .val = 0 };
2958}
2959#endif
2960
0b0a0806
HD
2961#define shmem_vm_ops generic_file_vm_ops
2962#define shmem_file_operations ramfs_file_operations
454abafe 2963#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
0b0a0806
HD
2964#define shmem_acct_size(flags, size) 0
2965#define shmem_unacct_size(flags, size) do {} while (0)
caefba17 2966#define SHMEM_MAX_BYTES MAX_LFS_FILESIZE
853ac43a
MM
2967
2968#endif /* CONFIG_SHMEM */
2969
2970/* common code */
1da177e4 2971
46711810 2972/**
1da177e4 2973 * shmem_file_setup - get an unlinked file living in tmpfs
1da177e4
LT
 2974 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2975 * @size: size to be set for the file
0b0a0806 2976 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
1da177e4 2977 */
168f5ac6 2978struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
1da177e4
LT
2979{
2980 int error;
2981 struct file *file;
2982 struct inode *inode;
2c48b9c4
AV
2983 struct path path;
2984 struct dentry *root;
1da177e4
LT
2985 struct qstr this;
2986
2987 if (IS_ERR(shm_mnt))
2988 return (void *)shm_mnt;
2989
2990 if (size < 0 || size > SHMEM_MAX_BYTES)
2991 return ERR_PTR(-EINVAL);
2992
2993 if (shmem_acct_size(flags, size))
2994 return ERR_PTR(-ENOMEM);
2995
2996 error = -ENOMEM;
2997 this.name = name;
2998 this.len = strlen(name);
2999 this.hash = 0; /* will go */
3000 root = shm_mnt->mnt_root;
2c48b9c4
AV
3001 path.dentry = d_alloc(root, &this);
3002 if (!path.dentry)
1da177e4 3003 goto put_memory;
2c48b9c4 3004 path.mnt = mntget(shm_mnt);
1da177e4 3005
1da177e4 3006 error = -ENOSPC;
454abafe 3007 inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
1da177e4 3008 if (!inode)
4b42af81 3009 goto put_dentry;
1da177e4 3010
2c48b9c4 3011 d_instantiate(path.dentry, inode);
1da177e4
LT
3012 inode->i_size = size;
3013 inode->i_nlink = 0; /* It is unlinked */
853ac43a
MM
3014#ifndef CONFIG_MMU
3015 error = ramfs_nommu_expand_for_mapping(inode, size);
3016 if (error)
4b42af81 3017 goto put_dentry;
853ac43a 3018#endif
4b42af81
AV
3019
3020 error = -ENFILE;
2c48b9c4 3021 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
4b42af81
AV
3022 &shmem_file_operations);
3023 if (!file)
3024 goto put_dentry;
3025
1da177e4
LT
3026 return file;
3027
1da177e4 3028put_dentry:
2c48b9c4 3029 path_put(&path);
1da177e4
LT
3030put_memory:
3031 shmem_unacct_size(flags, size);
3032 return ERR_PTR(error);
3033}
395e0ddc 3034EXPORT_SYMBOL_GPL(shmem_file_setup);
1da177e4 3035
46711810 3036/**
1da177e4 3037 * shmem_zero_setup - setup a shared anonymous mapping
1da177e4
LT
 3038 * @vma: the vma to be mmapped, as prepared by do_mmap_pgoff
3039 */
3040int shmem_zero_setup(struct vm_area_struct *vma)
3041{
3042 struct file *file;
3043 loff_t size = vma->vm_end - vma->vm_start;
3044
3045 file = shmem_file_setup("dev/zero", size, vma->vm_flags);
3046 if (IS_ERR(file))
3047 return PTR_ERR(file);
3048
3049 if (vma->vm_file)
3050 fput(vma->vm_file);
3051 vma->vm_file = file;
3052 vma->vm_ops = &shmem_vm_ops;
bee4c36a 3053 vma->vm_flags |= VM_CAN_NONLINEAR;
1da177e4
LT
3054 return 0;
3055}
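/*
 * Editor's note: this is the path behind anonymous shared mappings; a
 * userspace
 *
 *	mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *
 * has no file, so do_mmap_pgoff() calls shmem_zero_setup() to back the
 * vma with this unlinked tmpfs file instead.
 */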
d9d90e5e
HD
3056
3057/**
3058 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
3059 * @mapping: the page's address_space
3060 * @index: the page index
3061 * @gfp: the page allocator flags to use if allocating
3062 *
3063 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
3064 * with any new page allocations done using the specified allocation flags.
 3065 * But read_cache_page_gfp() uses the ->readpage() method, which does not
 3066 * suit tmpfs, since it may have pages in swapcache, and needs to find those
 3067 * for itself; drivers/gpu/drm i915 and ttm rely upon this support.
3068 *
68da9f05
HD
3069 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
3070 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
d9d90e5e
HD
3071 */
3072struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
3073 pgoff_t index, gfp_t gfp)
3074{
68da9f05
HD
3075#ifdef CONFIG_SHMEM
3076 struct inode *inode = mapping->host;
9276aad6 3077 struct page *page;
68da9f05
HD
3078 int error;
3079
3080 BUG_ON(mapping->a_ops != &shmem_aops);
3081 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
3082 if (error)
3083 page = ERR_PTR(error);
3084 else
3085 unlock_page(page);
3086 return page;
3087#else
3088 /*
3089 * The tiny !SHMEM case uses ramfs without swap
3090 */
d9d90e5e 3091 return read_cache_page_gfp(mapping, index, gfp);
68da9f05 3092#endif
d9d90e5e
HD
3093}
3094EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
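/*
 * Editor's sketch of a caller, in the spirit of the i915 usage noted
 * above (mapping and index stand for the caller's own variables):
 */
#if 0
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index,
				mapping_gfp_mask(mapping) |
				__GFP_NORETRY | __GFP_NOWARN);
	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... use the page, then drop the page cache reference ... */
	page_cache_release(page);
#endif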