/*
 * Resizable virtual memory filesystem for Linux.
 *
 * Copyright (C) 2000 Linus Torvalds.
 *		 2000 Transmeta Corp.
 *		 2000-2001 Christoph Rohland
 *		 2000-2001 SAP AG
 *		 2002 Red Hat Inc.
 * Copyright (C) 2002-2005 Hugh Dickins.
 * Copyright (C) 2002-2005 VERITAS Software Corporation.
 * Copyright (C) 2004 Andi Kleen, SuSE Labs
 *
 * Extended attribute support for tmpfs:
 * Copyright (c) 2004, Luke Kenneth Casson Leighton <lkcl@lkcl.net>
 * Copyright (c) 2004 Red Hat, Inc., James Morris <jmorris@redhat.com>
 *
 * tiny-shmem:
 * Copyright (c) 2004, 2008 Matt Mackall <mpm@selenic.com>
 *
 * This file is released under the GPL.
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/swap.h>

static struct vfsmount *shm_mnt;

#ifdef CONFIG_SHMEM
/*
 * This virtual memory filesystem is heavily based on the ramfs. It
 * extends ramfs by the ability to use swap and honor resource limits
 * which makes it a completely usable filesystem.
 */

#include <linux/xattr.h>
#include <linux/exportfs.h>
#include <linux/posix_acl.h>
#include <linux/generic_acl.h>
#include <linux/mman.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/percpu_counter.h>
#include <linux/splice.h>
#include <linux/security.h>
#include <linux/swapops.h>
#include <linux/mempolicy.h>
#include <linux/namei.h>
#include <linux/ctype.h>
#include <linux/migrate.h>
#include <linux/highmem.h>
#include <linux/seq_file.h>
#include <linux/magic.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>

#define BLOCKS_PER_PAGE	(PAGE_CACHE_SIZE/512)
#define VM_ACCT(size)	(PAGE_CACHE_ALIGN(size) >> PAGE_SHIFT)

/* Pretend that each entry is of this size in directory's i_size */
#define BOGO_DIRENT_SIZE 20

struct shmem_xattr {
	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
	char *name;		/* xattr name */
	size_t size;
	char value[0];
};

/* Flag allocation requirements to shmem_getpage */
enum sgp_type {
	SGP_READ,	/* don't exceed i_size, don't allocate page */
	SGP_CACHE,	/* don't exceed i_size, may allocate page */
	SGP_DIRTY,	/* like SGP_CACHE, but set new page dirty */
	SGP_WRITE,	/* may exceed i_size, may allocate page */
};

#ifdef CONFIG_TMPFS
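/*
 * Default limits, applied when the mount options specify none: up to
 * half of RAM may be used for data pages, and the inode count is
 * capped by both lowmem pages and half of RAM.
 */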
static unsigned long shmem_default_max_blocks(void)
{
	return totalram_pages / 2;
}

static unsigned long shmem_default_max_inodes(void)
{
	return min(totalram_pages - totalhigh_pages, totalram_pages / 2);
}
#endif

static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);

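/*
 * shmem_getpage - wrapper for shmem_getpage_gfp, passing the gfp mask
 * of the inode's mapping for any allocation that may be needed.
 */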
static inline int shmem_getpage(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, int *fault_type)
{
	return shmem_getpage_gfp(inode, index, pagep, sgp,
			mapping_gfp_mask(inode->i_mapping), fault_type);
}

static inline struct shmem_sb_info *SHMEM_SB(struct super_block *sb)
{
	return sb->s_fs_info;
}

/*
 * shmem_file_setup pre-accounts the whole fixed size of a VM object,
 * for shared memory and for shared anonymous (/dev/zero) mappings
 * (unless MAP_NORESERVE and sysctl_overcommit_memory <= 1),
 * consistent with the pre-accounting of private mappings ...
 */
static inline int shmem_acct_size(unsigned long flags, loff_t size)
{
	return (flags & VM_NORESERVE) ?
		0 : security_vm_enough_memory_kern(VM_ACCT(size));
}

static inline void shmem_unacct_size(unsigned long flags, loff_t size)
{
	if (!(flags & VM_NORESERVE))
		vm_unacct_memory(VM_ACCT(size));
}

/*
 * ... whereas tmpfs objects are accounted incrementally as
 * pages are allocated, in order to allow huge sparse files.
 * shmem_getpage reports shmem_acct_block failure as -ENOSPC not -ENOMEM,
 * so that a failure on a sparse tmpfs mapping will give SIGBUS not OOM.
 */
static inline int shmem_acct_block(unsigned long flags)
{
	return (flags & VM_NORESERVE) ?
		security_vm_enough_memory_kern(VM_ACCT(PAGE_CACHE_SIZE)) : 0;
}

static inline void shmem_unacct_blocks(unsigned long flags, long pages)
{
	if (flags & VM_NORESERVE)
		vm_unacct_memory(pages * VM_ACCT(PAGE_CACHE_SIZE));
}

static const struct super_operations shmem_ops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
static const struct inode_operations shmem_special_inode_operations;
static const struct vm_operations_struct shmem_vm_ops;

static struct backing_dev_info shmem_backing_dev_info __read_mostly = {
	.ra_pages	= 0,	/* No readahead */
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

static LIST_HEAD(shmem_swaplist);
static DEFINE_MUTEX(shmem_swaplist_mutex);

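/*
 * Return freed pages to the mount's used_blocks count and the inode's
 * i_blocks, when a size limit is in force.
 */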
static void shmem_free_blocks(struct inode *inode, long pages)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
	if (sbinfo->max_blocks) {
		percpu_counter_add(&sbinfo->used_blocks, -pages);
		inode->i_blocks -= pages*BLOCKS_PER_PAGE;
	}
}

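/*
 * Reserve one inode against the mount's inode limit, or give it back
 * again via shmem_free_inode(); both are no-ops when the mount has no
 * inode limit (max_inodes == 0).
 */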
static int shmem_reserve_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		if (!sbinfo->free_inodes) {
			spin_unlock(&sbinfo->stat_lock);
			return -ENOSPC;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}
	return 0;
}

static void shmem_free_inode(struct super_block *sb)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
	if (sbinfo->max_inodes) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

/**
 * shmem_recalc_inode - recalculate the block usage of an inode
 * @inode: inode to recalc
 *
 * We have to calculate the free blocks since the mm can drop
 * undirtied hole pages behind our back.
 *
 * But normally   info->alloced == inode->i_mapping->nrpages + info->swapped
 * So mm freed is info->alloced - (inode->i_mapping->nrpages + info->swapped)
 *
 * It has to be called with the spinlock held.
 */
static void shmem_recalc_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	long freed;

	freed = info->alloced - info->swapped - inode->i_mapping->nrpages;
	if (freed > 0) {
		info->alloced -= freed;
		shmem_unacct_blocks(info->flags, freed);
		shmem_free_blocks(inode, freed);
	}
}

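/*
 * While the radix-swap conversion is in progress, swap entries live
 * only in the inode's fixed i_direct[] array: stores at or beyond
 * SHMEM_NR_DIRECT are silently dropped, and lookups there read back
 * as an empty swap entry.
 */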
static void shmem_put_swap(struct shmem_inode_info *info, pgoff_t index,
			   swp_entry_t swap)
{
	if (index < SHMEM_NR_DIRECT)
		info->i_direct[index] = swap;
}

static swp_entry_t shmem_get_swap(struct shmem_inode_info *info, pgoff_t index)
{
	return (index < SHMEM_NR_DIRECT) ?
		info->i_direct[index] : (swp_entry_t){0};
}

/*
 * Replace item expected in radix tree by a new item, while holding tree lock.
 */
static int shmem_radix_tree_replace(struct address_space *mapping,
			pgoff_t index, void *expected, void *replacement)
{
	void **pslot;
	void *item = NULL;

	VM_BUG_ON(!expected);
	pslot = radix_tree_lookup_slot(&mapping->page_tree, index);
	if (pslot)
		item = radix_tree_deref_slot_protected(pslot,
							&mapping->tree_lock);
	if (item != expected)
		return -ENOENT;
	if (replacement)
		radix_tree_replace_slot(pslot, replacement);
	else
		radix_tree_delete(&mapping->page_tree, index);
	return 0;
}

/*
 * Like find_get_pages, but collecting swap entries as well as pages.
 */
static unsigned shmem_find_get_pages_and_swap(struct address_space *mapping,
					pgoff_t start, unsigned int nr_pages,
					struct page **pages, pgoff_t *indices)
{
	unsigned int i;
	unsigned int ret;
	unsigned int nr_found;

	rcu_read_lock();
restart:
	nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
				(void ***)pages, indices, start, nr_pages);
	ret = 0;
	for (i = 0; i < nr_found; i++) {
		struct page *page;
repeat:
		page = radix_tree_deref_slot((void **)pages[i]);
		if (unlikely(!page))
			continue;
		if (radix_tree_exception(page)) {
			if (radix_tree_exceptional_entry(page))
				goto export;
			/* radix_tree_deref_retry(page) */
			goto restart;
		}
		if (!page_cache_get_speculative(page))
			goto repeat;

		/* Has the page moved? */
		if (unlikely(page != *((void **)pages[i]))) {
			page_cache_release(page);
			goto repeat;
		}
export:
		indices[ret] = indices[i];
		pages[ret] = page;
		ret++;
	}
	if (unlikely(!ret && nr_found))
		goto restart;
	rcu_read_unlock();
	return ret;
}

/*
 * Remove swap entry from radix tree, free the swap and its page cache.
 */
static int shmem_free_swap(struct address_space *mapping,
			   pgoff_t index, void *radswap)
{
	int error;

	spin_lock_irq(&mapping->tree_lock);
	error = shmem_radix_tree_replace(mapping, index, radswap, NULL);
	spin_unlock_irq(&mapping->tree_lock);
	if (!error)
		free_swap_and_cache(radix_to_swp_entry(radswap));
	return error;
}

/*
 * Pagevec may contain swap entries, so shuffle up pages before releasing.
 */
static void shmem_pagevec_release(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
	pagevec_release(pvec);
}

/*
 * Remove range of pages and swap entries from radix tree, and free them.
 */
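/*
 * Two passes: the first walks the range with trylock_page(), simply
 * skipping any page it cannot lock immediately; the second restarts
 * from the beginning with lock_page(), and repeats until no pages or
 * swap entries remain in the range.
 */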
285b2c4f 346void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
1da177e4 347{
285b2c4f 348 struct address_space *mapping = inode->i_mapping;
1da177e4 349 struct shmem_inode_info *info = SHMEM_I(inode);
285b2c4f 350 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
bda97eab 351 unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
285b2c4f 352 pgoff_t end = (lend >> PAGE_CACHE_SHIFT);
bda97eab 353 struct pagevec pvec;
7a5d0fbb
HD
354 pgoff_t indices[PAGEVEC_SIZE];
355 long nr_swaps_freed = 0;
285b2c4f 356 pgoff_t index;
bda97eab
HD
357 int i;
358
359 BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
360
361 pagevec_init(&pvec, 0);
362 index = start;
7a5d0fbb
HD
363 while (index <= end) {
364 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
365 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
366 pvec.pages, indices);
367 if (!pvec.nr)
368 break;
bda97eab
HD
369 mem_cgroup_uncharge_start();
370 for (i = 0; i < pagevec_count(&pvec); i++) {
371 struct page *page = pvec.pages[i];
372
7a5d0fbb 373 index = indices[i];
bda97eab
HD
374 if (index > end)
375 break;
376
7a5d0fbb
HD
377 if (radix_tree_exceptional_entry(page)) {
378 nr_swaps_freed += !shmem_free_swap(mapping,
379 index, page);
bda97eab 380 continue;
7a5d0fbb
HD
381 }
382
383 if (!trylock_page(page))
bda97eab 384 continue;
7a5d0fbb
HD
385 if (page->mapping == mapping) {
386 VM_BUG_ON(PageWriteback(page));
387 truncate_inode_page(mapping, page);
bda97eab 388 }
bda97eab
HD
389 unlock_page(page);
390 }
7a5d0fbb 391 shmem_pagevec_release(&pvec);
bda97eab
HD
392 mem_cgroup_uncharge_end();
393 cond_resched();
394 index++;
395 }
1da177e4 396
bda97eab
HD
397 if (partial) {
398 struct page *page = NULL;
399 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
400 if (page) {
401 zero_user_segment(page, partial, PAGE_CACHE_SIZE);
402 set_page_dirty(page);
403 unlock_page(page);
404 page_cache_release(page);
405 }
406 }
407
408 index = start;
409 for ( ; ; ) {
410 cond_resched();
7a5d0fbb
HD
411 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
412 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1,
413 pvec.pages, indices);
414 if (!pvec.nr) {
bda97eab
HD
415 if (index == start)
416 break;
417 index = start;
418 continue;
419 }
7a5d0fbb
HD
420 if (index == start && indices[0] > end) {
421 shmem_pagevec_release(&pvec);
bda97eab
HD
422 break;
423 }
424 mem_cgroup_uncharge_start();
425 for (i = 0; i < pagevec_count(&pvec); i++) {
426 struct page *page = pvec.pages[i];
427
7a5d0fbb 428 index = indices[i];
bda97eab
HD
429 if (index > end)
430 break;
431
7a5d0fbb
HD
432 if (radix_tree_exceptional_entry(page)) {
433 nr_swaps_freed += !shmem_free_swap(mapping,
434 index, page);
435 continue;
436 }
437
bda97eab 438 lock_page(page);
7a5d0fbb
HD
439 if (page->mapping == mapping) {
440 VM_BUG_ON(PageWriteback(page));
441 truncate_inode_page(mapping, page);
442 }
bda97eab
HD
443 unlock_page(page);
444 }
7a5d0fbb 445 shmem_pagevec_release(&pvec);
bda97eab
HD
446 mem_cgroup_uncharge_end();
447 index++;
448 }
94c1e62d 449
1da177e4 450 spin_lock(&info->lock);
7a5d0fbb 451 info->swapped -= nr_swaps_freed;
1da177e4
LT
452 shmem_recalc_inode(inode);
453 spin_unlock(&info->lock);
454
285b2c4f 455 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
1da177e4 456}
94c1e62d 457EXPORT_SYMBOL_GPL(shmem_truncate_range);
1da177e4 458
static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize != oldsize) {
			i_size_write(inode, newsize);
			inode->i_ctime = inode->i_mtime = CURRENT_TIME;
		}
		if (newsize < oldsize) {
			loff_t holebegin = round_up(newsize, PAGE_SIZE);
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
			shmem_truncate_range(inode, newsize, (loff_t)-1);
			/* unmap again to remove racily COWed private pages */
			unmap_mapping_range(inode->i_mapping, holebegin, 0, 1);
		}
	}

	setattr_copy(inode, attr);
#ifdef CONFIG_TMPFS_POSIX_ACL
	if (attr->ia_valid & ATTR_MODE)
		error = generic_acl_chmod(inode);
#endif
	return error;
}

static void shmem_evict_inode(struct inode *inode)
{
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr, *nxattr;

	if (inode->i_mapping->a_ops == &shmem_aops) {
		shmem_unacct_size(info->flags, inode->i_size);
		inode->i_size = 0;
		shmem_truncate_range(inode, 0, (loff_t)-1);
		if (!list_empty(&info->swaplist)) {
			mutex_lock(&shmem_swaplist_mutex);
			list_del_init(&info->swaplist);
			mutex_unlock(&shmem_swaplist_mutex);
		}
	}

	list_for_each_entry_safe(xattr, nxattr, &info->xattr_list, list) {
		kfree(xattr->name);
		kfree(xattr);
	}
	BUG_ON(inode->i_blocks);
	shmem_free_inode(inode->i_sb);
	end_writeback(inode);
}

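/*
 * shmem_unuse_inode - scan one inode's i_direct[] for the swap entry
 * being swapoff'ed, and if found move the page back into page cache.
 * Returns 1 when the entry was found and reinstated, 0 when it was
 * not found, -ENOMEM if the page could not be added to page cache.
 */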
static int shmem_unuse_inode(struct shmem_inode_info *info,
			     swp_entry_t swap, struct page *page)
{
	struct address_space *mapping = info->vfs_inode.i_mapping;
	pgoff_t index;
	int error;

	for (index = 0; index < SHMEM_NR_DIRECT; index++)
		if (shmem_get_swap(info, index).val == swap.val)
			goto found;
	return 0;
found:
	spin_lock(&info->lock);
	if (shmem_get_swap(info, index).val != swap.val) {
		spin_unlock(&info->lock);
		return 0;
	}

	/*
	 * Move _head_ to start search for next from here.
	 * But be careful: shmem_evict_inode checks list_empty without taking
	 * mutex, and there's an instant in list_move_tail when info->swaplist
	 * would appear empty, if it were the only one on shmem_swaplist.
	 */
	if (shmem_swaplist.next != &info->swaplist)
		list_move_tail(&shmem_swaplist, &info->swaplist);

	/*
	 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
	 * but also to hold up shmem_evict_inode(): so inode cannot be freed
	 * beneath us (pagelock doesn't help until the page is in pagecache).
	 */
	error = add_to_page_cache_locked(page, mapping, index, GFP_NOWAIT);
	/* which does mem_cgroup_uncharge_cache_page on error */

	if (error != -ENOMEM) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
		swap_free(swap);
		error = 1;	/* not an error, but entry was found */
	}
	spin_unlock(&info->lock);
	return error;
}

/*
 * shmem_unuse() searches for an eventually swapped-out shmem page.
 */
int shmem_unuse(swp_entry_t swap, struct page *page)
{
	struct list_head *this, *next;
	struct shmem_inode_info *info;
	int found = 0;
	int error;

	/*
	 * Charge page using GFP_KERNEL while we can wait, before taking
	 * the shmem_swaplist_mutex which might hold up shmem_writepage().
	 * Charged back to the user (not to caller) when swap account is used.
	 * add_to_page_cache() will be called with GFP_NOWAIT.
	 */
	error = mem_cgroup_cache_charge(page, current->mm, GFP_KERNEL);
	if (error)
		goto out;
	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu,
	 * it's okay if sometimes we get rescheduled after this.
	 */
	error = radix_tree_preload(GFP_KERNEL);
	if (error)
		goto uncharge;
	radix_tree_preload_end();

	mutex_lock(&shmem_swaplist_mutex);
	list_for_each_safe(this, next, &shmem_swaplist) {
		info = list_entry(this, struct shmem_inode_info, swaplist);
		if (!info->swapped) {
			spin_lock(&info->lock);
			if (!info->swapped)
				list_del_init(&info->swaplist);
			spin_unlock(&info->lock);
		}
		if (info->swapped)
			found = shmem_unuse_inode(info, swap, page);
		cond_resched();
		if (found)
			break;
	}
	mutex_unlock(&shmem_swaplist_mutex);

uncharge:
	if (!found)
		mem_cgroup_uncharge_cache_page(page);
	if (found < 0)
		error = found;
out:
	unlock_page(page);
	page_cache_release(page);
	return error;
}

/*
 * Move the page from the page cache to the swap cache.
 */
static int shmem_writepage(struct page *page, struct writeback_control *wbc)
{
	struct shmem_inode_info *info;
	swp_entry_t swap, oswap;
	struct address_space *mapping;
	pgoff_t index;
	struct inode *inode;

	BUG_ON(!PageLocked(page));
	mapping = page->mapping;
	index = page->index;
	inode = mapping->host;
	info = SHMEM_I(inode);
	if (info->flags & VM_LOCKED)
		goto redirty;
	if (!total_swap_pages)
		goto redirty;

	/*
	 * shmem_backing_dev_info's capabilities prevent regular writeback or
	 * sync from ever calling shmem_writepage; but a stacking filesystem
	 * might use ->writepage of its underlying filesystem, in which case
	 * tmpfs should write out to swap only in response to memory pressure,
	 * and not for the writeback threads or sync.
	 */
	if (!wbc->for_reclaim) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		goto redirty;
	}

	/*
	 * Disable even the toy swapping implementation, while we convert
	 * functions one by one to having swap entries in the radix tree.
	 */
	if (index < ULONG_MAX)
		goto redirty;

	swap = get_swap_page();
	if (!swap.val)
		goto redirty;

	/*
	 * Add inode to shmem_unuse()'s list of swapped-out inodes,
	 * if it's not already there.  Do it now because we cannot take
	 * mutex while holding spinlock, and must do so before the page
	 * is moved to swap cache, when its pagelock no longer protects
	 * the inode from eviction.  But don't unlock the mutex until
	 * we've taken the spinlock, because shmem_unuse_inode() will
	 * prune a !swapped inode from the swaplist under both locks.
	 */
	mutex_lock(&shmem_swaplist_mutex);
	if (list_empty(&info->swaplist))
		list_add_tail(&info->swaplist, &shmem_swaplist);

	spin_lock(&info->lock);
	mutex_unlock(&shmem_swaplist_mutex);

	oswap = shmem_get_swap(info, index);
	if (oswap.val) {
		WARN_ON_ONCE(1);	/* Still happens? Tell us about it! */
		free_swap_and_cache(oswap);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
	}
	shmem_recalc_inode(inode);

	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		delete_from_page_cache(page);
		shmem_put_swap(info, index, swap);
		info->swapped++;
		swap_shmem_alloc(swap);
		spin_unlock(&info->lock);
		BUG_ON(page_mapped(page));
		swap_writepage(page, wbc);
		return 0;
	}

	spin_unlock(&info->lock);
	swapcache_free(swap, NULL);
redirty:
	set_page_dirty(page);
	if (wbc->for_reclaim)
		return AOP_WRITEPAGE_ACTIVATE;	/* Return with page locked */
	unlock_page(page);
	return 0;
}

#ifdef CONFIG_NUMA
#ifdef CONFIG_TMPFS
static void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
	char buffer[64];

	if (!mpol || mpol->mode == MPOL_DEFAULT)
		return;		/* show nothing */

	mpol_to_str(buffer, sizeof(buffer), mpol, 1);

	seq_printf(seq, ",mpol=%s", buffer);
}

static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	struct mempolicy *mpol = NULL;
	if (sbinfo->mpol) {
		spin_lock(&sbinfo->stat_lock);	/* prevent replace/use races */
		mpol = sbinfo->mpol;
		mpol_get(mpol);
		spin_unlock(&sbinfo->stat_lock);
	}
	return mpol;
}
#endif /* CONFIG_TMPFS */

static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct mempolicy mpol, *spol;
	struct vm_area_struct pvma;

	spol = mpol_cond_copy(&mpol,
			mpol_shared_policy_lookup(&info->policy, index));

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = spol;
	return swapin_readahead(swap, gfp, &pvma, 0);
}

static struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	struct vm_area_struct pvma;

	/* Create a pseudo vma that just contains the policy */
	pvma.vm_start = 0;
	pvma.vm_pgoff = index;
	pvma.vm_ops = NULL;
	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);

	/*
	 * alloc_page_vma() will drop the shared policy reference
	 */
	return alloc_page_vma(gfp, &pvma, 0);
}
#else /* !CONFIG_NUMA */
#ifdef CONFIG_TMPFS
static inline void shmem_show_mpol(struct seq_file *seq, struct mempolicy *mpol)
{
}
#endif /* CONFIG_TMPFS */

static inline struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return swapin_readahead(swap, gfp, NULL, 0);
}

static inline struct page *shmem_alloc_page(gfp_t gfp,
			struct shmem_inode_info *info, pgoff_t index)
{
	return alloc_page(gfp);
}
#endif /* CONFIG_NUMA */

#if !defined(CONFIG_NUMA) || !defined(CONFIG_TMPFS)
static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
{
	return NULL;
}
#endif

/*
 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
 *
 * If we allocate a new one we do not mark it dirty. That's up to the
 * vm. If we swap it in we mark it dirty, and also free the swap entry,
 * since a page cannot live in both the swap and page cache.
 */
static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
	struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type)
{
	struct address_space *mapping = inode->i_mapping;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_sb_info *sbinfo;
	struct page *page;
	struct page *prealloc_page = NULL;
	swp_entry_t swap;
	int error;

	if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
		return -EFBIG;
repeat:
	page = find_lock_page(mapping, index);
	if (page) {
		/*
		 * Once we can get the page lock, it must be uptodate:
		 * if there were an error in reading back from swap,
		 * the page would not be inserted into the filecache.
		 */
		BUG_ON(!PageUptodate(page));
		goto done;
	}

	/*
	 * Try to preload while we can wait, to not make a habit of
	 * draining atomic reserves; but don't latch on to this cpu.
	 */
	error = radix_tree_preload(gfp & GFP_RECLAIM_MASK);
	if (error)
		goto out;
	radix_tree_preload_end();

	if (sgp != SGP_READ && !prealloc_page) {
		prealloc_page = shmem_alloc_page(gfp, info, index);
		if (prealloc_page) {
			SetPageSwapBacked(prealloc_page);
			if (mem_cgroup_cache_charge(prealloc_page,
					current->mm, GFP_KERNEL)) {
				page_cache_release(prealloc_page);
				prealloc_page = NULL;
			}
		}
	}

	spin_lock(&info->lock);
	shmem_recalc_inode(inode);
	swap = shmem_get_swap(info, index);
	if (swap.val) {
		/* Look it up and read it in.. */
		page = lookup_swap_cache(swap);
		if (!page) {
			spin_unlock(&info->lock);
			/* here we actually do the io */
			if (fault_type)
				*fault_type |= VM_FAULT_MAJOR;
			page = shmem_swapin(swap, gfp, info, index);
			if (!page) {
				swp_entry_t nswap = shmem_get_swap(info, index);
				if (nswap.val == swap.val) {
					error = -ENOMEM;
					goto out;
				}
				goto repeat;
			}
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}

		/* We have to do this with page locked to prevent races */
		if (!trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		if (PageWriteback(page)) {
			spin_unlock(&info->lock);
			wait_on_page_writeback(page);
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		if (!PageUptodate(page)) {
			spin_unlock(&info->lock);
			unlock_page(page);
			page_cache_release(page);
			error = -EIO;
			goto out;
		}

		error = add_to_page_cache_locked(page, mapping,
						 index, GFP_NOWAIT);
		if (error) {
			spin_unlock(&info->lock);
			if (error == -ENOMEM) {
				/*
				 * reclaim from proper memory cgroup and
				 * call memcg's OOM if needed.
				 */
				error = mem_cgroup_shmem_charge_fallback(
						page, current->mm, gfp);
				if (error) {
					unlock_page(page);
					page_cache_release(page);
					goto out;
				}
			}
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}

		delete_from_swap_cache(page);
		shmem_put_swap(info, index, (swp_entry_t){0});
		info->swapped--;
		spin_unlock(&info->lock);
		set_page_dirty(page);
		swap_free(swap);

	} else if (sgp == SGP_READ) {
		page = find_get_page(mapping, index);
		if (page && !trylock_page(page)) {
			spin_unlock(&info->lock);
			wait_on_page_locked(page);
			page_cache_release(page);
			goto repeat;
		}
		spin_unlock(&info->lock);

	} else if (prealloc_page) {
		sbinfo = SHMEM_SB(inode->i_sb);
		if (sbinfo->max_blocks) {
			if (percpu_counter_compare(&sbinfo->used_blocks,
						sbinfo->max_blocks) >= 0 ||
			    shmem_acct_block(info->flags))
				goto nospace;
			percpu_counter_inc(&sbinfo->used_blocks);
			inode->i_blocks += BLOCKS_PER_PAGE;
		} else if (shmem_acct_block(info->flags))
			goto nospace;

		page = prealloc_page;
		prealloc_page = NULL;

		swap = shmem_get_swap(info, index);
		if (swap.val)
			mem_cgroup_uncharge_cache_page(page);
		else
			error = add_to_page_cache_lru(page, mapping,
						index, GFP_NOWAIT);
		/*
		 * At add_to_page_cache_lru() failure,
		 * uncharge will be done automatically.
		 */
		if (swap.val || error) {
			shmem_unacct_blocks(info->flags, 1);
			shmem_free_blocks(inode, 1);
			spin_unlock(&info->lock);
			page_cache_release(page);
			goto repeat;
		}

		info->alloced++;
		spin_unlock(&info->lock);
		clear_highpage(page);
		flush_dcache_page(page);
		SetPageUptodate(page);
		if (sgp == SGP_DIRTY)
			set_page_dirty(page);

	} else {
		spin_unlock(&info->lock);
		error = -ENOMEM;
		goto out;
	}
done:
	*pagep = page;
	error = 0;
out:
	if (prealloc_page) {
		mem_cgroup_uncharge_cache_page(prealloc_page);
		page_cache_release(prealloc_page);
	}
	return error;

nospace:
	/*
	 * Perhaps the page was brought in from swap between find_lock_page
	 * and taking info->lock?  We allow for that at add_to_page_cache_lru,
	 * but must also avoid reporting a spurious ENOSPC while working on a
	 * full tmpfs.
	 */
	page = find_get_page(mapping, index);
	spin_unlock(&info->lock);
	if (page) {
		page_cache_release(page);
		goto repeat;
	}
	error = -ENOSPC;
	goto out;
}

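/*
 * Fault in the page at vmf->pgoff, translating errors to VM_FAULT
 * codes and accounting a major fault when the page had to be read
 * back from swap.
 */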
static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	int error;
	int ret = VM_FAULT_LOCKED;

	if (((loff_t)vmf->pgoff << PAGE_CACHE_SHIFT) >= i_size_read(inode))
		return VM_FAULT_SIGBUS;

	error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
	if (error)
		return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);

	if (ret & VM_FAULT_MAJOR) {
		count_vm_event(PGMAJFAULT);
		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
	}
	return ret;
}

#ifdef CONFIG_NUMA
static int shmem_set_policy(struct vm_area_struct *vma, struct mempolicy *mpol)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	return mpol_set_shared_policy(&SHMEM_I(inode)->policy, vma, mpol);
}

static struct mempolicy *shmem_get_policy(struct vm_area_struct *vma,
					  unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_path.dentry->d_inode;
	pgoff_t index;

	index = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	return mpol_shared_policy_lookup(&SHMEM_I(inode)->policy, index);
}
#endif

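/*
 * shmem_lock - lock (SHM_LOCK) or unlock the inode's pages in memory,
 * charging them to the user's locked-memory allowance via
 * user_shm_lock(), and marking the mapping unevictable so that
 * reclaim leaves its pages alone.
 */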
int shmem_lock(struct file *file, int lock, struct user_struct *user)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	int retval = -ENOMEM;

	spin_lock(&info->lock);
	if (lock && !(info->flags & VM_LOCKED)) {
		if (!user_shm_lock(inode->i_size, user))
			goto out_nomem;
		info->flags |= VM_LOCKED;
		mapping_set_unevictable(file->f_mapping);
	}
	if (!lock && (info->flags & VM_LOCKED) && user) {
		user_shm_unlock(inode->i_size, user);
		info->flags &= ~VM_LOCKED;
		mapping_clear_unevictable(file->f_mapping);
		scan_mapping_unevictable_pages(file->f_mapping);
	}
	retval = 0;

out_nomem:
	spin_unlock(&info->lock);
	return retval;
}

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	vma->vm_flags |= VM_CAN_NONLINEAR;
	return 0;
}

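/*
 * Allocate a new inode on this mount and set it up according to file
 * type: regular files get the shmem address_space and file operations,
 * directories the simple dir operations; symlinks defer the rest of
 * their setup to shmem_symlink().
 */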
static struct inode *shmem_get_inode(struct super_block *sb, const struct inode *dir,
				     int mode, dev_t dev, unsigned long flags)
{
	struct inode *inode;
	struct shmem_inode_info *info;
	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);

	if (shmem_reserve_inode(sb))
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		inode->i_blocks = 0;
		inode->i_mapping->backing_dev_info = &shmem_backing_dev_info;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_generation = get_seconds();
		info = SHMEM_I(inode);
		memset(info, 0, (char *)inode - (char *)info);
		spin_lock_init(&info->lock);
		info->flags = flags & VM_NORESERVE;
		INIT_LIST_HEAD(&info->swaplist);
		INIT_LIST_HEAD(&info->xattr_list);
		cache_no_acl(inode);

		switch (mode & S_IFMT) {
		default:
			inode->i_op = &shmem_special_inode_operations;
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_mapping->a_ops = &shmem_aops;
			inode->i_op = &shmem_inode_operations;
			inode->i_fop = &shmem_file_operations;
			mpol_shared_policy_init(&info->policy,
						 shmem_get_sbmpol(sbinfo));
			break;
		case S_IFDIR:
			inc_nlink(inode);
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * BOGO_DIRENT_SIZE;
			inode->i_op = &shmem_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
			break;
		case S_IFLNK:
			/*
			 * Must not load anything in the rbtree,
			 * mpol_free_shared_policy will not be called.
			 */
			mpol_shared_policy_init(&info->policy, NULL);
			break;
		}
	} else
		shmem_free_inode(sb);
	return inode;
}

#ifdef CONFIG_TMPFS
static const struct inode_operations shmem_symlink_inode_operations;
static const struct inode_operations shmem_symlink_inline_operations;

static int
shmem_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;
	return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
}

static int
shmem_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = mapping->host;

	if (pos + copied > inode->i_size)
		i_size_write(inode, pos + copied);

	set_page_dirty(page);
	unlock_page(page);
	page_cache_release(page);

	return copied;
}

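/*
 * Copy data out one page at a time via the read actor; holes of a
 * sparse file are read back as zeroes from the ZERO_PAGE.
 */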
static void do_shmem_file_read(struct file *filp, loff_t *ppos, read_descriptor_t *desc, read_actor_t actor)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct address_space *mapping = inode->i_mapping;
	pgoff_t index;
	unsigned long offset;
	enum sgp_type sgp = SGP_READ;

	/*
	 * Might this read be for a stacking filesystem?  Then when reading
	 * holes of a sparse file, we actually need to allocate those pages,
	 * and even mark them dirty, so it cannot exceed the max_blocks limit.
	 */
	if (segment_eq(get_fs(), KERNEL_DS))
		sgp = SGP_DIRTY;

	index = *ppos >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	for (;;) {
		struct page *page = NULL;
		pgoff_t end_index;
		unsigned long nr, ret;
		loff_t i_size = i_size_read(inode);

		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset)
				break;
		}

		desc->error = shmem_getpage(inode, index, &page, sgp, NULL);
		if (desc->error) {
			if (desc->error == -EINVAL)
				desc->error = 0;
			break;
		}
		if (page)
			unlock_page(page);

		/*
		 * We must evaluate after, since reads (unlike writes)
		 * are called without i_mutex protection against truncate
		 */
		nr = PAGE_CACHE_SIZE;
		i_size = i_size_read(inode);
		end_index = i_size >> PAGE_CACHE_SHIFT;
		if (index == end_index) {
			nr = i_size & ~PAGE_CACHE_MASK;
			if (nr <= offset) {
				if (page)
					page_cache_release(page);
				break;
			}
		}
		nr -= offset;

		if (page) {
			/*
			 * If users can be writing to this page using arbitrary
			 * virtual addresses, take care about potential aliasing
			 * before reading the page on the kernel side.
			 */
			if (mapping_writably_mapped(mapping))
				flush_dcache_page(page);
			/*
			 * Mark the page accessed if we read the beginning.
			 */
			if (!offset)
				mark_page_accessed(page);
		} else {
			page = ZERO_PAGE(0);
			page_cache_get(page);
		}

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret != nr || !desc->count)
			break;

		cond_resched();
	}

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	file_accessed(filp);
}

static ssize_t shmem_file_aio_read(struct kiocb *iocb,
		const struct iovec *iov, unsigned long nr_segs, loff_t pos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;
	loff_t *ppos = &iocb->ki_pos;

	retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
	if (retval)
		return retval;

	for (seg = 0; seg < nr_segs; seg++) {
		read_descriptor_t desc;

		desc.written = 0;
		desc.arg.buf = iov[seg].iov_base;
		desc.count = iov[seg].iov_len;
		if (desc.count == 0)
			continue;
		desc.error = 0;
		do_shmem_file_read(filp, ppos, &desc, file_read_actor);
		retval += desc.written;
		if (desc.error) {
			retval = retval ?: desc.error;
			break;
		}
		if (desc.count > 0)
			break;
	}
	return retval;
}

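/*
 * splice read: like the generic page cache version, but any pages not
 * already resident are brought in with shmem_getpage(), so holes of a
 * sparse file are allocated and zero-filled rather than skipped.
 */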
static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
				struct pipe_inode_info *pipe, size_t len,
				unsigned int flags)
{
	struct address_space *mapping = in->f_mapping;
	struct inode *inode = mapping->host;
	unsigned int loff, nr_pages, req_pages;
	struct page *pages[PIPE_DEF_BUFFERS];
	struct partial_page partial[PIPE_DEF_BUFFERS];
	struct page *page;
	pgoff_t index, end_index;
	loff_t isize, left;
	int error, page_nr;
	struct splice_pipe_desc spd = {
		.pages = pages,
		.partial = partial,
		.flags = flags,
		.ops = &page_cache_pipe_buf_ops,
		.spd_release = spd_release_page,
	};

	isize = i_size_read(inode);
	if (unlikely(*ppos >= isize))
		return 0;

	left = isize - *ppos;
	if (unlikely(left < len))
		len = left;

	if (splice_grow_spd(pipe, &spd))
		return -ENOMEM;

	index = *ppos >> PAGE_CACHE_SHIFT;
	loff = *ppos & ~PAGE_CACHE_MASK;
	req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	nr_pages = min(req_pages, pipe->buffers);

	spd.nr_pages = find_get_pages_contig(mapping, index,
						nr_pages, spd.pages);
	index += spd.nr_pages;
	error = 0;

	while (spd.nr_pages < nr_pages) {
		error = shmem_getpage(inode, index, &page, SGP_CACHE, NULL);
		if (error)
			break;
		unlock_page(page);
		spd.pages[spd.nr_pages++] = page;
		index++;
	}

	index = *ppos >> PAGE_CACHE_SHIFT;
	nr_pages = spd.nr_pages;
	spd.nr_pages = 0;

	for (page_nr = 0; page_nr < nr_pages; page_nr++) {
		unsigned int this_len;

		if (!len)
			break;

		this_len = min_t(unsigned long, len, PAGE_CACHE_SIZE - loff);
		page = spd.pages[page_nr];

		if (!PageUptodate(page) || page->mapping != mapping) {
			error = shmem_getpage(inode, index, &page,
							SGP_CACHE, NULL);
			if (error)
				break;
			unlock_page(page);
			page_cache_release(spd.pages[page_nr]);
			spd.pages[page_nr] = page;
		}

		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index))
			break;

		if (end_index == index) {
			unsigned int plen;

			plen = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (plen <= loff)
				break;

			this_len = min(this_len, plen - loff);
			len = this_len;
		}

		spd.partial[page_nr].offset = loff;
		spd.partial[page_nr].len = this_len;
		len -= this_len;
		loff = 0;
		spd.nr_pages++;
		index++;
	}

	while (page_nr < nr_pages)
		page_cache_release(spd.pages[page_nr++]);

	if (spd.nr_pages)
		error = splice_to_pipe(pipe, &spd);

	splice_shrink_spd(pipe, &spd);

	if (error > 0) {
		*ppos += error;
		file_accessed(in);
	}
	return error;
}

static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);

	buf->f_type = TMPFS_MAGIC;
	buf->f_bsize = PAGE_CACHE_SIZE;
	buf->f_namelen = NAME_MAX;
	if (sbinfo->max_blocks) {
		buf->f_blocks = sbinfo->max_blocks;
		buf->f_bavail =
		buf->f_bfree  = sbinfo->max_blocks -
				percpu_counter_sum(&sbinfo->used_blocks);
	}
	if (sbinfo->max_inodes) {
		buf->f_files = sbinfo->max_inodes;
		buf->f_ffree = sbinfo->free_inodes;
	}
	/* else leave those fields 0 like simple_statfs */
	return 0;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int
shmem_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = shmem_get_inode(dir->i_sb, dir, mode, dev, VM_NORESERVE);
	if (inode) {
		error = security_inode_init_security(inode, dir,
						     &dentry->d_name, NULL,
						     NULL, NULL);
		if (error) {
			if (error != -EOPNOTSUPP) {
				iput(inode);
				return error;
			}
		}
#ifdef CONFIG_TMPFS_POSIX_ACL
		error = generic_acl_init(inode, dir);
		if (error) {
			iput(inode);
			return error;
		}
#else
		error = 0;
#endif
		dir->i_size += BOGO_DIRENT_SIZE;
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry); /* Extra count - pin the dentry in core */
	}
	return error;
}

static int shmem_mkdir(struct inode *dir, struct dentry *dentry, int mode)
{
	int error;

	if ((error = shmem_mknod(dir, dentry, mode | S_IFDIR, 0)))
		return error;
	inc_nlink(dir);
	return 0;
}

static int shmem_create(struct inode *dir, struct dentry *dentry, int mode,
		struct nameidata *nd)
{
	return shmem_mknod(dir, dentry, mode | S_IFREG, 0);
}

/*
 * Link a file..
 */
static int shmem_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int ret;

	/*
	 * No ordinary (disk based) filesystem counts links as inodes;
	 * but each new link needs a new dentry, pinning lowmem, and
	 * tmpfs dentries cannot be pruned until they are unlinked.
	 */
	ret = shmem_reserve_inode(inode->i_sb);
	if (ret)
		goto out;

	dir->i_size += BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	inc_nlink(inode);
	ihold(inode);	/* New dentry reference */
	dget(dentry);	/* Extra pinning count for the created dentry */
	d_instantiate(dentry, inode);
out:
	return ret;
}

static int shmem_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	if (inode->i_nlink > 1 && !S_ISDIR(inode->i_mode))
		shmem_free_inode(inode->i_sb);

	dir->i_size -= BOGO_DIRENT_SIZE;
	inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	drop_nlink(inode);
	dput(dentry);	/* Undo the count from "create" - this does all the work */
	return 0;
}

static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
{
	if (!simple_empty(dentry))
		return -ENOTEMPTY;

	drop_nlink(dentry->d_inode);
	drop_nlink(dir);
	return shmem_unlink(dir, dentry);
}

/*
 * The VFS layer already does all the dentry stuff for rename,
 * we just have to decrement the usage count for the target if
 * it exists so that the VFS layer correctly frees it when it
 * gets overwritten.
 */
static int shmem_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
{
	struct inode *inode = old_dentry->d_inode;
	int they_are_dirs = S_ISDIR(inode->i_mode);

	if (!simple_empty(new_dentry))
		return -ENOTEMPTY;

	if (new_dentry->d_inode) {
		(void) shmem_unlink(new_dir, new_dentry);
		if (they_are_dirs)
			drop_nlink(old_dir);
	} else if (they_are_dirs) {
		drop_nlink(old_dir);
		inc_nlink(new_dir);
	}

	old_dir->i_size -= BOGO_DIRENT_SIZE;
	new_dir->i_size += BOGO_DIRENT_SIZE;
	old_dir->i_ctime = old_dir->i_mtime =
	new_dir->i_ctime = new_dir->i_mtime =
	inode->i_ctime = CURRENT_TIME;
	return 0;
}

static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	int error;
	int len;
	struct inode *inode;
	struct page *page;
	char *kaddr;
	struct shmem_inode_info *info;

	len = strlen(symname) + 1;
	if (len > PAGE_CACHE_SIZE)
		return -ENAMETOOLONG;

	inode = shmem_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0, VM_NORESERVE);
	if (!inode)
		return -ENOSPC;

	error = security_inode_init_security(inode, dir, &dentry->d_name, NULL,
					     NULL, NULL);
	if (error) {
		if (error != -EOPNOTSUPP) {
			iput(inode);
			return error;
		}
		error = 0;
	}

	info = SHMEM_I(inode);
	inode->i_size = len-1;
	if (len <= SHMEM_SYMLINK_INLINE_LEN) {
		/* do it inline */
		memcpy(info->inline_symlink, symname, len);
		inode->i_op = &shmem_symlink_inline_operations;
	} else {
		error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
		if (error) {
			iput(inode);
			return error;
		}
		inode->i_mapping->a_ops = &shmem_aops;
		inode->i_op = &shmem_symlink_inode_operations;
		kaddr = kmap_atomic(page, KM_USER0);
		memcpy(kaddr, symname, len);
		kunmap_atomic(kaddr, KM_USER0);
		set_page_dirty(page);
		unlock_page(page);
		page_cache_release(page);
	}
	dir->i_size += BOGO_DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
}

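/*
 * Symlink bodies are stored either inline in the inode (short names)
 * or in the first page of the file: follow_link kmaps that page and
 * put_link releases it again.
 */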
static void *shmem_follow_link_inline(struct dentry *dentry, struct nameidata *nd)
{
	nd_set_link(nd, SHMEM_I(dentry->d_inode)->inline_symlink);
	return NULL;
}

static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct page *page = NULL;
	int error = shmem_getpage(dentry->d_inode, 0, &page, SGP_READ, NULL);
	nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
	if (page)
		unlock_page(page);
	return page;
}

static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
{
	if (!IS_ERR(nd_get_link(nd))) {
		struct page *page = cookie;
		kunmap(page);
		mark_page_accessed(page);
		page_cache_release(page);
	}
}

#ifdef CONFIG_TMPFS_XATTR
/*
 * Superblocks without xattr inode operations may get some security.* xattr
 * support from the LSM "for free". As soon as we have any other xattrs
 * like ACLs, we also need to implement the security.* handlers at
 * filesystem level, though.
 */

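/*
 * The xattrs of a tmpfs inode are kept in a simple linked list on the
 * inode, protected by info->lock.
 */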
static int shmem_xattr_get(struct dentry *dentry, const char *name,
			   void *buffer, size_t size)
{
	struct shmem_inode_info *info;
	struct shmem_xattr *xattr;
	int ret = -ENODATA;

	info = SHMEM_I(dentry->d_inode);

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (strcmp(name, xattr->name))
			continue;

		ret = xattr->size;
		if (buffer) {
			if (size < xattr->size)
				ret = -ERANGE;
			else
				memcpy(buffer, xattr->value, xattr->size);
		}
		break;
	}
	spin_unlock(&info->lock);
	return ret;
}

static int shmem_xattr_set(struct dentry *dentry, const char *name,
			   const void *value, size_t size, int flags)
{
	struct inode *inode = dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);
	struct shmem_xattr *xattr;
	struct shmem_xattr *new_xattr = NULL;
	size_t len;
	int err = 0;

	/* value == NULL means remove */
	if (value) {
		/* wrap around? */
		len = sizeof(*new_xattr) + size;
		if (len <= sizeof(*new_xattr))
			return -ENOMEM;

		new_xattr = kmalloc(len, GFP_KERNEL);
		if (!new_xattr)
			return -ENOMEM;

		new_xattr->name = kstrdup(name, GFP_KERNEL);
		if (!new_xattr->name) {
			kfree(new_xattr);
			return -ENOMEM;
		}

		new_xattr->size = size;
		memcpy(new_xattr->value, value, size);
	}

	spin_lock(&info->lock);
	list_for_each_entry(xattr, &info->xattr_list, list) {
		if (!strcmp(name, xattr->name)) {
			if (flags & XATTR_CREATE) {
				xattr = new_xattr;
				err = -EEXIST;
			} else if (new_xattr) {
				list_replace(&xattr->list, &new_xattr->list);
			} else {
				list_del(&xattr->list);
			}
			goto out;
		}
	}
	if (flags & XATTR_REPLACE) {
		xattr = new_xattr;
		err = -ENODATA;
	} else {
		list_add(&new_xattr->list, &info->xattr_list);
		xattr = NULL;
	}
out:
	spin_unlock(&info->lock);
	if (xattr)
		kfree(xattr->name);
	kfree(xattr);
	return err;
}

bb435453 1755static const struct xattr_handler *shmem_xattr_handlers[] = {
b09e0fa4 1756#ifdef CONFIG_TMPFS_POSIX_ACL
1c7c474c
CH
1757 &generic_acl_access_handler,
1758 &generic_acl_default_handler,
b09e0fa4 1759#endif
39f0247d
AG
1760 NULL
1761};
b09e0fa4
EP
1762
1763static int shmem_xattr_validate(const char *name)
1764{
1765 struct { const char *prefix; size_t len; } arr[] = {
1766 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
1767 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
1768 };
1769 int i;
1770
1771 for (i = 0; i < ARRAY_SIZE(arr); i++) {
1772 size_t preflen = arr[i].len;
1773 if (strncmp(name, arr[i].prefix, preflen) == 0) {
1774 if (!name[preflen])
1775 return -EINVAL;
1776 return 0;
1777 }
1778 }
1779 return -EOPNOTSUPP;
1780}
1781
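A standalone userspace mirror of the check above, for illustration only (the prefix strings and lengths are spelled out in place of the XATTR_* constants): only "security." and "trusted." names with a non-empty suffix pass; a bare prefix is -EINVAL, and anything else, e.g. "user.comment", is -EOPNOTSUPP.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* standalone re-statement of shmem_xattr_validate(), illustrative only */
static int validate(const char *name)
{
	static const struct { const char *prefix; size_t len; } arr[] = {
		{ "security.", 9 },
		{ "trusted.", 8 },
	};
	size_t i;

	for (i = 0; i < sizeof(arr) / sizeof(arr[0]); i++) {
		if (strncmp(name, arr[i].prefix, arr[i].len) == 0)
			return name[arr[i].len] ? 0 : -EINVAL;
	}
	return -EOPNOTSUPP;
}

int main(void)
{
	printf("%d\n", validate("security.selinux")); /* 0 */
	printf("%d\n", validate("security."));        /* -EINVAL */
	printf("%d\n", validate("user.comment"));     /* -EOPNOTSUPP */
	return 0;
}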
1782static ssize_t shmem_getxattr(struct dentry *dentry, const char *name,
1783 void *buffer, size_t size)
1784{
1785 int err;
1786
1787 /*
1788 * If this is a request for a synthetic attribute in the system.*
 1789 * namespace, use the generic infrastructure to resolve a handler
1790 * for it via sb->s_xattr.
1791 */
1792 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1793 return generic_getxattr(dentry, name, buffer, size);
1794
1795 err = shmem_xattr_validate(name);
1796 if (err)
1797 return err;
1798
1799 return shmem_xattr_get(dentry, name, buffer, size);
1800}
1801
1802static int shmem_setxattr(struct dentry *dentry, const char *name,
1803 const void *value, size_t size, int flags)
1804{
1805 int err;
1806
1807 /*
1808 * If this is a request for a synthetic attribute in the system.*
 1809 * namespace, use the generic infrastructure to resolve a handler
1810 * for it via sb->s_xattr.
1811 */
1812 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1813 return generic_setxattr(dentry, name, value, size, flags);
1814
1815 err = shmem_xattr_validate(name);
1816 if (err)
1817 return err;
1818
1819 if (size == 0)
1820 value = ""; /* empty EA, do not remove */
1821
1822 return shmem_xattr_set(dentry, name, value, size, flags);
1823
1824}
1825
1826static int shmem_removexattr(struct dentry *dentry, const char *name)
1827{
1828 int err;
1829
1830 /*
1831 * If this is a request for a synthetic attribute in the system.*
 1832 * namespace, use the generic infrastructure to resolve a handler
1833 * for it via sb->s_xattr.
1834 */
1835 if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
1836 return generic_removexattr(dentry, name);
1837
1838 err = shmem_xattr_validate(name);
1839 if (err)
1840 return err;
1841
1842 return shmem_xattr_set(dentry, name, NULL, 0, XATTR_REPLACE);
1843}
1844
1845static bool xattr_is_trusted(const char *name)
1846{
1847 return !strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN);
1848}
1849
1850static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
1851{
1852 bool trusted = capable(CAP_SYS_ADMIN);
1853 struct shmem_xattr *xattr;
1854 struct shmem_inode_info *info;
1855 size_t used = 0;
1856
1857 info = SHMEM_I(dentry->d_inode);
1858
1859 spin_lock(&info->lock);
1860 list_for_each_entry(xattr, &info->xattr_list, list) {
1861 size_t len;
1862
1863 /* skip "trusted." attributes for unprivileged callers */
1864 if (!trusted && xattr_is_trusted(xattr->name))
1865 continue;
1866
1867 len = strlen(xattr->name) + 1;
1868 used += len;
1869 if (buffer) {
1870 if (size < used) {
1871 used = -ERANGE;
1872 break;
1873 }
1874 memcpy(buffer, xattr->name, len);
1875 buffer += len;
1876 }
1877 }
1878 spin_unlock(&info->lock);
1879
1880 return used;
1881}
1882#endif /* CONFIG_TMPFS_XATTR */
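The buffer filled above is a run of NUL-terminated names laid back to back, which is exactly what listxattr(2) hands to userspace; walking it looks like this (hypothetical path again):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

int main(void)
{
	const char *path = "/tmp/x";	/* hypothetical tmpfs file */
	ssize_t len = listxattr(path, NULL, 0);	/* probe total size */
	char *buf, *p;

	if (len <= 0)
		return 1;
	buf = malloc(len);
	if (!buf)
		return 1;
	len = listxattr(path, buf, len);
	/* the buffer holds name\0name\0...; step through each entry */
	for (p = buf; p < buf + len; p += strlen(p) + 1)
		puts(p);
	free(buf);
	return 0;
}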
1883
1884static const struct inode_operations shmem_symlink_inline_operations = {
1885 .readlink = generic_readlink,
1886 .follow_link = shmem_follow_link_inline,
1887#ifdef CONFIG_TMPFS_XATTR
1888 .setxattr = shmem_setxattr,
1889 .getxattr = shmem_getxattr,
1890 .listxattr = shmem_listxattr,
1891 .removexattr = shmem_removexattr,
1892#endif
1893};
1894
1895static const struct inode_operations shmem_symlink_inode_operations = {
1896 .readlink = generic_readlink,
1897 .follow_link = shmem_follow_link,
1898 .put_link = shmem_put_link,
1899#ifdef CONFIG_TMPFS_XATTR
1900 .setxattr = shmem_setxattr,
1901 .getxattr = shmem_getxattr,
1902 .listxattr = shmem_listxattr,
1903 .removexattr = shmem_removexattr,
39f0247d 1904#endif
b09e0fa4 1905};
39f0247d 1906
91828a40
DG
1907static struct dentry *shmem_get_parent(struct dentry *child)
1908{
1909 return ERR_PTR(-ESTALE);
1910}
1911
1912static int shmem_match(struct inode *ino, void *vfh)
1913{
1914 __u32 *fh = vfh;
1915 __u64 inum = fh[2];
1916 inum = (inum << 32) | fh[1];
1917 return ino->i_ino == inum && fh[0] == ino->i_generation;
1918}
1919
480b116c
CH
1920static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
1921 struct fid *fid, int fh_len, int fh_type)
91828a40 1922{
91828a40 1923 struct inode *inode;
480b116c
CH
1924 struct dentry *dentry = NULL;
1925 u64 inum = fid->raw[2];
1926 inum = (inum << 32) | fid->raw[1];
1927
1928 if (fh_len < 3)
1929 return NULL;
91828a40 1930
480b116c
CH
1931 inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]),
1932 shmem_match, fid->raw);
91828a40 1933 if (inode) {
480b116c 1934 dentry = d_find_alias(inode);
91828a40
DG
1935 iput(inode);
1936 }
1937
480b116c 1938 return dentry;
91828a40
DG
1939}
1940
1941static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len,
1942 int connectable)
1943{
1944 struct inode *inode = dentry->d_inode;
1945
5fe0c237
AK
1946 if (*len < 3) {
1947 *len = 3;
91828a40 1948 return 255;
5fe0c237 1949 }
91828a40 1950
1d3382cb 1951 if (inode_unhashed(inode)) {
91828a40
DG
 1952 /* Unfortunately insert_inode_hash is not idempotent,
 1953 * and since we hash inodes here rather than at creation
 1954 * time, we need a lock to ensure we only try
 1955 * to do it once.
1956 */
1957 static DEFINE_SPINLOCK(lock);
1958 spin_lock(&lock);
1d3382cb 1959 if (inode_unhashed(inode))
91828a40
DG
1960 __insert_inode_hash(inode,
1961 inode->i_ino + inode->i_generation);
1962 spin_unlock(&lock);
1963 }
1964
1965 fh[0] = inode->i_generation;
1966 fh[1] = inode->i_ino;
1967 fh[2] = ((__u64)inode->i_ino) >> 32;
1968
1969 *len = 3;
1970 return 1;
1971}
1972
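The file handle written above packs three 32-bit words: fh[0] = i_generation, fh[1] = the low half of i_ino, fh[2] = the high half. A hypothetical kernel-side helper (not in the original file) that undoes the packing, mirroring what shmem_match() does:

/* hypothetical helper, mirroring shmem_match()'s unpacking;
 * assumes <linux/types.h> for u64/__u32 */
static inline u64 shmem_fh_to_inum(const __u32 *fh)
{
	return ((u64)fh[2] << 32) | fh[1];	/* fh[0] holds i_generation */
}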
39655164 1973static const struct export_operations shmem_export_ops = {
91828a40 1974 .get_parent = shmem_get_parent,
91828a40 1975 .encode_fh = shmem_encode_fh,
480b116c 1976 .fh_to_dentry = shmem_fh_to_dentry,
91828a40
DG
1977};
1978
680d794b
AM
1979static int shmem_parse_options(char *options, struct shmem_sb_info *sbinfo,
1980 bool remount)
1da177e4
LT
1981{
1982 char *this_char, *value, *rest;
1983
b00dc3ad
HD
1984 while (options != NULL) {
1985 this_char = options;
1986 for (;;) {
1987 /*
1988 * NUL-terminate this option: unfortunately,
1989 * mount options form a comma-separated list,
1990 * but mpol's nodelist may also contain commas.
1991 */
1992 options = strchr(options, ',');
1993 if (options == NULL)
1994 break;
1995 options++;
1996 if (!isdigit(*options)) {
1997 options[-1] = '\0';
1998 break;
1999 }
2000 }
1da177e4
LT
2001 if (!*this_char)
2002 continue;
2003 if ((value = strchr(this_char,'=')) != NULL) {
2004 *value++ = 0;
2005 } else {
2006 printk(KERN_ERR
2007 "tmpfs: No value for mount option '%s'\n",
2008 this_char);
2009 return 1;
2010 }
2011
2012 if (!strcmp(this_char,"size")) {
2013 unsigned long long size;
2014 size = memparse(value,&rest);
2015 if (*rest == '%') {
2016 size <<= PAGE_SHIFT;
2017 size *= totalram_pages;
2018 do_div(size, 100);
2019 rest++;
2020 }
2021 if (*rest)
2022 goto bad_val;
680d794b
AM
2023 sbinfo->max_blocks =
2024 DIV_ROUND_UP(size, PAGE_CACHE_SIZE);
1da177e4 2025 } else if (!strcmp(this_char,"nr_blocks")) {
680d794b 2026 sbinfo->max_blocks = memparse(value, &rest);
1da177e4
LT
2027 if (*rest)
2028 goto bad_val;
2029 } else if (!strcmp(this_char,"nr_inodes")) {
680d794b 2030 sbinfo->max_inodes = memparse(value, &rest);
1da177e4
LT
2031 if (*rest)
2032 goto bad_val;
2033 } else if (!strcmp(this_char,"mode")) {
680d794b 2034 if (remount)
1da177e4 2035 continue;
680d794b 2036 sbinfo->mode = simple_strtoul(value, &rest, 8) & 07777;
1da177e4
LT
2037 if (*rest)
2038 goto bad_val;
2039 } else if (!strcmp(this_char,"uid")) {
680d794b 2040 if (remount)
1da177e4 2041 continue;
680d794b 2042 sbinfo->uid = simple_strtoul(value, &rest, 0);
1da177e4
LT
2043 if (*rest)
2044 goto bad_val;
2045 } else if (!strcmp(this_char,"gid")) {
680d794b 2046 if (remount)
1da177e4 2047 continue;
680d794b 2048 sbinfo->gid = simple_strtoul(value, &rest, 0);
1da177e4
LT
2049 if (*rest)
2050 goto bad_val;
7339ff83 2051 } else if (!strcmp(this_char,"mpol")) {
71fe804b 2052 if (mpol_parse_str(value, &sbinfo->mpol, 1))
7339ff83 2053 goto bad_val;
1da177e4
LT
2054 } else {
2055 printk(KERN_ERR "tmpfs: Bad mount option %s\n",
2056 this_char);
2057 return 1;
2058 }
2059 }
2060 return 0;
2061
2062bad_val:
2063 printk(KERN_ERR "tmpfs: Bad value '%s' for mount option '%s'\n",
2064 value, this_char);
2065 return 1;
2066
2067}
2068
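The option string parsed above arrives verbatim through the data argument of mount(2); memparse() accepts k/m/g suffixes, and a trailing % scales by total RAM. A hedged userspace sketch mounting a 64M, 4096-inode instance at a hypothetical mountpoint /mnt/demo:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* hypothetical mountpoint; the data string goes straight
	 * through shmem_parse_options() */
	if (mount("tmpfs", "/mnt/demo", "tmpfs", 0,
		  "size=64m,nr_inodes=4096,mode=1777") < 0) {
		perror("mount");
		return 1;
	}
	return 0;
}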
2069static int shmem_remount_fs(struct super_block *sb, int *flags, char *data)
2070{
2071 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
680d794b 2072 struct shmem_sb_info config = *sbinfo;
0edd73b3
HD
2073 unsigned long inodes;
2074 int error = -EINVAL;
2075
680d794b 2076 if (shmem_parse_options(data, &config, true))
0edd73b3 2077 return error;
1da177e4 2078
0edd73b3 2079 spin_lock(&sbinfo->stat_lock);
0edd73b3 2080 inodes = sbinfo->max_inodes - sbinfo->free_inodes;
7e496299 2081 if (percpu_counter_compare(&sbinfo->used_blocks, config.max_blocks) > 0)
0edd73b3 2082 goto out;
680d794b 2083 if (config.max_inodes < inodes)
0edd73b3
HD
2084 goto out;
2085 /*
2086 * Those tests also disallow limited->unlimited while any are in
2087 * use, so i_blocks will always be zero when max_blocks is zero;
2088 * but we must separately disallow unlimited->limited, because
2089 * in that case we have no record of how much is already in use.
2090 */
680d794b 2091 if (config.max_blocks && !sbinfo->max_blocks)
0edd73b3 2092 goto out;
680d794b 2093 if (config.max_inodes && !sbinfo->max_inodes)
0edd73b3
HD
2094 goto out;
2095
2096 error = 0;
680d794b 2097 sbinfo->max_blocks = config.max_blocks;
680d794b
AM
2098 sbinfo->max_inodes = config.max_inodes;
2099 sbinfo->free_inodes = config.max_inodes - inodes;
71fe804b
LS
2100
2101 mpol_put(sbinfo->mpol);
2102 sbinfo->mpol = config.mpol; /* transfers initial ref */
0edd73b3
HD
2103out:
2104 spin_unlock(&sbinfo->stat_lock);
2105 return error;
1da177e4 2106}
680d794b
AM
2107
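Remount goes through the same parser with remount=true, and the checks above make shrinking below current usage, or switching an unlimited instance to a limited one, fail with EINVAL. Continuing the hypothetical mount from the previous sketch:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* growing the limits is fine; shrinking below current usage,
	 * or going unlimited->limited, fails with EINVAL */
	if (mount("tmpfs", "/mnt/demo", "tmpfs", MS_REMOUNT,
		  "size=128m,nr_inodes=8192") < 0) {
		perror("remount");
		return 1;
	}
	return 0;
}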
2108static int shmem_show_options(struct seq_file *seq, struct vfsmount *vfs)
2109{
2110 struct shmem_sb_info *sbinfo = SHMEM_SB(vfs->mnt_sb);
2111
2112 if (sbinfo->max_blocks != shmem_default_max_blocks())
2113 seq_printf(seq, ",size=%luk",
2114 sbinfo->max_blocks << (PAGE_CACHE_SHIFT - 10));
2115 if (sbinfo->max_inodes != shmem_default_max_inodes())
2116 seq_printf(seq, ",nr_inodes=%lu", sbinfo->max_inodes);
2117 if (sbinfo->mode != (S_IRWXUGO | S_ISVTX))
2118 seq_printf(seq, ",mode=%03o", sbinfo->mode);
2119 if (sbinfo->uid != 0)
2120 seq_printf(seq, ",uid=%u", sbinfo->uid);
2121 if (sbinfo->gid != 0)
2122 seq_printf(seq, ",gid=%u", sbinfo->gid);
71fe804b 2123 shmem_show_mpol(seq, sbinfo->mpol);
680d794b
AM
2124 return 0;
2125}
2126#endif /* CONFIG_TMPFS */
1da177e4
LT
2127
2128static void shmem_put_super(struct super_block *sb)
2129{
602586a8
HD
2130 struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
2131
2132 percpu_counter_destroy(&sbinfo->used_blocks);
2133 kfree(sbinfo);
1da177e4
LT
2134 sb->s_fs_info = NULL;
2135}
2136
2b2af54a 2137int shmem_fill_super(struct super_block *sb, void *data, int silent)
1da177e4
LT
2138{
2139 struct inode *inode;
2140 struct dentry *root;
0edd73b3 2141 struct shmem_sb_info *sbinfo;
680d794b
AM
2142 int err = -ENOMEM;
2143
2144 /* Round up to L1_CACHE_BYTES to resist false sharing */
425fbf04 2145 sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
680d794b
AM
2146 L1_CACHE_BYTES), GFP_KERNEL);
2147 if (!sbinfo)
2148 return -ENOMEM;
2149
680d794b 2150 sbinfo->mode = S_IRWXUGO | S_ISVTX;
76aac0e9
DH
2151 sbinfo->uid = current_fsuid();
2152 sbinfo->gid = current_fsgid();
680d794b 2153 sb->s_fs_info = sbinfo;
1da177e4 2154
0edd73b3 2155#ifdef CONFIG_TMPFS
1da177e4
LT
2156 /*
 2157 * By default we allow only half of the physical RAM per
2158 * tmpfs instance, limiting inodes to one per page of lowmem;
2159 * but the internal instance is left unlimited.
2160 */
2161 if (!(sb->s_flags & MS_NOUSER)) {
680d794b
AM
2162 sbinfo->max_blocks = shmem_default_max_blocks();
2163 sbinfo->max_inodes = shmem_default_max_inodes();
2164 if (shmem_parse_options(data, sbinfo, false)) {
2165 err = -EINVAL;
2166 goto failed;
2167 }
1da177e4 2168 }
91828a40 2169 sb->s_export_op = &shmem_export_ops;
1da177e4
LT
2170#else
2171 sb->s_flags |= MS_NOUSER;
2172#endif
2173
0edd73b3 2174 spin_lock_init(&sbinfo->stat_lock);
602586a8
HD
2175 if (percpu_counter_init(&sbinfo->used_blocks, 0))
2176 goto failed;
680d794b 2177 sbinfo->free_inodes = sbinfo->max_inodes;
0edd73b3 2178
285b2c4f 2179 sb->s_maxbytes = MAX_LFS_FILESIZE;
1da177e4
LT
2180 sb->s_blocksize = PAGE_CACHE_SIZE;
2181 sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
2182 sb->s_magic = TMPFS_MAGIC;
2183 sb->s_op = &shmem_ops;
cfd95a9c 2184 sb->s_time_gran = 1;
b09e0fa4 2185#ifdef CONFIG_TMPFS_XATTR
39f0247d 2186 sb->s_xattr = shmem_xattr_handlers;
b09e0fa4
EP
2187#endif
2188#ifdef CONFIG_TMPFS_POSIX_ACL
39f0247d
AG
2189 sb->s_flags |= MS_POSIXACL;
2190#endif
0edd73b3 2191
454abafe 2192 inode = shmem_get_inode(sb, NULL, S_IFDIR | sbinfo->mode, 0, VM_NORESERVE);
1da177e4
LT
2193 if (!inode)
2194 goto failed;
680d794b
AM
2195 inode->i_uid = sbinfo->uid;
2196 inode->i_gid = sbinfo->gid;
1da177e4
LT
2197 root = d_alloc_root(inode);
2198 if (!root)
2199 goto failed_iput;
2200 sb->s_root = root;
2201 return 0;
2202
2203failed_iput:
2204 iput(inode);
2205failed:
2206 shmem_put_super(sb);
2207 return err;
2208}
2209
fcc234f8 2210static struct kmem_cache *shmem_inode_cachep;
1da177e4
LT
2211
2212static struct inode *shmem_alloc_inode(struct super_block *sb)
2213{
41ffe5d5
HD
2214 struct shmem_inode_info *info;
2215 info = kmem_cache_alloc(shmem_inode_cachep, GFP_KERNEL);
2216 if (!info)
1da177e4 2217 return NULL;
41ffe5d5 2218 return &info->vfs_inode;
1da177e4
LT
2219}
2220
41ffe5d5 2221static void shmem_destroy_callback(struct rcu_head *head)
fa0d7e3d
NP
2222{
2223 struct inode *inode = container_of(head, struct inode, i_rcu);
2224 INIT_LIST_HEAD(&inode->i_dentry);
2225 kmem_cache_free(shmem_inode_cachep, SHMEM_I(inode));
2226}
2227
1da177e4
LT
2228static void shmem_destroy_inode(struct inode *inode)
2229{
2230 if ((inode->i_mode & S_IFMT) == S_IFREG) {
2231 /* only struct inode is valid if it's an inline symlink */
2232 mpol_free_shared_policy(&SHMEM_I(inode)->policy);
2233 }
41ffe5d5 2234 call_rcu(&inode->i_rcu, shmem_destroy_callback);
1da177e4
LT
2235}
2236
41ffe5d5 2237static void shmem_init_inode(void *foo)
1da177e4 2238{
41ffe5d5
HD
2239 struct shmem_inode_info *info = foo;
2240 inode_init_once(&info->vfs_inode);
1da177e4
LT
2241}
2242
41ffe5d5 2243static int shmem_init_inodecache(void)
1da177e4
LT
2244{
2245 shmem_inode_cachep = kmem_cache_create("shmem_inode_cache",
2246 sizeof(struct shmem_inode_info),
41ffe5d5 2247 0, SLAB_PANIC, shmem_init_inode);
1da177e4
LT
2248 return 0;
2249}
2250
41ffe5d5 2251static void shmem_destroy_inodecache(void)
1da177e4 2252{
1a1d92c1 2253 kmem_cache_destroy(shmem_inode_cachep);
1da177e4
LT
2254}
2255
f5e54d6e 2256static const struct address_space_operations shmem_aops = {
1da177e4 2257 .writepage = shmem_writepage,
76719325 2258 .set_page_dirty = __set_page_dirty_no_writeback,
1da177e4 2259#ifdef CONFIG_TMPFS
800d15a5
NP
2260 .write_begin = shmem_write_begin,
2261 .write_end = shmem_write_end,
1da177e4 2262#endif
304dbdb7 2263 .migratepage = migrate_page,
aa261f54 2264 .error_remove_page = generic_error_remove_page,
1da177e4
LT
2265};
2266
15ad7cdc 2267static const struct file_operations shmem_file_operations = {
1da177e4
LT
2268 .mmap = shmem_mmap,
2269#ifdef CONFIG_TMPFS
2270 .llseek = generic_file_llseek,
bcd78e49 2271 .read = do_sync_read,
5402b976 2272 .write = do_sync_write,
bcd78e49 2273 .aio_read = shmem_file_aio_read,
5402b976 2274 .aio_write = generic_file_aio_write,
1b061d92 2275 .fsync = noop_fsync,
708e3508 2276 .splice_read = shmem_file_splice_read,
ae976416 2277 .splice_write = generic_file_splice_write,
1da177e4
LT
2278#endif
2279};
2280
92e1d5be 2281static const struct inode_operations shmem_inode_operations = {
94c1e62d 2282 .setattr = shmem_setattr,
f6b3ec23 2283 .truncate_range = shmem_truncate_range,
b09e0fa4
EP
2284#ifdef CONFIG_TMPFS_XATTR
2285 .setxattr = shmem_setxattr,
2286 .getxattr = shmem_getxattr,
2287 .listxattr = shmem_listxattr,
2288 .removexattr = shmem_removexattr,
2289#endif
1da177e4
LT
2290};
2291
92e1d5be 2292static const struct inode_operations shmem_dir_inode_operations = {
1da177e4
LT
2293#ifdef CONFIG_TMPFS
2294 .create = shmem_create,
2295 .lookup = simple_lookup,
2296 .link = shmem_link,
2297 .unlink = shmem_unlink,
2298 .symlink = shmem_symlink,
2299 .mkdir = shmem_mkdir,
2300 .rmdir = shmem_rmdir,
2301 .mknod = shmem_mknod,
2302 .rename = shmem_rename,
1da177e4 2303#endif
b09e0fa4
EP
2304#ifdef CONFIG_TMPFS_XATTR
2305 .setxattr = shmem_setxattr,
2306 .getxattr = shmem_getxattr,
2307 .listxattr = shmem_listxattr,
2308 .removexattr = shmem_removexattr,
2309#endif
39f0247d 2310#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 2311 .setattr = shmem_setattr,
39f0247d
AG
2312#endif
2313};
2314
92e1d5be 2315static const struct inode_operations shmem_special_inode_operations = {
b09e0fa4
EP
2316#ifdef CONFIG_TMPFS_XATTR
2317 .setxattr = shmem_setxattr,
2318 .getxattr = shmem_getxattr,
2319 .listxattr = shmem_listxattr,
2320 .removexattr = shmem_removexattr,
2321#endif
39f0247d 2322#ifdef CONFIG_TMPFS_POSIX_ACL
94c1e62d 2323 .setattr = shmem_setattr,
39f0247d 2324#endif
1da177e4
LT
2325};
2326
759b9775 2327static const struct super_operations shmem_ops = {
1da177e4
LT
2328 .alloc_inode = shmem_alloc_inode,
2329 .destroy_inode = shmem_destroy_inode,
2330#ifdef CONFIG_TMPFS
2331 .statfs = shmem_statfs,
2332 .remount_fs = shmem_remount_fs,
680d794b 2333 .show_options = shmem_show_options,
1da177e4 2334#endif
1f895f75 2335 .evict_inode = shmem_evict_inode,
1da177e4
LT
2336 .drop_inode = generic_delete_inode,
2337 .put_super = shmem_put_super,
2338};
2339
f0f37e2f 2340static const struct vm_operations_struct shmem_vm_ops = {
54cb8821 2341 .fault = shmem_fault,
1da177e4
LT
2342#ifdef CONFIG_NUMA
2343 .set_policy = shmem_set_policy,
2344 .get_policy = shmem_get_policy,
2345#endif
2346};
2347
3c26ff6e
AV
2348static struct dentry *shmem_mount(struct file_system_type *fs_type,
2349 int flags, const char *dev_name, void *data)
1da177e4 2350{
3c26ff6e 2351 return mount_nodev(fs_type, flags, data, shmem_fill_super);
1da177e4
LT
2352}
2353
41ffe5d5 2354static struct file_system_type shmem_fs_type = {
1da177e4
LT
2355 .owner = THIS_MODULE,
2356 .name = "tmpfs",
3c26ff6e 2357 .mount = shmem_mount,
1da177e4
LT
2358 .kill_sb = kill_litter_super,
2359};
1da177e4 2360
41ffe5d5 2361int __init shmem_init(void)
1da177e4
LT
2362{
2363 int error;
2364
e0bf68dd
PZ
2365 error = bdi_init(&shmem_backing_dev_info);
2366 if (error)
2367 goto out4;
2368
41ffe5d5 2369 error = shmem_init_inodecache();
1da177e4
LT
2370 if (error)
2371 goto out3;
2372
41ffe5d5 2373 error = register_filesystem(&shmem_fs_type);
1da177e4
LT
2374 if (error) {
2375 printk(KERN_ERR "Could not register tmpfs\n");
2376 goto out2;
2377 }
95dc112a 2378
41ffe5d5
HD
2379 shm_mnt = vfs_kern_mount(&shmem_fs_type, MS_NOUSER,
2380 shmem_fs_type.name, NULL);
1da177e4
LT
2381 if (IS_ERR(shm_mnt)) {
2382 error = PTR_ERR(shm_mnt);
2383 printk(KERN_ERR "Could not kern_mount tmpfs\n");
2384 goto out1;
2385 }
2386 return 0;
2387
2388out1:
41ffe5d5 2389 unregister_filesystem(&shmem_fs_type);
1da177e4 2390out2:
41ffe5d5 2391 shmem_destroy_inodecache();
1da177e4 2392out3:
e0bf68dd
PZ
2393 bdi_destroy(&shmem_backing_dev_info);
2394out4:
1da177e4
LT
2395 shm_mnt = ERR_PTR(error);
2396 return error;
2397}
853ac43a 2398
87946a72
DN
2399#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2400/**
41ffe5d5 2401 * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
87946a72 2402 * @inode: the inode to be searched
41ffe5d5 2403 * @index: the page offset to be searched
87946a72 2404 * @pagep: where to store the found page, if any
41ffe5d5 2405 * @swapp: where to store the found swap entry, if any
87946a72
DN
2406 *
 2407 * If a page is found, its refcount is incremented. The caller is
 2408 * responsible for dropping that reference.
2409 */
41ffe5d5
HD
2410void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
2411 struct page **pagep, swp_entry_t *swapp)
87946a72 2412{
87946a72 2413 struct shmem_inode_info *info = SHMEM_I(inode);
41ffe5d5
HD
2414 struct page *page = NULL;
2415 swp_entry_t swap = {0};
87946a72 2416
41ffe5d5 2417 if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
87946a72
DN
2418 goto out;
2419
2420 spin_lock(&info->lock);
87946a72 2421#ifdef CONFIG_SWAP
41ffe5d5
HD
2422 swap = shmem_get_swap(info, index);
2423 if (swap.val)
2424 page = find_get_page(&swapper_space, swap.val);
285b2c4f 2425 else
87946a72 2426#endif
41ffe5d5 2427 page = find_get_page(inode->i_mapping, index);
87946a72
DN
2428 spin_unlock(&info->lock);
2429out:
2430 *pagep = page;
41ffe5d5 2431 *swapp = swap;
87946a72
DN
2432}
2433#endif
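A minimal kernel-side sketch of a caller, assuming the usual headers; inspect_shmem_slot() is a hypothetical name. The contract, per the code above: *swapp is set when the slot is out on swap, and a non-NULL *pagep (the page-cache page, or the swapcache page when swapped) carries a reference the caller must drop.

/* hypothetical kernel-side caller, for illustration only */
static void inspect_shmem_slot(struct inode *inode, pgoff_t index)
{
	struct page *page;
	swp_entry_t swap;

	mem_cgroup_get_shmem_target(inode, index, &page, &swap);
	if (swap.val) {
		/* data is out on swap; page, if set, is the swapcache page */
	}
	if (page)
		page_cache_release(page);	/* drop the reference taken above */
}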
2434
853ac43a
MM
2435#else /* !CONFIG_SHMEM */
2436
2437/*
2438 * tiny-shmem: simple shmemfs and tmpfs using ramfs code
2439 *
 2440 * This is intended for small systems where the benefits of the full
2441 * shmem code (swap-backed and resource-limited) are outweighed by
2442 * their complexity. On systems without swap this code should be
2443 * effectively equivalent, but much lighter weight.
2444 */
2445
2446#include <linux/ramfs.h>
2447
41ffe5d5 2448static struct file_system_type shmem_fs_type = {
853ac43a 2449 .name = "tmpfs",
3c26ff6e 2450 .mount = ramfs_mount,
853ac43a
MM
2451 .kill_sb = kill_litter_super,
2452};
2453
41ffe5d5 2454int __init shmem_init(void)
853ac43a 2455{
41ffe5d5 2456 BUG_ON(register_filesystem(&shmem_fs_type) != 0);
853ac43a 2457
41ffe5d5 2458 shm_mnt = kern_mount(&shmem_fs_type);
853ac43a
MM
2459 BUG_ON(IS_ERR(shm_mnt));
2460
2461 return 0;
2462}
2463
41ffe5d5 2464int shmem_unuse(swp_entry_t swap, struct page *page)
853ac43a
MM
2465{
2466 return 0;
2467}
2468
3f96b79a
HD
2469int shmem_lock(struct file *file, int lock, struct user_struct *user)
2470{
2471 return 0;
2472}
2473
41ffe5d5 2474void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
94c1e62d 2475{
41ffe5d5 2476 truncate_inode_pages_range(inode->i_mapping, lstart, lend);
94c1e62d
HD
2477}
2478EXPORT_SYMBOL_GPL(shmem_truncate_range);
2479
87946a72
DN
2480#ifdef CONFIG_CGROUP_MEM_RES_CTLR
2481/**
41ffe5d5 2482 * mem_cgroup_get_shmem_target - find page or swap assigned to the shmem file
87946a72 2483 * @inode: the inode to be searched
41ffe5d5 2484 * @index: the page offset to be searched
87946a72 2485 * @pagep: where to store the found page, if any
41ffe5d5 2486 * @swapp: where to store the found swap entry, if any
87946a72
DN
2487 *
 2488 * If a page is found, its refcount is incremented. The caller is
 2489 * responsible for dropping that reference.
2490 */
41ffe5d5
HD
2491void mem_cgroup_get_shmem_target(struct inode *inode, pgoff_t index,
2492 struct page **pagep, swp_entry_t *swapp)
87946a72
DN
2493{
2494 struct page *page = NULL;
2495
41ffe5d5 2496 if ((index << PAGE_CACHE_SHIFT) >= i_size_read(inode))
87946a72 2497 goto out;
41ffe5d5 2498 page = find_get_page(inode->i_mapping, index);
87946a72
DN
2499out:
2500 *pagep = page;
41ffe5d5 2501 *swapp = (swp_entry_t){0};
87946a72
DN
2502}
2503#endif
2504
0b0a0806
HD
2505#define shmem_vm_ops generic_file_vm_ops
2506#define shmem_file_operations ramfs_file_operations
454abafe 2507#define shmem_get_inode(sb, dir, mode, dev, flags) ramfs_get_inode(sb, dir, mode, dev)
0b0a0806
HD
2508#define shmem_acct_size(flags, size) 0
2509#define shmem_unacct_size(flags, size) do {} while (0)
853ac43a
MM
2510
2511#endif /* CONFIG_SHMEM */
2512
2513/* common code */
1da177e4 2514
46711810 2515/**
1da177e4 2516 * shmem_file_setup - get an unlinked file living in tmpfs
1da177e4
LT
 2517 * @name: name for dentry (to be seen in /proc/<pid>/maps)
2518 * @size: size to be set for the file
0b0a0806 2519 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
1da177e4 2520 */
168f5ac6 2521struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
1da177e4
LT
2522{
2523 int error;
2524 struct file *file;
2525 struct inode *inode;
2c48b9c4
AV
2526 struct path path;
2527 struct dentry *root;
1da177e4
LT
2528 struct qstr this;
2529
2530 if (IS_ERR(shm_mnt))
2531 return (void *)shm_mnt;
2532
285b2c4f 2533 if (size < 0 || size > MAX_LFS_FILESIZE)
1da177e4
LT
2534 return ERR_PTR(-EINVAL);
2535
2536 if (shmem_acct_size(flags, size))
2537 return ERR_PTR(-ENOMEM);
2538
2539 error = -ENOMEM;
2540 this.name = name;
2541 this.len = strlen(name);
2542 this.hash = 0; /* will go */
2543 root = shm_mnt->mnt_root;
2c48b9c4
AV
2544 path.dentry = d_alloc(root, &this);
2545 if (!path.dentry)
1da177e4 2546 goto put_memory;
2c48b9c4 2547 path.mnt = mntget(shm_mnt);
1da177e4 2548
1da177e4 2549 error = -ENOSPC;
454abafe 2550 inode = shmem_get_inode(root->d_sb, NULL, S_IFREG | S_IRWXUGO, 0, flags);
1da177e4 2551 if (!inode)
4b42af81 2552 goto put_dentry;
1da177e4 2553
2c48b9c4 2554 d_instantiate(path.dentry, inode);
1da177e4
LT
2555 inode->i_size = size;
2556 inode->i_nlink = 0; /* It is unlinked */
853ac43a
MM
2557#ifndef CONFIG_MMU
2558 error = ramfs_nommu_expand_for_mapping(inode, size);
2559 if (error)
4b42af81 2560 goto put_dentry;
853ac43a 2561#endif
4b42af81
AV
2562
2563 error = -ENFILE;
2c48b9c4 2564 file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
4b42af81
AV
2565 &shmem_file_operations);
2566 if (!file)
2567 goto put_dentry;
2568
1da177e4
LT
2569 return file;
2570
1da177e4 2571put_dentry:
2c48b9c4 2572 path_put(&path);
1da177e4
LT
2573put_memory:
2574 shmem_unacct_size(flags, size);
2575 return ERR_PTR(error);
2576}
395e0ddc 2577EXPORT_SYMBOL_GPL(shmem_file_setup);
1da177e4 2578
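A kernel-side sketch of the typical call pattern, assuming the usual headers (linux/err.h, linux/mm.h); make_scratch_file() is a hypothetical name. Because the dentry is never linked, the final fput() tears the whole object down:

/* illustrative kernel-side sketch */
static struct file *make_scratch_file(void)
{
	struct file *filp;

	filp = shmem_file_setup("scratch", 1024 * 1024, VM_NORESERVE);
	if (IS_ERR(filp))
		return filp;
	/* ... use filp; the file has no directory entry, so the
	 * last fput(filp) frees everything ... */
	return filp;
}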
46711810 2579/**
1da177e4 2580 * shmem_zero_setup - setup a shared anonymous mapping
1da177e4
LT
2581 * @vma: the vma to be mmapped is prepared by do_mmap_pgoff
2582 */
2583int shmem_zero_setup(struct vm_area_struct *vma)
2584{
2585 struct file *file;
2586 loff_t size = vma->vm_end - vma->vm_start;
2587
2588 file = shmem_file_setup("dev/zero", size, vma->vm_flags);
2589 if (IS_ERR(file))
2590 return PTR_ERR(file);
2591
2592 if (vma->vm_file)
2593 fput(vma->vm_file);
2594 vma->vm_file = file;
2595 vma->vm_ops = &shmem_vm_ops;
bee4c36a 2596 vma->vm_flags |= VM_CAN_NONLINEAR;
1da177e4
LT
2597 return 0;
2598}
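From userspace, the path into shmem_zero_setup() is a MAP_SHARED mapping of /dev/zero (shared anonymous mappings take the same route), which is why the backing object above is named "dev/zero". A minimal sketch:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	/* a shared mapping of /dev/zero reaches shmem_zero_setup() */
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (p == MAP_FAILED)
		return 1;
	p[0] = 1;	/* writable, and shared across fork() */
	munmap(p, 4096);
	return 0;
}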
d9d90e5e
HD
2599
2600/**
2601 * shmem_read_mapping_page_gfp - read into page cache, using specified page allocation flags.
2602 * @mapping: the page's address_space
2603 * @index: the page index
2604 * @gfp: the page allocator flags to use if allocating
2605 *
2606 * This behaves as a tmpfs "read_cache_page_gfp(mapping, index, gfp)",
2607 * with any new page allocations done using the specified allocation flags.
2608 * But read_cache_page_gfp() uses the ->readpage() method: which does not
2609 * suit tmpfs, since it may have pages in swapcache, and needs to find those
2610 * for itself; although drivers/gpu/drm i915 and ttm rely upon this support.
2611 *
68da9f05
HD
2612 * i915_gem_object_get_pages_gtt() mixes __GFP_NORETRY | __GFP_NOWARN in
2613 * with the mapping_gfp_mask(), to avoid OOMing the machine unnecessarily.
d9d90e5e
HD
2614 */
2615struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
2616 pgoff_t index, gfp_t gfp)
2617{
68da9f05
HD
2618#ifdef CONFIG_SHMEM
2619 struct inode *inode = mapping->host;
9276aad6 2620 struct page *page;
68da9f05
HD
2621 int error;
2622
2623 BUG_ON(mapping->a_ops != &shmem_aops);
2624 error = shmem_getpage_gfp(inode, index, &page, SGP_CACHE, gfp, NULL);
2625 if (error)
2626 page = ERR_PTR(error);
2627 else
2628 unlock_page(page);
2629 return page;
2630#else
2631 /*
2632 * The tiny !SHMEM case uses ramfs without swap
2633 */
d9d90e5e 2634 return read_cache_page_gfp(mapping, index, gfp);
68da9f05 2635#endif
d9d90e5e
HD
2636}
2637EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
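A kernel-side sketch of the i915-style call pattern mentioned in the comment above; grab_shmem_page() is a hypothetical name, and the gfp mixing follows the docstring (assumes linux/pagemap.h for mapping_gfp_mask()):

/* illustrative kernel-side sketch */
static struct page *grab_shmem_page(struct address_space *mapping,
				    pgoff_t index)
{
	gfp_t gfp = mapping_gfp_mask(mapping) | __GFP_NORETRY | __GFP_NOWARN;
	struct page *page;

	page = shmem_read_mapping_page_gfp(mapping, index, gfp);
	if (IS_ERR(page))
		return page;	/* allocation or I/O error */
	/* page comes back uptodate, unlocked, with a reference held */
	return page;
}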