/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>		/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	kuid_t   uid;
	kgid_t   gid;
	umode_t mode;
	long	nr_blocks;
	long	nr_inodes;
	struct hstate *hstate;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_err,	NULL},
};
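
/*
 * Example (illustrative values; assumes a 2 MB default huge page size):
 *
 *	mount -t hugetlbfs -o size=512M,nr_inodes=64,mode=0770,pagesize=2M \
 *		none /mnt/huge
 *
 * "size" may also be given as a percentage of the huge page pool,
 * e.g. size=50%.
 */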

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

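/*
 * mmap() a hugetlbfs file: install hugetlb_vm_ops and reserve huge pages
 * for the whole range up front via hugetlb_reserve_pages(), so that later
 * faults do not fail for lack of reserved pages.  A writable mapping that
 * extends past EOF grows i_size to cover it.
 */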
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);

	mutex_lock(&inode->i_mutex);
	file_accessed(file);

	ret = -ENOMEM;
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	hugetlb_prefault_arch_hook(vma->vm_mm);
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		inode->i_size = len;
out:
	mutex_unlock(&inode->i_mutex);

	return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

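/*
 * Copy as much as possible of one huge page to the iterator: the compound
 * page is walked in PAGE_CACHE_SIZE chunks, handing copy_page_to_iter()
 * one subpage at a time.  Returns the number of bytes copied.
 */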
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_CACHE_SHIFT;
	offset = offset & ~PAGE_CACHE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_CACHE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			page_cache_release(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

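/*
 * hugetlbfs files cannot be written through the page cache; writes happen
 * only through mmap().  ->write_begin therefore always fails, and
 * ->write_end should never be reached.
 */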
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

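/*
 * Drop every page from @lstart onward out of the page cache, then return
 * the reservations backing the freed range via hugetlb_unreserve_pages().
 */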
static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	struct pagevec pvec;
	pgoff_t next;
	int i, freed = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (1) {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			if (page->index > next)
				next = page->index;
			++next;
			truncate_huge_page(page);
			unlock_page(page);
			freed++;
		}
		huge_pagevec_release(&pvec);
	}
	BUG_ON(!lstart && mapping->nrpages);
	hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	truncate_hugepages(inode, 0);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

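/*
 * Unmap, from every VMA in the file's interval tree, the part of the
 * mapping that lies at or beyond the new end of file at @pgoff.
 */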
static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
	struct vm_area_struct *vma;

	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
		unsigned long v_offset;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < pgoff)
			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		unmap_hugepage_range(vma, vma->vm_start + v_offset,
				     vma->vm_end, NULL);
	}
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
	i_mmap_unlock_write(mapping);
	truncate_hugepages(inode, offset);
	return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		error = -EINVAL;
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

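/*
 * Build the root directory inode for a new mount, owned and moded
 * according to the parsed mount options.
 */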
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		info = HUGETLBFS_I(inode);
		mpol_shared_policy_init(&info->policy, NULL);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info;
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
		inode->i_mapping->private_data = resv_map;
		info = HUGETLBFS_I(inode);
		/*
		 * The policy is initialized here even if we are creating a
		 * private inode because initialization simply creates an
		 * empty rb tree and calls spin_lock_init(), later when we
		 * call mpol_free_shared_policy() it will just return because
		 * the rb tree will still be empty.
		 */
		mpol_shared_policy_init(&info->policy, NULL);
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	migrate_page_copy(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

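/*
 * Report usage in units of huge pages.  Block and inode limits come from
 * the per-mount subpool when "size=" was given; otherwise they are left
 * at zero, matching simple_statfs().
 */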
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

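/*
 * Inode accounting against the "nr_inodes=" limit.  A negative
 * free_inodes value means no limit was set (the default).
 */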
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}
	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage	= hugetlbfs_migrate_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= generic_show_options,
};

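/*
 * Parse mount options into @pconfig.  "size=" is converted to a count of
 * huge pages only after all options are seen, since both the page size
 * and a percentage size depend on the selected hstate.
 */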
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long size = 0;
	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			size = memparse(args[0].from, &rest);
			setsize = SIZE_STD;
			if (*rest == '%')
				setsize = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/* Do size after hstate is set up */
	if (setsize > NO_SIZE) {
		struct hstate *h = pconfig->hstate;
		if (setsize == SIZE_PERCENT) {
			size <<= huge_page_shift(h);
			size *= h->max_huge_pages;
			do_div(size, 100);
		}
		pconfig->nr_blocks = (size >> huge_page_shift(h));
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

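/*
 * Fill in a hugetlbfs superblock: apply mount options, set up the
 * per-mount sbinfo (with a subpool when "size=" limits the mount), and
 * allocate the root inode.
 */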
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	save_mount_options(sb, data);

	config.nr_blocks = -1;			/* No limit on size by default */
	config.nr_inodes = -1;			/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	if (config.nr_blocks != -1) {
		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

/*
 * Note that the size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages() reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

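/*
 * Register the filesystem and create one internal vfsmount per supported
 * huge page size, so hugetlb_file_setup() always has a mount to create
 * anonymous files on.  Mounts for non-default hstates are optional.
 */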
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, 0, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
	struct hstate *h;
	int i;

	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(hugetlbfs_inode_cachep);
	i = 0;
	for_each_hstate(h)
		kern_unmount(hugetlbfs_vfsmount[i++]);
	unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");