fs/hugetlbfs/inode.c (as viewed at commit "hugetlbfs: check for pgoff value overflow")
/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        struct hstate   *hstate;
        long            max_hpages;
        long            nr_inodes;
        long            min_hpages;
        kuid_t          uid;
        kgid_t          gid;
        umode_t         mode;
};

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize, Opt_min_size,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size,      "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode,      "mode=%o"},
        {Opt_uid,       "uid=%u"},
        {Opt_gid,       "gid=%u"},
        {Opt_pagesize,  "pagesize=%s"},
        {Opt_min_size,  "min_size=%s"},
        {Opt_err,       NULL},
};

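/*
 * Illustrative usage (added for this listing, not part of the original file;
 * the mount point is hypothetical): the tokens above are what a mount command
 * may pass, e.g. on a kernel with 2MB huge pages configured:
 *
 *   mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,uid=1000,mode=0770 \
 *         none /mnt/huge
 *
 * size= and min_size= may also be given as a percentage of the huge page
 * pool, e.g. size=50%, which is handled by hugetlbfs_size_to_hpages() later
 * in this file.
 */
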
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
        vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
                                                        index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
        mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
                                        struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
        (((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

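/*
 * Worked example (added for illustration, not in the original file): with
 * PAGE_SHIFT == 12 and BITS_PER_LONG == 64, PGOFF_LOFFT_MAX is 0x1fff << 51,
 * i.e. the top 13 bits of a page offset.  If bit 51 of vm_pgoff is set, the
 * byte offset (vm_pgoff << PAGE_SHIFT) lands on bit 63, the sign bit of
 * loff_t; any higher bit does not fit in 64 bits at all.  Either way the
 * byte offset is no longer representable, so such mappings are rejected in
 * hugetlbfs_file_mmap() below.
 */
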
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range.  If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        /*
         * page based offset in vm_pgoff could be sufficiently large to
         * overflow a (l)off_t when converted to byte offset.
         */
        if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
                return -EINVAL;

        /* must be huge page aligned */
        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
        /* check for overflow */
        if (len < vma_len)
                return -EINVAL;

        inode_lock(inode);
        file_accessed(file);

        ret = -ENOMEM;
        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                i_size_write(inode, len);
out:
        inode_unlock(inode);

        return ret;
}

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
#endif

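/*
 * Illustration (added, not part of the original file): for 2MB huge pages on
 * a 4KB base-page architecture, PAGE_MASK & ~huge_page_mask(h) == 0x1ff000,
 * so vm_unmapped_area() above is asked for addresses whose bits 12..20 are
 * zero, i.e. 2MB-aligned mappings.
 */
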
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        struct iov_iter *to, unsigned long size)
{
        size_t copied = 0;
        int i, chunksize;

        /* Find which 4k chunk and offset within that chunk */
        i = offset >> PAGE_SHIFT;
        offset = offset & ~PAGE_MASK;

        while (size) {
                size_t n;
                chunksize = PAGE_SIZE;
                if (offset)
                        chunksize -= offset;
                if (chunksize > size)
                        chunksize = size;
                n = copy_page_to_iter(&page[i], offset, chunksize, to);
                copied += n;
                if (n != chunksize)
                        return copied;
                offset = 0;
                size -= chunksize;
                i++;
        }
        return copied;
}

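/*
 * Worked example (added for illustration, not in the original file): a read
 * starting at byte offset 5000 within a 2MB huge page begins in base page
 * i = 5000 >> 12 = 1 with an in-page offset of 5000 & ~PAGE_MASK = 904, and
 * then walks the remaining 4KB subpages one chunk at a time until size bytes
 * have been copied or copy_page_to_iter() falls short.
 */
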
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
                        put_page(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
        ClearPageDirty(page);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
        struct vm_area_struct *vma;

        /*
         * end == 0 indicates that the entire range after
         * start should be unmapped.
         */
        vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
                unsigned long v_offset;
                unsigned long v_end;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond the 4GB.
                 */
                if (vma->vm_pgoff < start)
                        v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                if (!end)
                        v_end = vma->vm_end;
                else {
                        v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
                                + vma->vm_start;
                        if (v_end > vma->vm_end)
                                v_end = vma->vm_end;
                }

                unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
                                        NULL);
        }
}

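/*
 * Illustration (added, not part of the original file): start and end here are
 * in PAGE_SIZE units.  With 2MB huge pages, truncating a file to 4MB passes
 * start = 4MB >> PAGE_SHIFT = 1024; for a vma with vm_pgoff == 0 this gives
 * v_offset = 1024 << PAGE_SHIFT = 4MB, so only [vma->vm_start + 4MB,
 * vma->vm_end) is unmapped and the first two huge pages stay mapped.
 */
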
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *      In this case, we first scan the range and release found pages.
 *      After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *      maps and global counts.  Page faults can not race with truncation
 *      in this routine.  hugetlb_no_page() prevents page faults in the
 *      truncated range.  It checks i_size before allocation, and again after
 *      with the page table lock for the page held.  The same lock must be
 *      acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *      In the hole punch case we scan the range and release found pages.
 *      Only when releasing a page is the associated region/reserv map
 *      deleted.  Region/reserv maps for ranges without associated pages
 *      are not modified.  Page faults can race with hole punch.
 *      This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
                                   loff_t lend)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        const pgoff_t end = lend >> huge_page_shift(h);
        struct vm_area_struct pseudo_vma;
        struct pagevec pvec;
        pgoff_t next, index;
        int i, freed = 0;
        bool truncate_op = (lend == LLONG_MAX);

        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pagevec_init(&pvec);
        next = start;
        while (next < end) {
                /*
                 * When no more pages are found, we are done.
                 */
                if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
                        break;

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];
                        u32 hash;

                        index = page->index;
                        hash = hugetlb_fault_mutex_hash(h, current->mm,
                                                        &pseudo_vma,
                                                        mapping, index, 0);
                        mutex_lock(&hugetlb_fault_mutex_table[hash]);

                        /*
                         * If page is mapped, it was faulted in after being
                         * unmapped in caller.  Unmap (again) now after taking
                         * the fault mutex.  The mutex will prevent faults
                         * until we finish removing the page.
                         *
                         * This race can only happen in the hole punch case.
                         * Getting here in a truncate operation is a bug.
                         */
                        if (unlikely(page_mapped(page))) {
                                BUG_ON(truncate_op);

                                i_mmap_lock_write(mapping);
                                hugetlb_vmdelete_list(&mapping->i_mmap,
                                        index * pages_per_huge_page(h),
                                        (index + 1) * pages_per_huge_page(h));
                                i_mmap_unlock_write(mapping);
                        }

                        lock_page(page);
                        /*
                         * We must free the huge page and remove from page
                         * cache (remove_huge_page) BEFORE removing the
                         * region/reserve map (hugetlb_unreserve_pages).  In
                         * rare out of memory conditions, removal of the
                         * region/reserve map could fail.  Correspondingly,
                         * the subpool and global reserve usage count may need
                         * to be adjusted.
                         */
                        VM_BUG_ON(PagePrivate(page));
                        remove_huge_page(page);
                        freed++;
                        if (!truncate_op) {
                                if (unlikely(hugetlb_unreserve_pages(inode,
                                                        index, index + 1, 1)))
                                        hugetlb_fix_reserve_counts(inode);
                        }

                        unlock_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                }
                huge_pagevec_release(&pvec);
                cond_resched();
        }

        if (truncate_op)
                (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        remove_inode_hugepages(inode, 0, LLONG_MAX);
        resv_map = (struct resv_map *)inode->i_mapping->private_data;
        /* root inode doesn't have the resv_map, so we should check it */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
        i_mmap_unlock_write(mapping);
        remove_inode_hugepages(inode, offset, LLONG_MAX);
        return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
        struct hstate *h = hstate_inode(inode);
        loff_t hpage_size = huge_page_size(h);
        loff_t hole_start, hole_end;

        /*
         * For hole punch round up the beginning offset of the hole and
         * round down the end.
         */
        hole_start = round_up(offset, hpage_size);
        hole_end = round_down(offset + len, hpage_size);

        if (hole_end > hole_start) {
                struct address_space *mapping = inode->i_mapping;
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode_lock(inode);

                /* protected by i_mutex */
                if (info->seals & F_SEAL_WRITE) {
                        inode_unlock(inode);
                        return -EPERM;
                }

                i_mmap_lock_write(mapping);
                if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
                        hugetlb_vmdelete_list(&mapping->i_mmap,
                                                hole_start >> PAGE_SHIFT,
                                                hole_end >> PAGE_SHIFT);
                i_mmap_unlock_write(mapping);
                remove_inode_hugepages(inode, hole_start, hole_end);
                inode_unlock(inode);
        }

        return 0;
}

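/*
 * Worked example (added for illustration, not in the original file): with
 * 2MB huge pages, a hole punch at offset = 3MB, len = 6MB rounds to
 * hole_start = 4MB and hole_end = 8MB, so only the huge pages fully inside
 * [4MB, 8MB) are removed; the partially covered pages at either end are left
 * intact.
 */
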
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
                                loff_t len)
{
        struct inode *inode = file_inode(file);
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);
        struct vm_area_struct pseudo_vma;
        struct mm_struct *mm = current->mm;
        loff_t hpage_size = huge_page_size(h);
        unsigned long hpage_shift = huge_page_shift(h);
        pgoff_t start, index, end;
        int error;
        u32 hash;

        if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
                return -EOPNOTSUPP;

        if (mode & FALLOC_FL_PUNCH_HOLE)
                return hugetlbfs_punch_hole(inode, offset, len);

        /*
         * Default preallocate case.
         * For this range, start is rounded down and end is rounded up
         * as well as being converted to page offsets.
         */
        start = offset >> hpage_shift;
        end = (offset + len + hpage_size - 1) >> hpage_shift;

        inode_lock(inode);

        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
        error = inode_newsize_ok(inode, offset + len);
        if (error)
                goto out;

        if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
                error = -EPERM;
                goto out;
        }

        /*
         * Initialize a pseudo vma as this is required by the huge page
         * allocation routines.  If NUMA is configured, use page index
         * as input to create an allocation policy.
         */
        memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
        pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
        pseudo_vma.vm_file = file;

        for (index = start; index < end; index++) {
                /*
                 * This is supposed to be the vaddr where the page is being
                 * faulted in, but we have no vaddr here.
                 */
                struct page *page;
                unsigned long addr;
                int avoid_reserve = 0;

                cond_resched();

                /*
                 * fallocate(2) manpage permits EINTR; we may have been
                 * interrupted because we are using up too much memory.
                 */
                if (signal_pending(current)) {
                        error = -EINTR;
                        break;
                }

                /* Set numa allocation policy based on index */
                hugetlb_set_vma_policy(&pseudo_vma, inode, index);

                /* addr is the offset within the file (zero based) */
                addr = index * hpage_size;

                /* mutex taken here, fault path and hole punch */
                hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
                                                index, addr);
                mutex_lock(&hugetlb_fault_mutex_table[hash]);

                /* See if already present in mapping to avoid alloc/free */
                page = find_get_page(mapping, index);
                if (page) {
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        hugetlb_drop_vma_policy(&pseudo_vma);
                        continue;
                }

                /* Allocate page and add to page cache */
                page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
                hugetlb_drop_vma_policy(&pseudo_vma);
                if (IS_ERR(page)) {
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        error = PTR_ERR(page);
                        goto out;
                }
                clear_huge_page(page, addr, pages_per_huge_page(h));
                __SetPageUptodate(page);
                error = huge_add_to_page_cache(page, mapping, index);
                if (unlikely(error)) {
                        put_page(page);
                        mutex_unlock(&hugetlb_fault_mutex_table[hash]);
                        goto out;
                }

                mutex_unlock(&hugetlb_fault_mutex_table[hash]);

                /*
                 * unlock_page because locked by add_to_page_cache()
                 * put_page due to reference from alloc_huge_page()
                 */
                unlock_page(page);
                put_page(page);
        }

        if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
                i_size_write(inode, offset + len);
        inode->i_ctime = current_time(inode);
out:
        inode_unlock(inode);
        return error;
}

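/*
 * Illustrative userspace usage (added, not part of the original file; the
 * path is hypothetical): preallocate the first four 2MB huge pages of a
 * hugetlbfs file, then punch the middle two back out:
 *
 *   int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *   fallocate(fd, 0, 0, 4 * 2097152);
 *   fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *             1 * 2097152, 2 * 2097152);
 */
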
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;
        struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

        BUG_ON(!inode);

        error = setattr_prepare(dentry, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                loff_t oldsize = inode->i_size;
                loff_t newsize = attr->ia_size;

                if (newsize & ~huge_page_mask(h))
                        return -EINVAL;
                /* protected by i_mutex */
                if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
                    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
                        return -EPERM;
                error = hugetlb_vmtruncate(inode, newsize);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

AV
706static struct inode *hugetlbfs_get_root(struct super_block *sb,
707 struct hugetlbfs_config *config)
1da177e4
LT
708{
709 struct inode *inode;
1da177e4
LT
710
711 inode = new_inode(sb);
712 if (inode) {
85fe4025 713 inode->i_ino = get_next_ino();
7d54fa64
AV
714 inode->i_mode = S_IFDIR | config->mode;
715 inode->i_uid = config->uid;
716 inode->i_gid = config->gid;
078cd827 717 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
7d54fa64
AV
718 inode->i_op = &hugetlbfs_dir_inode_operations;
719 inode->i_fop = &simple_dir_operations;
720 /* directory inodes start off with i_nlink == 2 (for "." entry) */
721 inc_nlink(inode);
65ed7601 722 lockdep_annotate_inode_mutex_key(inode);
7d54fa64
AV
723 }
724 return inode;
725}
726
/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map;

        resv_map = resv_map_alloc();
        if (!resv_map)
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
                inode->i_mapping->private_data = resv_map;
                info->seals = F_SEAL_SEAL;
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else
                kref_put(&resv_map->refs, resv_map_release);

        return inode;
}

/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = current_time(dir);
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = current_time(dir);

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
        if (mode != MIGRATE_SYNC_NO_COPY)
                migrate_page_copy(newpage, page);
        else
                migrate_page_states(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
                                struct page *page)
{
        struct inode *inode = mapping->host;
        pgoff_t index = page->index;

        remove_huge_page(page);
        if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
                hugetlb_fix_reserve_counts(inode);

        return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
        struct hugepage_subpool *spool = sbinfo->spool;
        unsigned long hpage_size = huge_page_size(sbinfo->hstate);
        unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
        char mod;

        if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
                seq_printf(m, ",uid=%u",
                           from_kuid_munged(&init_user_ns, sbinfo->uid));
        if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
                seq_printf(m, ",gid=%u",
                           from_kgid_munged(&init_user_ns, sbinfo->gid));
        if (sbinfo->mode != 0755)
                seq_printf(m, ",mode=%o", sbinfo->mode);
        if (sbinfo->max_inodes != -1)
                seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

        hpage_size /= 1024;
        mod = 'K';
        if (hpage_size >= 1024) {
                hpage_size /= 1024;
                mod = 'M';
        }
        seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
        if (spool) {
                if (spool->max_hpages != -1)
                        seq_printf(m, ",size=%llu",
                                   (unsigned long long)spool->max_hpages << hpage_shift);
                if (spool->min_hpages != -1)
                        seq_printf(m, ",min_size=%llu",
                                   (unsigned long long)spool->min_hpages << hpage_shift);
        }
        return 0;
}

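/*
 * Example output (added for illustration, not in the original file): a mount
 * created with uid=1000,pagesize=2M,size=1G could show up in /proc/mounts
 * roughly as
 *
 *   none /mnt/huge hugetlbfs rw,relatime,uid=1000,pagesize=2M,size=1073741824 0 0
 *
 * Note that size= and min_size= are reported back in bytes, not in the form
 * they were given on the command line.
 */
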
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(d_inode(dentry));

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }

        /*
         * Any time after allocation, hugetlbfs_destroy_inode can be called
         * for the inode.  mpol_free_shared_policy is unconditionally called
         * as part of hugetlbfs_destroy_inode.  So, initialize policy here
         * in case of a quick call to destroy.
         *
         * Note that the policy is initialized even if we are creating a
         * private inode.  This simplifies hugetlbfs_destroy_inode.
         */
        mpol_shared_policy_init(&p->policy, NULL);

        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin            = hugetlbfs_write_begin,
        .write_end              = hugetlbfs_write_end,
        .set_page_dirty         = hugetlbfs_set_page_dirty,
        .migratepage            = hugetlbfs_migrate_page,
        .error_remove_page      = hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read_iter              = hugetlbfs_read_iter,
        .mmap                   = hugetlbfs_file_mmap,
        .fsync                  = noop_fsync,
        .get_unmapped_area      = hugetlb_get_unmapped_area,
        .llseek                 = default_llseek,
        .fallocate              = hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create         = hugetlbfs_create,
        .lookup         = simple_lookup,
        .link           = simple_link,
        .unlink         = simple_unlink,
        .symlink        = hugetlbfs_symlink,
        .mkdir          = hugetlbfs_mkdir,
        .rmdir          = simple_rmdir,
        .mknod          = hugetlbfs_mknod,
        .rename         = simple_rename,
        .setattr        = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr        = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode    = hugetlbfs_alloc_inode,
        .destroy_inode  = hugetlbfs_destroy_inode,
        .evict_inode    = hugetlbfs_evict_inode,
        .statfs         = hugetlbfs_statfs,
        .put_super      = hugetlbfs_put_super,
        .show_options   = hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
                         enum hugetlbfs_size_type val_type)
{
        if (val_type == NO_SIZE)
                return -1;

        if (val_type == SIZE_PERCENT) {
                size_opt <<= huge_page_shift(h);
                size_opt *= h->max_huge_pages;
                do_div(size_opt, 100);
        }

        size_opt >>= huge_page_shift(h);
        return size_opt;
}

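/*
 * Worked example (added for illustration, not in the original file): with
 * 2MB huge pages and a pool of 100 pages (h->max_huge_pages == 100),
 * "size=50%" gives size_opt = 50, which becomes
 * ((50 << 21) * 100 / 100) >> 21 = 50 huge pages; "size=64M" (SIZE_STD)
 * gives 64MB >> 21 = 32 huge pages.
 */
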
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long max_size_opt = 0, min_size_opt = 0;
        enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        max_size_opt = memparse(args[0].from, &rest);
                        max_val_type = SIZE_STD;
                        if (*rest == '%')
                                max_val_type = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                pr_err("Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                case Opt_min_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        min_size_opt = memparse(args[0].from, &rest);
                        min_val_type = SIZE_STD;
                        if (*rest == '%')
                                min_val_type = SIZE_PERCENT;
                        break;
                }

                default:
                        pr_err("Bad mount option: \"%s\"\n", p);
                        return -EINVAL;
                        break;
                }
        }

        /*
         * Use huge page pool size (in hstate) to convert the size
         * options to number of huge pages.  If NO_SIZE, -1 is returned.
         */
        pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                max_size_opt, max_val_type);
        pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
                                                min_size_opt, min_val_type);

        /*
         * If max_size was specified, then min_size must be smaller
         */
        if (max_val_type > NO_SIZE &&
            pconfig->min_hpages > pconfig->max_hpages) {
                pr_err("minimum size can not be greater than maximum size\n");
                return -EINVAL;
        }

        return 0;

bad_val:
        pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
        return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        config.max_hpages = -1; /* No limit on size by default */
        config.nr_inodes = -1; /* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        config.min_hpages = -1; /* No default minimum size */
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;
        sbinfo->uid = config.uid;
        sbinfo->gid = config.gid;
        sbinfo->mode = config.mode;

        /*
         * Allocate and initialize subpool if maximum or minimum size is
         * specified.  Any needed reservations (for minimum size) are taken
         * when the subpool is created.
         */
        if (config.max_hpages != -1 || config.min_hpages != -1) {
                sbinfo->spool = hugepage_new_subpool(config.hstate,
                                                     config.max_hpages,
                                                     config.min_hpages);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name           = "hugetlbfs",
        .mount          = hugetlbfs_mount,
        .kill_sb        = kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return h - hstates;
}

static const struct dentry_operations anon_ops = {
        .d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the caller,
 * otherwise hugetlb_reserve_pages reserves one fewer huge page than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
{
        struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
        struct super_block *sb;
        struct qstr quick_string;
        int hstate_idx;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        *user = NULL;
        if (!hugetlbfs_vfsmount[hstate_idx])
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;
        if (creat_flags == HUGETLB_SHMFS_INODE)
                inode->i_flags |= S_PRIVATE;

        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return file;
}

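/*
 * Context note (added for illustration, not part of the original file):
 * hugetlb_file_setup() is the entry point used when no explicit hugetlbfs
 * mount is involved, e.g. by System V shared memory when SHM_HUGETLB is
 * requested:
 *
 *   int id = shmget(IPC_PRIVATE, 4 * 2097152,
 *                   IPC_CREAT | SHM_HUGETLB | 0600);
 *
 * and by anonymous mmap(MAP_HUGETLB); both back the mapping with a file on
 * the internal hugetlbfs mounts set up in init_hugetlbfs_fs() below.
 */
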
static int __init init_hugetlbfs_fs(void)
{
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, SLAB_ACCOUNT, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        i = 0;
        for_each_hstate(h) {
                char buf[50];
                unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

                snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
                hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
                                                        buf);

                if (IS_ERR(hugetlbfs_vfsmount[i])) {
                        pr_err("Cannot mount internal hugetlbfs for "
                                "page size %uK", ps_kb);
                        error = PTR_ERR(hugetlbfs_vfsmount[i]);
                        hugetlbfs_vfsmount[i] = NULL;
                }
                i++;
        }
        /* Non default hstates are optional */
        if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
                return 0;

 out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
        return error;
}
fs_initcall(init_hugetlbfs_fs)