/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>		/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/fs_parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

struct hugetlbfs_fs_context {
	struct hstate		*hstate;
	unsigned long long	max_size_opt;
	unsigned long long	min_size_opt;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	enum hugetlbfs_size_type max_val_type;
	enum hugetlbfs_size_type min_val_type;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

int sysctl_hugetlb_shm_group;

enum hugetlb_param {
	Opt_gid,
	Opt_min_size,
	Opt_mode,
	Opt_nr_inodes,
	Opt_pagesize,
	Opt_size,
	Opt_uid,
};

static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
	fsparam_u32   ("gid",		Opt_gid),
	fsparam_string("min_size",	Opt_min_size),
	fsparam_u32   ("mode",		Opt_mode),
	fsparam_string("nr_inodes",	Opt_nr_inodes),
	fsparam_string("pagesize",	Opt_pagesize),
	fsparam_string("size",		Opt_size),
	fsparam_u32   ("uid",		Opt_uid),
	{}
};

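/*
 * Illustrative only: the parameters above correspond to mount options
 * such as the following (the mount point /mnt/huge is an arbitrary
 * example):
 *
 *	mount -t hugetlbfs \
 *		-o pagesize=2M,size=1G,min_size=512M,nr_inodes=64,uid=1000,gid=1000,mode=0700 \
 *		none /mnt/huge
 *
 * "size" and "min_size" may also be given as a percentage of the huge
 * page pool, e.g. size=50%; see hugetlbfs_size_to_hpages() below.
 */
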
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))

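/*
 * Worked example (assuming a 64-bit arch with 4K base pages, so
 * PAGE_SHIFT == 12 and BITS_PER_LONG == 64): the macro evaluates to a
 * mask of the upper 13 bits, 0xfff8000000000000.  A vm_pgoff with any
 * of those bits set would, after the << PAGE_SHIFT conversion to a
 * byte offset, either overflow the 63 value bits of loff_t or set its
 * sign bit.
 */
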
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

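#if 0	/* Illustrative userspace sketch; not part of the kernel build. */
/*
 * Minimal example of the path that reaches hugetlbfs_file_mmap() above:
 * map one huge page from a file on a mounted hugetlbfs instance.  The
 * mount point /mnt/huge and the 2MB page size are assumptions.  Note
 * that a writable shared mapping extends i_size, so no ftruncate() is
 * needed, and that offset and length must be huge page aligned.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 2UL * 1024 * 1024;	/* one 2MB huge page */
	int fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;
	memset(p, 0, len);			/* fault the huge page in */
	munmap(p, len);
	close(fd);
	unlink("/mnt/huge/example");
	return 0;
}
#endif
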
/*
 * Called under mmap_write_lock(mm).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here.  This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (unlikely(offset_in_page(addr))) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = current->mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	/*
	 * Use mm->get_unmapped_area value as a hint to use topdown routine.
	 * If architectures have special needs, they should define their own
	 * version of hugetlb_get_unmapped_area.
	 */
	if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	return hugetlb_get_unmapped_area_bottomup(file, addr, len,
			pgoff, flags);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

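/*
 * Worked example for the chunking above (assuming 4K base pages): with
 * offset == 5120 and size == 6144, the loop starts at subpage i == 1
 * with an intra-page offset of 1024, copies 3072 bytes to finish that
 * subpage, then copies the remaining 3072 bytes from subpage i == 2.
 */
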
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

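/*
 * Worked example for the offset arithmetic above (4K base pages): a vma
 * with vm_pgoff == 512 covering [vm_start, vm_start + 4MB) and a
 * truncation point start == 1024 give v_offset = (1024 - 512) << 12 =
 * 2MB, so only the second half of the mapping, [vm_start + 2MB, vm_end),
 * is unmapped.
 */
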
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
 *	page faults in the truncated range by checking i_size.  i_size is
 *	modified while holding i_mmap_rwsem.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	vma_init(&pseudo_vma, current->mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(mapping, index);
			if (!truncate_op) {
				/*
				 * Only need to hold the fault mutex in the
				 * hole punch case.  This prevents races with
				 * page faults.  Races are not possible in the
				 * case of truncation.
				 */
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
			}

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
				i_mmap_lock_write(mapping);
				mutex_lock(&hugetlb_fault_mutex_table[hash]);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage counts may
			 * need to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			if (!truncate_op)
				mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);

	/*
	 * Get the resv_map from the address space embedded in the inode.
	 * This is the address space which points to any resv_map allocated
	 * at inode creation time.  If this is a device special inode,
	 * i_mapping may not point to the original address space.
	 */
	resv_map = (struct resv_map *)(&inode->i_data)->private_data;
	/* Only regular and link inodes have associated reserve maps */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_mmap_lock_write(mapping);
	i_size_write(inode, offset);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode_lock(inode);

		/* protected by i_mutex */
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
			inode_unlock(inode);
			return -EPERM;
		}

		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end  >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
		error = -EPERM;
		goto out;
	}

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	vma_init(&pseudo_vma, mm);
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/*
		 * fault mutex taken here, protects against fault path
		 * and hole punch.  inode_lock previously taken protects
		 * against truncation.
		 */
		hash = hugetlb_fault_mutex_hash(mapping, index);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

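#if 0	/* Illustrative userspace sketch; not part of the kernel build. */
/*
 * Preallocate then hole punch on a hugetlbfs file, exercising
 * hugetlbfs_fallocate() and hugetlbfs_punch_hole() above.  The mount
 * point /mnt/huge and the 2MB page size are assumptions.  Unaligned
 * hole offsets are rounded (see hugetlbfs_punch_hole), so only whole
 * huge pages inside the requested range are released.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <unistd.h>

int main(void)
{
	const off_t hpage = 2L * 1024 * 1024;
	int fd = open("/mnt/huge/prealloc", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	/* Reserve and zero-fill four huge pages up front. */
	if (fallocate(fd, 0, 0, 4 * hpage))
		return 1;
	/* Release the middle two; PUNCH_HOLE requires KEEP_SIZE. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      hpage, 2 * hpage))
		return 1;
	close(fd);
	return 0;
}
#endif
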
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;
	struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		loff_t oldsize = inode->i_size;
		loff_t newsize = attr->ia_size;

		if (newsize & ~huge_page_mask(h))
			return -EINVAL;
		/* protected by i_mutex */
		if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
		    (newsize > oldsize && (info->seals & F_SEAL_GROW)))
			return -EPERM;
		error = hugetlb_vmtruncate(inode, newsize);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_fs_context *ctx)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | ctx->mode;
		inode->i_uid = ctx->uid;
		inode->i_gid = ctx->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map = NULL;

	/*
	 * Reserve maps are only needed for inodes that can have associated
	 * page allocations.
	 */
	if (S_ISREG(mode) || S_ISLNK(mode)) {
		resv_map = resv_map_alloc();
		if (!resv_map)
			return NULL;
	}

	inode = new_inode(sb);
	if (inode) {
		struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);

		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		info->seals = F_SEAL_SEAL;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		if (resv_map)
			kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done.
 */
static int do_hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry,
			umode_t mode,
			dev_t dev,
			bool tmpfile)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		if (tmpfile) {
			d_tmpfile(dentry, inode);
		} else {
			d_instantiate(dentry, inode);
			dget(dentry);	/* Extra count - pin the dentry in core */
		}
		error = 0;
	}
	return error;
}

static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_tmpfile(struct inode *dir,
			struct dentry *dentry, umode_t mode)
{
	return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	/*
	 * page_private is subpool pointer in hugetlb pages.  Transfer to
	 * new page.  PagePrivate is not associated with page_private for
	 * hugetlb pages and can not be set here as only page_huge_active
	 * pages can be migrated.
	 */
	if (page_private(page)) {
		set_page_private(newpage, page_private(page));
		set_page_private(page, 0);
	}

	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_free_inode(struct inode *inode)
{
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin	= hugetlbfs_write_begin,
	.write_end	= hugetlbfs_write_end,
	.set_page_dirty	= hugetlbfs_set_page_dirty,
	.migratepage    = hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};


static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
	.tmpfile	= hugetlbfs_tmpfile,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode    = hugetlbfs_alloc_inode,
	.free_inode     = hugetlbfs_free_inode,
	.destroy_inode  = hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

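/*
 * Worked example: with 2MB huge pages (huge_page_shift == 21) and a
 * pool of max_huge_pages == 1024, "size=50%" arrives here as
 * size_opt == 50 with val_type == SIZE_PERCENT.  50 << 21, scaled by
 * 1024 and divided by 100, then shifted back down by 21, yields 512
 * huge pages.
 */
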
/*
 * Parse one mount parameter.
 */
static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct fs_parse_result result;
	char *rest;
	unsigned long ps;
	int opt;

	opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_uid:
		ctx->uid = make_kuid(current_user_ns(), result.uint_32);
		if (!uid_valid(ctx->uid))
			goto bad_val;
		return 0;

	case Opt_gid:
		ctx->gid = make_kgid(current_user_ns(), result.uint_32);
		if (!gid_valid(ctx->gid))
			goto bad_val;
		return 0;

	case Opt_mode:
		ctx->mode = result.uint_32 & 01777U;
		return 0;

	case Opt_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->max_size_opt = memparse(param->string, &rest);
		ctx->max_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->max_val_type = SIZE_PERCENT;
		return 0;

	case Opt_nr_inodes:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->nr_inodes = memparse(param->string, &rest);
		return 0;

	case Opt_pagesize:
		ps = memparse(param->string, &rest);
		ctx->hstate = size_to_hstate(ps);
		if (!ctx->hstate) {
			pr_err("Unsupported page size %lu MB\n", ps >> 20);
			return -EINVAL;
		}
		return 0;

	case Opt_min_size:
		/* memparse() will accept a K/M/G without a digit */
		if (!isdigit(param->string[0]))
			goto bad_val;
		ctx->min_size_opt = memparse(param->string, &rest);
		ctx->min_val_type = SIZE_STD;
		if (*rest == '%')
			ctx->min_val_type = SIZE_PERCENT;
		return 0;

	default:
		return -EINVAL;
	}

bad_val:
	return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
		      param->string, param->key);
}

/*
 * Validate the parsed options.
 */
static int hugetlbfs_validate(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->max_size_opt,
						   ctx->max_val_type);
	ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
						   ctx->min_size_opt,
						   ctx->min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (ctx->max_val_type > NO_SIZE &&
	    ctx->min_hpages > ctx->max_hpages) {
		pr_err("Minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;
}

static int
hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx = fc->fs_private;
	struct hugetlbfs_sb_info *sbinfo;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->hstate		= ctx->hstate;
	sbinfo->max_inodes	= ctx->nr_inodes;
	sbinfo->free_inodes	= ctx->nr_inodes;
	sbinfo->spool		= NULL;
	sbinfo->uid		= ctx->uid;
	sbinfo->gid		= ctx->gid;
	sbinfo->mode		= ctx->mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(ctx->hstate,
						     ctx->max_hpages,
						     ctx->min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(ctx->hstate);
	sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static int hugetlbfs_get_tree(struct fs_context *fc)
{
	int err = hugetlbfs_validate(fc);
	if (err)
		return err;
	return get_tree_nodev(fc, hugetlbfs_fill_super);
}

static void hugetlbfs_fs_context_free(struct fs_context *fc)
{
	kfree(fc->fs_private);
}

static const struct fs_context_operations hugetlbfs_fs_context_ops = {
	.free		= hugetlbfs_fs_context_free,
	.parse_param	= hugetlbfs_parse_param,
	.get_tree	= hugetlbfs_get_tree,
};

static int hugetlbfs_init_fs_context(struct fs_context *fc)
{
	struct hugetlbfs_fs_context *ctx;

	ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->max_hpages	= -1; /* No limit on size by default */
	ctx->nr_inodes	= -1; /* No limit on number of inodes by default */
	ctx->uid	= current_fsuid();
	ctx->gid	= current_fsgid();
	ctx->mode	= 0755;
	ctx->hstate	= &default_hstate;
	ctx->min_hpages	= -1; /* No default minimum size */
	ctx->max_val_type = NO_SIZE;
	ctx->min_val_type = NO_SIZE;
	fc->fs_private = ctx;
	fc->ops	= &hugetlbfs_fs_context_ops;
	return 0;
}

static struct file_system_type hugetlbfs_fs_type = {
	.name			= "hugetlbfs",
	.init_fs_context	= hugetlbfs_init_fs_context,
	.parameters		= hugetlb_fs_parameters,
	.kill_sb		= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct inode *inode;
	struct vfsmount *mnt;
	int hstate_idx;
	struct file *file;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	mnt = hugetlbfs_vfsmount[hstate_idx];
	if (!mnt)
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	inode->i_size = size;
	clear_nlink(inode);

	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		file = ERR_PTR(-ENOMEM);
	else
		file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
					&hugetlbfs_file_operations);
	if (!IS_ERR(file))
		return file;

	iput(inode);
out:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

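#if 0	/* Illustrative userspace sketch; not part of the kernel build. */
/*
 * hugetlb_file_setup() above backs SysV shared memory created with
 * SHM_HUGETLB, as in this sketch.  It assumes a configured huge page
 * pool and either CAP_IPC_LOCK or membership in the group named by
 * sysctl_hugetlb_shm_group; size should be a multiple of the huge
 * page size (see the note above hugetlb_file_setup).
 */
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	const size_t size = 2UL * 1024 * 1024;
	int id = shmget(IPC_PRIVATE, size, SHM_HUGETLB | IPC_CREAT | 0600);

	if (id < 0)
		return 1;
	char *p = shmat(id, NULL, 0);
	if (p == (void *)-1)
		return 1;
	p[0] = 1;			/* touch the segment */
	shmdt(p);
	shmctl(id, IPC_RMID, NULL);
	return 0;
}
#endif
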
static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
{
	struct fs_context *fc;
	struct vfsmount *mnt;

	fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
	if (IS_ERR(fc)) {
		mnt = ERR_CAST(fc);
	} else {
		struct hugetlbfs_fs_context *ctx = fc->fs_private;
		ctx->hstate = h;
		mnt = fc_mount(fc);
		put_fs_context(fc);
	}
	if (IS_ERR(mnt))
		pr_err("Cannot mount internal hugetlbfs for page size %uK",
		       1U << (h->order + PAGE_SHIFT - 10));
	return mnt;
}

static int __init init_hugetlbfs_fs(void)
{
	struct vfsmount *mnt;
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out_free;

	/* default hstate mount is required */
	mnt = mount_one_hugetlbfs(&hstates[default_hstate_idx]);
	if (IS_ERR(mnt)) {
		error = PTR_ERR(mnt);
		goto out_unreg;
	}
	hugetlbfs_vfsmount[default_hstate_idx] = mnt;

	/* other hstates are optional */
	i = 0;
	for_each_hstate(h) {
		if (i == default_hstate_idx) {
			i++;
			continue;
		}

		mnt = mount_one_hugetlbfs(h);
		if (IS_ERR(mnt))
			hugetlbfs_vfsmount[i] = NULL;
		else
			hugetlbfs_vfsmount[i] = mnt;
		i++;
	}

	return 0;

 out_unreg:
	(void)unregister_filesystem(&hugetlbfs_fs_type);
 out_free:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out:
	return error;
}
fs_initcall(init_hugetlbfs_fs)