/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>
static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;
struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};
struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};
static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}
int sysctl_hugetlb_shm_group;
enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};
static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif
static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * Offset passed to mmap (before page shift) could have been
	 * negative when represented as a (l)off_t.
	 */
	if (((loff_t)vma->vm_pgoff << PAGE_SHIFT) < 0)
		return -EINVAL;

	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}
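/*
 * Illustrative userspace sketch: a hugetlbfs mapping must use a file
 * offset aligned to the huge page size, or the huge_page_mask() check
 * above fails with -EINVAL:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * For a writable mapping, the i_size_write() above then extends i_size
 * to cover the mapping.
 */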
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif
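/*
 * Illustrative: for 2MB huge pages, align_mask above is
 * PAGE_MASK & ~huge_page_mask(h) == 0x1ff000, so vm_unmapped_area()
 * only returns 2MB-aligned addresses.
 */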
static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(), we can't use that
 * since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}
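/*
 * Illustrative: because holes are zero-filled above via iov_iter_zero(),
 * a pread() over a never-faulted huge page returns zeroes instead of an
 * error, so sparse hugetlbfs files can be copied like regular files.
 */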
static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}
static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}
/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults can not race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv map for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec, 0);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}
static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}
static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}
static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end  >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}
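/*
 * Illustrative userspace sketch: only whole huge pages are freed.  With
 * 2MB pages,
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  2 * 1024 * 1024, 2 * 1024 * 1024);
 *
 * frees the second huge page, while a sub-2MB range rounds to an empty
 * hole and frees nothing.
 */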
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * page_put due to reference from alloc_huge_page()
		 * unlock_page because locked by add_to_page_cache()
		 */
		put_page(page);
		unlock_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}
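/*
 * Illustrative userspace sketch: preallocation turns later faults into
 * page cache lookups.  For example,
 *
 *	fallocate(fd, 0, 0, 1024UL * 1024 * 1024);
 *
 * populates 512 huge pages of a 2MB-page file up front; with
 * FALLOC_FL_KEEP_SIZE the pages are still allocated but i_size is left
 * unchanged.
 */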
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else {
		kref_put(&resv_map->refs, resv_map_release);
	}

	return inode;
}
/*
 * File creation. Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}
static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}
static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}
/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}
static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;

	remove_huge_page(page);
	hugetlb_fix_reserve_counts(inode);
	return 0;
}
/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}
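/*
 * Illustrative /proc/mounts line produced by the above (exact flags
 * depend on the mount):
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824 0 0
 *
 * pagesize is scaled to K or M; size= and min_size= are reported in bytes.
 */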
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}
static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}
static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}
static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}
static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}
static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}
static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};
static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}
const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};
static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};
static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};
static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};
enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}
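/*
 * Illustrative: with 2MB huge pages and a 1024-page pool,
 * "size=50%" gives ((50 << 21) * 1024 / 100) >> 21 = 512 huge pages,
 * and "size=1G" gives (1 << 30) >> 21 = 512 huge pages as well.
 */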
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}
static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1; /* No limit on size by default */
	config.nr_inodes = -1; /* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1; /* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
							config.max_hpages,
							config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}
static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}
static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}
static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}
static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};
/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}
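/*
 * Illustrative in-kernel caller, an approximate sketch of mmap(2)'s
 * anonymous-hugepage path:
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  &user, HUGETLB_ANONHUGE_INODE,
 *				  (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
 *
 * shmget(key, size, SHM_HUGETLB) reaches here similarly with
 * HUGETLB_SHMFS_INODE, which is what triggers the can_do_hugetlb_shm()
 * permission check above.
 */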
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)