/*
 * hugetlbpage-backed filesystem. Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched.h>	/* remove ASAP */
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <asm/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
        kuid_t uid;
        kgid_t gid;
        umode_t mode;
        long nr_blocks;
        long nr_inodes;
        struct hstate *hstate;
};

struct hugetlbfs_inode_info {
        struct shared_policy policy;
        struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
        return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
        Opt_size, Opt_nr_inodes,
        Opt_mode, Opt_uid, Opt_gid,
        Opt_pagesize,
        Opt_err,
};

static const match_table_t tokens = {
        {Opt_size, "size=%s"},
        {Opt_nr_inodes, "nr_inodes=%s"},
        {Opt_mode, "mode=%o"},
        {Opt_uid, "uid=%u"},
        {Opt_gid, "gid=%u"},
        {Opt_pagesize, "pagesize=%s"},
        {Opt_err, NULL},
};
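
/*
 * These options map directly onto the hugetlbfs mount command line; a
 * typical invocation (mount point and values here are only illustrative)
 * would be:
 *
 *      mount -t hugetlbfs -o size=1G,nr_inodes=64,mode=0700,pagesize=2M \
 *            none /mnt/huge
 *
 * size= and nr_inodes= take K/M/G suffixes via memparse(); size= may also
 * be given as a percentage of the huge page pool (e.g. size=50%), handled
 * by the SIZE_PERCENT case in hugetlbfs_parse_options() below.
 */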

static void huge_pagevec_release(struct pagevec *pvec)
{
        int i;

        for (i = 0; i < pagevec_count(pvec); ++i)
                put_page(pvec->pages[i]);

        pagevec_reinit(pvec);
}

static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct inode *inode = file_inode(file);
        loff_t len, vma_len;
        int ret;
        struct hstate *h = hstate_file(file);

        /*
         * vma address alignment (but not the pgoff alignment) has
         * already been checked by prepare_hugepage_range. If you add
         * any error returns here, do so after setting VM_HUGETLB, so
         * is_vm_hugetlb_page tests below unmap_region go the right
         * way when do_mmap_pgoff unwinds (may be important on powerpc
         * and ia64).
         */
        vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
        vma->vm_ops = &hugetlb_vm_ops;

        if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
                return -EINVAL;

        vma_len = (loff_t)(vma->vm_end - vma->vm_start);

        mutex_lock(&inode->i_mutex);
        file_accessed(file);

        ret = -ENOMEM;
        len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

        if (hugetlb_reserve_pages(inode,
                                vma->vm_pgoff >> huge_page_order(h),
                                len >> huge_page_shift(h), vma,
                                vma->vm_flags))
                goto out;

        ret = 0;
        hugetlb_prefault_arch_hook(vma->vm_mm);
        if (vma->vm_flags & VM_WRITE && inode->i_size < len)
                inode->i_size = len;
out:
        mutex_unlock(&inode->i_mutex);

        return ret;
}
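
/*
 * A minimal userspace sketch of driving the mmap path above (illustrative
 * only; the /mnt/huge mount point is an assumption and the size assumes
 * 2MB huge pages):
 *
 *      int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *      void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, 0);
 *
 * Because the mapping has VM_WRITE, i_size is grown to cover it above, so
 * no prior ftruncate() is needed; a file offset that is not a multiple of
 * the huge page size fails the pgoff check with -EINVAL.
 */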

/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
                        struct iov_iter *to, unsigned long size)
{
        size_t copied = 0;
        int i, chunksize;

        /* Find which 4k chunk and offset within that chunk */
191 i = offset >> PAGE_CACHE_SHIFT;
192 offset = offset & ~PAGE_CACHE_MASK;
193
194 while (size) {
195 size_t n;
196 chunksize = PAGE_CACHE_SIZE;
197 if (offset)
198 chunksize -= offset;
199 if (chunksize > size)
200 chunksize = size;
201 n = copy_page_to_iter(&page[i], offset, chunksize, to);
202 copied += n;
203 if (n != chunksize)
204 return copied;
205 offset = 0;
206 size -= chunksize;
207 i++;
208 }
209 return copied;
210 }
211
/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data. It's *very* similar to do_generic_mapping_read(); we can't use that
 * since it has PAGE_CACHE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
        struct file *file = iocb->ki_filp;
        struct hstate *h = hstate_file(file);
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        unsigned long index = iocb->ki_pos >> huge_page_shift(h);
        unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
        unsigned long end_index;
        loff_t isize;
        ssize_t retval = 0;

        while (iov_iter_count(to)) {
                struct page *page;
                size_t nr, copied;

                /* nr is the maximum number of bytes to copy from this page */
                nr = huge_page_size(h);
                isize = i_size_read(inode);
                if (!isize)
                        break;
                end_index = (isize - 1) >> huge_page_shift(h);
                if (index > end_index)
                        break;
                if (index == end_index) {
                        nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
                        if (nr <= offset)
                                break;
                }
                nr = nr - offset;

                /* Find the page */
                page = find_lock_page(mapping, index);
                if (unlikely(page == NULL)) {
                        /*
                         * We have a HOLE, zero out the user-buffer for the
                         * length of the hole or request.
                         */
                        copied = iov_iter_zero(nr, to);
                } else {
                        unlock_page(page);

                        /*
                         * We have the page, copy it to user space buffer.
                         */
                        copied = hugetlbfs_read_actor(page, offset, to, nr);
                        page_cache_release(page);
                }
                offset += copied;
                retval += copied;
                if (copied != nr && iov_iter_count(to)) {
                        if (!retval)
                                retval = -EFAULT;
                        break;
                }
                index += offset >> huge_page_shift(h);
                offset &= ~huge_page_mask(h);
        }
        iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
        return retval;
}
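
/*
 * From userspace this behaves like an ordinary read() (illustrative):
 *
 *      char buf[4096];
 *      ssize_t n = read(fd, buf, sizeof(buf));
 *
 * Holes (indexes with no page instantiated) are reported as zeroes via
 * iov_iter_zero() above instead of faulting in a huge page.
 */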

static int hugetlbfs_write_begin(struct file *file,
                        struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
{
        return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned copied,
                        struct page *page, void *fsdata)
{
        BUG();
        return -EINVAL;
}

static void truncate_huge_page(struct page *page)
{
        cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
        ClearPageUptodate(page);
        delete_from_page_cache(page);
}

static void truncate_hugepages(struct inode *inode, loff_t lstart)
{
        struct hstate *h = hstate_inode(inode);
        struct address_space *mapping = &inode->i_data;
        const pgoff_t start = lstart >> huge_page_shift(h);
        struct pagevec pvec;
        pgoff_t next;
        int i, freed = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (1) {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }

                for (i = 0; i < pagevec_count(&pvec); ++i) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        if (page->index > next)
                                next = page->index;
                        ++next;
                        truncate_huge_page(page);
                        unlock_page(page);
                        freed++;
                }
                huge_pagevec_release(&pvec);
        }
        BUG_ON(!lstart && mapping->nrpages);
        hugetlb_unreserve_pages(inode, start, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
        struct resv_map *resv_map;

        truncate_hugepages(inode, 0);
        resv_map = (struct resv_map *)inode->i_mapping->private_data;
        /* The root inode doesn't have a resv_map, so check it first */
        if (resv_map)
                resv_map_release(&resv_map->refs);
        clear_inode(inode);
}

static inline void
hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
{
        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
                unsigned long v_offset;

                /*
                 * Can the expression below overflow on 32-bit arches?
                 * No, because the interval tree returns us only those vmas
                 * which overlap the truncated area starting at pgoff,
                 * and no vma on a 32-bit arch can span beyond 4GB.
                 */
                if (vma->vm_pgoff < pgoff)
                        v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
                else
                        v_offset = 0;

                unmap_hugepage_range(vma, vma->vm_start + v_offset,
                                     vma->vm_end, NULL);
        }
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
        pgoff_t pgoff;
        struct address_space *mapping = inode->i_mapping;
        struct hstate *h = hstate_inode(inode);

        BUG_ON(offset & ~huge_page_mask(h));
        pgoff = offset >> PAGE_SHIFT;

        i_size_write(inode, offset);
        i_mmap_lock_write(mapping);
        if (!RB_EMPTY_ROOT(&mapping->i_mmap))
                hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
        i_mmap_unlock_write(mapping);
        truncate_hugepages(inode, offset);
        return 0;
}

static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = dentry->d_inode;
        struct hstate *h = hstate_inode(inode);
        int error;
        unsigned int ia_valid = attr->ia_valid;

        BUG_ON(!inode);

        error = inode_change_ok(inode, attr);
        if (error)
                return error;

        if (ia_valid & ATTR_SIZE) {
                error = -EINVAL;
                if (attr->ia_size & ~huge_page_mask(h))
                        return -EINVAL;
                error = hugetlb_vmtruncate(inode, attr->ia_size);
                if (error)
                        return error;
        }

        setattr_copy(inode, attr);
        mark_inode_dirty(inode);
        return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
                                        struct hugetlbfs_config *config)
{
        struct inode *inode;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode->i_mode = S_IFDIR | config->mode;
                inode->i_uid = config->uid;
                inode->i_gid = config->gid;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                info = HUGETLBFS_I(inode);
                mpol_shared_policy_init(&info->policy, NULL);
                inode->i_op = &hugetlbfs_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
                /* directory inodes start off with i_nlink == 2 (for "." entry) */
                inc_nlink(inode);
                lockdep_annotate_inode_mutex_key(inode);
        }
        return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems. This needs an
 * annotation because huge_pmd_share() does an allocation under
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
                                        struct inode *dir,
                                        umode_t mode, dev_t dev)
{
        struct inode *inode;
        struct resv_map *resv_map;

        resv_map = resv_map_alloc();
        if (!resv_map)
                return NULL;

        inode = new_inode(sb);
        if (inode) {
                struct hugetlbfs_inode_info *info;
                inode->i_ino = get_next_ino();
                inode_init_owner(inode, dir, mode);
                lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
                                &hugetlbfs_i_mmap_rwsem_key);
                inode->i_mapping->a_ops = &hugetlbfs_aops;
                inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                inode->i_mapping->private_data = resv_map;
                info = HUGETLBFS_I(inode);
                /*
                 * The policy is initialized here even if we are creating a
                 * private inode, because initialization simply creates an
                 * empty rb tree and calls spin_lock_init(); later, when we
                 * call mpol_free_shared_policy(), it will just return because
                 * the rb tree will still be empty.
                 */
                mpol_shared_policy_init(&info->policy, NULL);
                switch (mode & S_IFMT) {
                default:
                        init_special_inode(inode, mode, dev);
                        break;
                case S_IFREG:
                        inode->i_op = &hugetlbfs_inode_operations;
                        inode->i_fop = &hugetlbfs_file_operations;
                        break;
                case S_IFDIR:
                        inode->i_op = &hugetlbfs_dir_inode_operations;
                        inode->i_fop = &simple_dir_operations;

                        /* directory inodes start off with i_nlink == 2 (for "." entry) */
                        inc_nlink(inode);
                        break;
                case S_IFLNK:
                        inode->i_op = &page_symlink_inode_operations;
                        break;
                }
                lockdep_annotate_inode_mutex_key(inode);
        } else
                kref_put(&resv_map->refs, resv_map_release);

        return inode;
}

/*
 * File creation. Allocate an inode, and we're done.
 */
static int hugetlbfs_mknod(struct inode *dir,
                        struct dentry *dentry, umode_t mode, dev_t dev)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
        if (inode) {
                dir->i_ctime = dir->i_mtime = CURRENT_TIME;
                d_instantiate(dentry, inode);
                dget(dentry);   /* Extra count - pin the dentry in core */
                error = 0;
        }
        return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
        int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
        if (!retval)
                inc_nlink(dir);
        return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
        return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
                        struct dentry *dentry, const char *symname)
{
        struct inode *inode;
        int error = -ENOSPC;

        inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
        if (inode) {
                int l = strlen(symname)+1;
                error = page_symlink(inode, symname, l);
                if (!error) {
                        d_instantiate(dentry, inode);
                        dget(dentry);
                } else
                        iput(inode);
        }
        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
        struct page *head = compound_head(page);

        SetPageDirty(head);
        return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
{
        int rc;

        rc = migrate_huge_page_move_mapping(mapping, newpage, page);
        if (rc != MIGRATEPAGE_SUCCESS)
                return rc;
        migrate_page_copy(newpage, page);

        return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
        struct hstate *h = hstate_inode(dentry->d_inode);

        buf->f_type = HUGETLBFS_MAGIC;
        buf->f_bsize = huge_page_size(h);
        if (sbinfo) {
                spin_lock(&sbinfo->stat_lock);
                /* If no limits set, just report 0 for max/free/used
                 * blocks, like simple_statfs() */
                if (sbinfo->spool) {
                        long free_pages;

                        spin_lock(&sbinfo->spool->lock);
                        buf->f_blocks = sbinfo->spool->max_hpages;
                        free_pages = sbinfo->spool->max_hpages
                                - sbinfo->spool->used_hpages;
                        buf->f_bavail = buf->f_bfree = free_pages;
                        spin_unlock(&sbinfo->spool->lock);
                        buf->f_files = sbinfo->max_inodes;
                        buf->f_ffree = sbinfo->free_inodes;
                }
                spin_unlock(&sbinfo->stat_lock);
        }
        buf->f_namelen = NAME_MAX;
        return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

        if (sbi) {
                sb->s_fs_info = NULL;

                if (sbi->spool)
                        hugepage_put_subpool(sbi->spool);

                kfree(sbi);
        }
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                if (unlikely(!sbinfo->free_inodes)) {
                        spin_unlock(&sbinfo->stat_lock);
                        return 0;
                }
                sbinfo->free_inodes--;
                spin_unlock(&sbinfo->stat_lock);
        }

        return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
        if (sbinfo->free_inodes >= 0) {
                spin_lock(&sbinfo->stat_lock);
                sbinfo->free_inodes++;
                spin_unlock(&sbinfo->stat_lock);
        }
}


static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
        struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
        struct hugetlbfs_inode_info *p;

        if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
                return NULL;
        p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
        if (unlikely(!p)) {
                hugetlbfs_inc_free_inodes(sbinfo);
                return NULL;
        }
        return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
        hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
        mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
        call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
        .write_begin = hugetlbfs_write_begin,
        .write_end = hugetlbfs_write_end,
        .set_page_dirty = hugetlbfs_set_page_dirty,
        .migratepage = hugetlbfs_migrate_page,
};


static void init_once(void *foo)
{
        struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

        inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
        .read = new_sync_read,
        .read_iter = hugetlbfs_read_iter,
        .mmap = hugetlbfs_file_mmap,
        .fsync = noop_fsync,
        .get_unmapped_area = hugetlb_get_unmapped_area,
        .llseek = default_llseek,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
        .create = hugetlbfs_create,
        .lookup = simple_lookup,
        .link = simple_link,
        .unlink = simple_unlink,
        .symlink = hugetlbfs_symlink,
        .mkdir = hugetlbfs_mkdir,
        .rmdir = simple_rmdir,
        .mknod = hugetlbfs_mknod,
        .rename = simple_rename,
        .setattr = hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
        .setattr = hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
        .alloc_inode = hugetlbfs_alloc_inode,
        .destroy_inode = hugetlbfs_destroy_inode,
        .evict_inode = hugetlbfs_evict_inode,
        .statfs = hugetlbfs_statfs,
        .put_super = hugetlbfs_put_super,
        .show_options = generic_show_options,
};

static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
        char *p, *rest;
        substring_t args[MAX_OPT_ARGS];
        int option;
        unsigned long long size = 0;
        enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;

        if (!options)
                return 0;

        while ((p = strsep(&options, ",")) != NULL) {
                int token;
                if (!*p)
                        continue;

                token = match_token(p, tokens, args);
                switch (token) {
                case Opt_uid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->uid = make_kuid(current_user_ns(), option);
                        if (!uid_valid(pconfig->uid))
                                goto bad_val;
                        break;

                case Opt_gid:
                        if (match_int(&args[0], &option))
                                goto bad_val;
                        pconfig->gid = make_kgid(current_user_ns(), option);
                        if (!gid_valid(pconfig->gid))
                                goto bad_val;
                        break;

                case Opt_mode:
                        if (match_octal(&args[0], &option))
                                goto bad_val;
                        pconfig->mode = option & 01777U;
                        break;

                case Opt_size: {
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        size = memparse(args[0].from, &rest);
                        setsize = SIZE_STD;
                        if (*rest == '%')
                                setsize = SIZE_PERCENT;
                        break;
                }

                case Opt_nr_inodes:
                        /* memparse() will accept a K/M/G without a digit */
                        if (!isdigit(*args[0].from))
                                goto bad_val;
                        pconfig->nr_inodes = memparse(args[0].from, &rest);
                        break;

                case Opt_pagesize: {
                        unsigned long ps;
                        ps = memparse(args[0].from, &rest);
                        pconfig->hstate = size_to_hstate(ps);
                        if (!pconfig->hstate) {
                                pr_err("Unsupported page size %lu MB\n",
                                        ps >> 20);
                                return -EINVAL;
                        }
                        break;
                }

                default:
                        pr_err("Bad mount option: \"%s\"\n", p);
                        return -EINVAL;
                        break;
                }
        }

        /* Do size after hstate is set up */
        if (setsize > NO_SIZE) {
                struct hstate *h = pconfig->hstate;
                if (setsize == SIZE_PERCENT) {
                        size <<= huge_page_shift(h);
                        size *= h->max_huge_pages;
                        do_div(size, 100);
                }
                pconfig->nr_blocks = (size >> huge_page_shift(h));
        }

        return 0;

bad_val:
        pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
        return -EINVAL;
}
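
/*
 * Worked example of the size handling above (illustrative): with 2MB huge
 * pages and a pool of 512 pages (max_huge_pages == 512), "size=50%" yields
 * size = 50 << 21, then *= 512, then /= 100, so nr_blocks ends up as 256
 * huge pages -- the same result as the SIZE_STD path gives for "size=512M".
 */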

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
        int ret;
        struct hugetlbfs_config config;
        struct hugetlbfs_sb_info *sbinfo;

        save_mount_options(sb, data);

        config.nr_blocks = -1;	/* No limit on size by default */
        config.nr_inodes = -1;	/* No limit on number of inodes by default */
        config.uid = current_fsuid();
        config.gid = current_fsgid();
        config.mode = 0755;
        config.hstate = &default_hstate;
        ret = hugetlbfs_parse_options(data, &config);
        if (ret)
                return ret;

        sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
        if (!sbinfo)
                return -ENOMEM;
        sb->s_fs_info = sbinfo;
        sbinfo->hstate = config.hstate;
        spin_lock_init(&sbinfo->stat_lock);
        sbinfo->max_inodes = config.nr_inodes;
        sbinfo->free_inodes = config.nr_inodes;
        sbinfo->spool = NULL;
        if (config.nr_blocks != -1) {
                sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
                if (!sbinfo->spool)
                        goto out_free;
        }
        sb->s_maxbytes = MAX_LFS_FILESIZE;
        sb->s_blocksize = huge_page_size(config.hstate);
        sb->s_blocksize_bits = huge_page_shift(config.hstate);
        sb->s_magic = HUGETLBFS_MAGIC;
        sb->s_op = &hugetlbfs_ops;
        sb->s_time_gran = 1;
        sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
        if (!sb->s_root)
                goto out_free;
        return 0;
out_free:
        kfree(sbinfo->spool);
        kfree(sbinfo);
        return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data)
{
        return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
        .name = "hugetlbfs",
        .mount = hugetlbfs_mount,
        .kill_sb = kill_litter_super,
};
MODULE_ALIAS_FS("hugetlbfs");

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
        kgid_t shm_group;
        shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
        return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
        struct hstate *h = hstate_sizelog(page_size_log);

        if (!h)
                return -1;
        return h - hstates;
}

static const struct dentry_operations anon_ops = {
        .d_dname = simple_dname
};

/*
 * Note that size should be aligned to the proper hugepage size by the
 * caller; otherwise hugetlb_reserve_pages() reserves one fewer hugepage
 * than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
                                vm_flags_t acctflag, struct user_struct **user,
                                int creat_flags, int page_size_log)
{
        struct file *file = ERR_PTR(-ENOMEM);
        struct inode *inode;
        struct path path;
        struct super_block *sb;
        struct qstr quick_string;
        int hstate_idx;

        hstate_idx = get_hstate_idx(page_size_log);
        if (hstate_idx < 0)
                return ERR_PTR(-ENODEV);

        *user = NULL;
        if (!hugetlbfs_vfsmount[hstate_idx])
                return ERR_PTR(-ENOENT);

        if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
                *user = current_user();
                if (user_shm_lock(size, *user)) {
                        task_lock(current);
                        pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
                                current->comm, current->pid);
                        task_unlock(current);
                } else {
                        *user = NULL;
                        return ERR_PTR(-EPERM);
                }
        }

        sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
        quick_string.name = name;
        quick_string.len = strlen(quick_string.name);
        quick_string.hash = 0;
        path.dentry = d_alloc_pseudo(sb, &quick_string);
        if (!path.dentry)
                goto out_shm_unlock;

        d_set_d_op(path.dentry, &anon_ops);
        path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
        file = ERR_PTR(-ENOSPC);
        inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
        if (!inode)
                goto out_dentry;

        file = ERR_PTR(-ENOMEM);
        if (hugetlb_reserve_pages(inode, 0,
                        size >> huge_page_shift(hstate_inode(inode)), NULL,
                        acctflag))
                goto out_inode;

        d_instantiate(path.dentry, inode);
        inode->i_size = size;
        clear_nlink(inode);

        file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
                        &hugetlbfs_file_operations);
        if (IS_ERR(file))
                goto out_dentry; /* inode is already attached */

        return file;

out_inode:
        iput(inode);
out_dentry:
        path_put(&path);
out_shm_unlock:
        if (*user) {
                user_shm_unlock(size, *user);
                *user = NULL;
        }
        return file;
}
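
/*
 * Sketch of how this is reached from userspace via SysV shared memory
 * (illustrative; the 2MB size assumes 2MB huge pages):
 *
 *      int id = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
 *                      SHM_HUGETLB | IPC_CREAT | SHM_R | SHM_W);
 *      void *p = shmat(id, NULL, 0);
 *
 * shmget(SHM_HUGETLB) calls in with creat_flags == HUGETLB_SHMFS_INODE,
 * so the can_do_hugetlb_shm()/user_shm_lock() accounting above applies;
 * anonymous mmap(MAP_HUGETLB) reaches here as well, with
 * HUGETLB_ANONHUGE_INODE.
 */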

static int __init init_hugetlbfs_fs(void)
{
        struct hstate *h;
        int error;
        int i;

        if (!hugepages_supported()) {
                pr_info("disabling because there are no supported hugepage sizes\n");
                return -ENOTSUPP;
        }

        error = -ENOMEM;
        hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
                                        sizeof(struct hugetlbfs_inode_info),
                                        0, 0, init_once);
        if (hugetlbfs_inode_cachep == NULL)
                goto out2;

        error = register_filesystem(&hugetlbfs_fs_type);
        if (error)
                goto out;

        i = 0;
        for_each_hstate(h) {
                char buf[50];
                unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

                snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
                hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
                                                        buf);

                if (IS_ERR(hugetlbfs_vfsmount[i])) {
                        pr_err("Cannot mount internal hugetlbfs for "
                                "page size %uK", ps_kb);
                        error = PTR_ERR(hugetlbfs_vfsmount[i]);
                        hugetlbfs_vfsmount[i] = NULL;
                }
                i++;
        }
        /* Non default hstates are optional */
        if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
                return 0;

out:
        kmem_cache_destroy(hugetlbfs_inode_cachep);
out2:
        return error;
}

static void __exit exit_hugetlbfs_fs(void)
{
        struct hstate *h;
        int i;

        /*
         * Make sure all delayed rcu free inodes are flushed before we
         * destroy cache.
         */
        rcu_barrier();
        kmem_cache_destroy(hugetlbfs_inode_cachep);
        i = 0;
        for_each_hstate(h)
                kern_unmount(hugetlbfs_vfsmount[i++]);
        unregister_filesystem(&hugetlbfs_fs_type);
}

module_init(init_hugetlbfs_fs)
module_exit(exit_hugetlbfs_fs)

MODULE_LICENSE("GPL");