/*
 * hugetlbpage-backed filesystem.  Based on ramfs.
 *
 * Nadia Yvette Chambers, 2002
 *
 * Copyright (C) 2002 Linus Torvalds.
 * License: GPL
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/thread_info.h>
#include <asm/current.h>
#include <linux/sched/signal.h>	/* remove ASAP */
#include <linux/falloc.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/file.h>
#include <linux/kernel.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/capability.h>
#include <linux/ctype.h>
#include <linux/backing-dev.h>
#include <linux/hugetlb.h>
#include <linux/pagevec.h>
#include <linux/parser.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/dnotify.h>
#include <linux/statfs.h>
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/migrate.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

static const struct super_operations hugetlbfs_ops;
static const struct address_space_operations hugetlbfs_aops;
const struct file_operations hugetlbfs_file_operations;
static const struct inode_operations hugetlbfs_dir_inode_operations;
static const struct inode_operations hugetlbfs_inode_operations;

struct hugetlbfs_config {
	struct hstate		*hstate;
	long			max_hpages;
	long			nr_inodes;
	long			min_hpages;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
};

struct hugetlbfs_inode_info {
	struct shared_policy policy;
	struct inode vfs_inode;
};

static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
{
	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
}

int sysctl_hugetlb_shm_group;

enum {
	Opt_size, Opt_nr_inodes,
	Opt_mode, Opt_uid, Opt_gid,
	Opt_pagesize, Opt_min_size,
	Opt_err,
};

static const match_table_t tokens = {
	{Opt_size,	"size=%s"},
	{Opt_nr_inodes,	"nr_inodes=%s"},
	{Opt_mode,	"mode=%o"},
	{Opt_uid,	"uid=%u"},
	{Opt_gid,	"gid=%u"},
	{Opt_pagesize,	"pagesize=%s"},
	{Opt_min_size,	"min_size=%s"},
	{Opt_err,	NULL},
};

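/*
 * Example (illustrative, not part of the original file): the options in
 * the table above correspond to a mount invocation such as
 *
 *	mount -t hugetlbfs -o size=1G,min_size=512M,pagesize=2M,mode=1770 \
 *		none /mnt/huge
 *
 * "size" and "min_size" accept either bytes (with K/M/G suffixes) or a
 * percentage of the huge page pool, e.g. size=50%.
 */
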
#ifdef CONFIG_NUMA
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
	vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
							index);
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
	mpol_cond_put(vma->vm_policy);
}
#else
static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
					struct inode *inode, pgoff_t index)
{
}

static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
{
}
#endif

static void huge_pagevec_release(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); ++i)
		put_page(pvec->pages[i]);

	pagevec_reinit(pvec);
}

/*
 * Mask used when checking the page offset value passed in via system
 * calls.  This value will be converted to a loff_t which is signed.
 * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
 * value.  The extra bit (- 1 in the shift value) is to take the sign
 * bit into account.
 */
#define PGOFF_LOFFT_MAX \
	(((1UL << (PAGE_SHIFT + 1)) - 1) << (BITS_PER_LONG - (PAGE_SHIFT + 1)))

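/*
 * Worked example (illustrative): with 4K pages and 64-bit longs
 * (PAGE_SHIFT == 12, BITS_PER_LONG == 64), PGOFF_LOFFT_MAX selects the
 * top 13 bits of vm_pgoff; any offset with one of those bits set would
 * reach or pass the sign bit once shifted left by PAGE_SHIFT to become
 * a byte offset.
 */
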
static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file_inode(file);
	loff_t len, vma_len;
	int ret;
	struct hstate *h = hstate_file(file);

	/*
	 * vma address alignment (but not the pgoff alignment) has
	 * already been checked by prepare_hugepage_range.  If you add
	 * any error returns here, do so after setting VM_HUGETLB, so
	 * is_vm_hugetlb_page tests below unmap_region go the right
	 * way when do_mmap_pgoff unwinds (may be important on powerpc
	 * and ia64).
	 */
	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
	vma->vm_ops = &hugetlb_vm_ops;

	/*
	 * page based offset in vm_pgoff could be sufficiently large to
	 * overflow a loff_t when converted to byte offset.  This can
	 * only happen on architectures where sizeof(loff_t) ==
	 * sizeof(unsigned long).  So, only check in those instances.
	 */
	if (sizeof(unsigned long) == sizeof(loff_t)) {
		if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
			return -EINVAL;
	}

	/* must be huge page aligned */
	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
		return -EINVAL;

	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
	/* check for overflow */
	if (len < vma_len)
		return -EINVAL;

	inode_lock(inode);
	file_accessed(file);

	ret = -ENOMEM;
	if (hugetlb_reserve_pages(inode,
				vma->vm_pgoff >> huge_page_order(h),
				len >> huge_page_shift(h), vma,
				vma->vm_flags))
		goto out;

	ret = 0;
	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
		i_size_write(inode, len);
out:
	inode_unlock(inode);

	return ret;
}

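/*
 * Illustrative userspace counterpart (assumption, not part of this file):
 * the hook above runs for a mapping of a hugetlbfs file, e.g.
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * A file offset that is not huge page aligned fails the vm_pgoff check
 * above with -EINVAL.
 */
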
/*
 * Called under down_write(mmap_sem).
 */

#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
static unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}
#endif

static size_t
hugetlbfs_read_actor(struct page *page, unsigned long offset,
			struct iov_iter *to, unsigned long size)
{
	size_t copied = 0;
	int i, chunksize;

	/* Find which 4k chunk and offset within that chunk */
	i = offset >> PAGE_SHIFT;
	offset = offset & ~PAGE_MASK;

	while (size) {
		size_t n;
		chunksize = PAGE_SIZE;
		if (offset)
			chunksize -= offset;
		if (chunksize > size)
			chunksize = size;
		n = copy_page_to_iter(&page[i], offset, chunksize, to);
		copied += n;
		if (n != chunksize)
			return copied;
		offset = 0;
		size -= chunksize;
		i++;
	}
	return copied;
}

/*
 * Support for read() - Find the page attached to f_mapping and copy out the
 * data.  It's *very* similar to do_generic_mapping_read(), but we can't use
 * that since it has PAGE_SIZE assumptions.
 */
static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct hstate *h = hstate_file(file);
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long index = iocb->ki_pos >> huge_page_shift(h);
	unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
	unsigned long end_index;
	loff_t isize;
	ssize_t retval = 0;

	while (iov_iter_count(to)) {
		struct page *page;
		size_t nr, copied;

		/* nr is the maximum number of bytes to copy from this page */
		nr = huge_page_size(h);
		isize = i_size_read(inode);
		if (!isize)
			break;
		end_index = (isize - 1) >> huge_page_shift(h);
		if (index > end_index)
			break;
		if (index == end_index) {
			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
			if (nr <= offset)
				break;
		}
		nr = nr - offset;

		/* Find the page */
		page = find_lock_page(mapping, index);
		if (unlikely(page == NULL)) {
			/*
			 * We have a HOLE, zero out the user-buffer for the
			 * length of the hole or request.
			 */
			copied = iov_iter_zero(nr, to);
		} else {
			unlock_page(page);

			/*
			 * We have the page, copy it to user space buffer.
			 */
			copied = hugetlbfs_read_actor(page, offset, to, nr);
			put_page(page);
		}
		offset += copied;
		retval += copied;
		if (copied != nr && iov_iter_count(to)) {
			if (!retval)
				retval = -EFAULT;
			break;
		}
		index += offset >> huge_page_shift(h);
		offset &= ~huge_page_mask(h);
	}
	iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
	return retval;
}

static int hugetlbfs_write_begin(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	return -EINVAL;
}

static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	BUG();
	return -EINVAL;
}

static void remove_huge_page(struct page *page)
{
	ClearPageDirty(page);
	ClearPageUptodate(page);
	delete_from_page_cache(page);
}

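/*
 * Unmap the page-offset range [start, end) from every vma that shares
 * this mapping; end == 0 means "through the end of the file".
 */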
static void
hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end)
{
	struct vm_area_struct *vma;

	/*
	 * end == 0 indicates that the entire range after
	 * start should be unmapped.
	 */
	vma_interval_tree_foreach(vma, root, start, end ? end : ULONG_MAX) {
		unsigned long v_offset;
		unsigned long v_end;

		/*
		 * Can the expression below overflow on 32-bit arches?
		 * No, because the interval tree returns us only those vmas
		 * which overlap the truncated area starting at pgoff,
		 * and no vma on a 32-bit arch can span beyond the 4GB.
		 */
		if (vma->vm_pgoff < start)
			v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
		else
			v_offset = 0;

		if (!end)
			v_end = vma->vm_end;
		else {
			v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
							+ vma->vm_start;
			if (v_end > vma->vm_end)
				v_end = vma->vm_end;
		}

		unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
									NULL);
	}
}

/*
 * remove_inode_hugepages handles two distinct cases: truncation and hole
 * punch.  There are subtle differences in operation for each case.
 *
 * truncation is indicated by end of range being LLONG_MAX
 *	In this case, we first scan the range and release found pages.
 *	After releasing pages, hugetlb_unreserve_pages cleans up region/reserv
 *	maps and global counts.  Page faults cannot race with truncation
 *	in this routine.  hugetlb_no_page() prevents page faults in the
 *	truncated range.  It checks i_size before allocation, and again after
 *	with the page table lock for the page held.  The same lock must be
 *	acquired to unmap a page.
 * hole punch is indicated if end is not LLONG_MAX
 *	In the hole punch case we scan the range and release found pages.
 *	Only when releasing a page is the associated region/reserv map
 *	deleted.  The region/reserv maps for ranges without associated
 *	pages are not modified.  Page faults can race with hole punch.
 *	This is indicated if we find a mapped page.
 * Note: If the passed end of range value is beyond the end of file, but
 * not LLONG_MAX this routine still performs a hole punch operation.
 */
static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
				   loff_t lend)
{
	struct hstate *h = hstate_inode(inode);
	struct address_space *mapping = &inode->i_data;
	const pgoff_t start = lstart >> huge_page_shift(h);
	const pgoff_t end = lend >> huge_page_shift(h);
	struct vm_area_struct pseudo_vma;
	struct pagevec pvec;
	pgoff_t next, index;
	int i, freed = 0;
	bool truncate_op = (lend == LLONG_MAX);

	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pagevec_init(&pvec);
	next = start;
	while (next < end) {
		/*
		 * When no more pages are found, we are done.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); ++i) {
			struct page *page = pvec.pages[i];
			u32 hash;

			index = page->index;
			hash = hugetlb_fault_mutex_hash(h, current->mm,
							&pseudo_vma,
							mapping, index, 0);
			mutex_lock(&hugetlb_fault_mutex_table[hash]);

			/*
			 * If page is mapped, it was faulted in after being
			 * unmapped in caller.  Unmap (again) now after taking
			 * the fault mutex.  The mutex will prevent faults
			 * until we finish removing the page.
			 *
			 * This race can only happen in the hole punch case.
			 * Getting here in a truncate operation is a bug.
			 */
			if (unlikely(page_mapped(page))) {
				BUG_ON(truncate_op);

				i_mmap_lock_write(mapping);
				hugetlb_vmdelete_list(&mapping->i_mmap,
					index * pages_per_huge_page(h),
					(index + 1) * pages_per_huge_page(h));
				i_mmap_unlock_write(mapping);
			}

			lock_page(page);
			/*
			 * We must free the huge page and remove from page
			 * cache (remove_huge_page) BEFORE removing the
			 * region/reserve map (hugetlb_unreserve_pages).  In
			 * rare out of memory conditions, removal of the
			 * region/reserve map could fail.  Correspondingly,
			 * the subpool and global reserve usage count can need
			 * to be adjusted.
			 */
			VM_BUG_ON(PagePrivate(page));
			remove_huge_page(page);
			freed++;
			if (!truncate_op) {
				if (unlikely(hugetlb_unreserve_pages(inode,
							index, index + 1, 1)))
					hugetlb_fix_reserve_counts(inode);
			}

			unlock_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		}
		huge_pagevec_release(&pvec);
		cond_resched();
	}

	if (truncate_op)
		(void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
}

static void hugetlbfs_evict_inode(struct inode *inode)
{
	struct resv_map *resv_map;

	remove_inode_hugepages(inode, 0, LLONG_MAX);
	resv_map = (struct resv_map *)inode->i_mapping->private_data;
	/* root inode doesn't have the resv_map, so we should check it */
	if (resv_map)
		resv_map_release(&resv_map->refs);
	clear_inode(inode);
}

static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
{
	pgoff_t pgoff;
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);

	BUG_ON(offset & ~huge_page_mask(h));
	pgoff = offset >> PAGE_SHIFT;

	i_size_write(inode, offset);
	i_mmap_lock_write(mapping);
	if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
		hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0);
	i_mmap_unlock_write(mapping);
	remove_inode_hugepages(inode, offset, LLONG_MAX);
	return 0;
}

static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct hstate *h = hstate_inode(inode);
	loff_t hpage_size = huge_page_size(h);
	loff_t hole_start, hole_end;

	/*
	 * For hole punch round up the beginning offset of the hole and
	 * round down the end.
	 */
	hole_start = round_up(offset, hpage_size);
	hole_end = round_down(offset + len, hpage_size);

	if (hole_end > hole_start) {
		struct address_space *mapping = inode->i_mapping;

		inode_lock(inode);
		i_mmap_lock_write(mapping);
		if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
			hugetlb_vmdelete_list(&mapping->i_mmap,
						hole_start >> PAGE_SHIFT,
						hole_end >> PAGE_SHIFT);
		i_mmap_unlock_write(mapping);
		remove_inode_hugepages(inode, hole_start, hole_end);
		inode_unlock(inode);
	}

	return 0;
}

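/*
 * Illustrative call (assumption, not part of this file): releasing the
 * first 2MB huge page of a file maps to
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 2 << 20);
 *
 * Unaligned offsets are rounded inward above, so a hole smaller than a
 * huge page releases nothing.
 */
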
static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
				loff_t len)
{
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct hstate *h = hstate_inode(inode);
	struct vm_area_struct pseudo_vma;
	struct mm_struct *mm = current->mm;
	loff_t hpage_size = huge_page_size(h);
	unsigned long hpage_shift = huge_page_shift(h);
	pgoff_t start, index, end;
	int error;
	u32 hash;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return hugetlbfs_punch_hole(inode, offset, len);

	/*
	 * Default preallocate case.
	 * For this range, start is rounded down and end is rounded up
	 * as well as being converted to page offsets.
	 */
	start = offset >> hpage_shift;
	end = (offset + len + hpage_size - 1) >> hpage_shift;

	inode_lock(inode);

	/* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
	error = inode_newsize_ok(inode, offset + len);
	if (error)
		goto out;

	/*
	 * Initialize a pseudo vma as this is required by the huge page
	 * allocation routines.  If NUMA is configured, use page index
	 * as input to create an allocation policy.
	 */
	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
	pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
	pseudo_vma.vm_file = file;

	for (index = start; index < end; index++) {
		/*
		 * This is supposed to be the vaddr where the page is being
		 * faulted in, but we have no vaddr here.
		 */
		struct page *page;
		unsigned long addr;
		int avoid_reserve = 0;

		cond_resched();

		/*
		 * fallocate(2) manpage permits EINTR; we may have been
		 * interrupted because we are using up too much memory.
		 */
		if (signal_pending(current)) {
			error = -EINTR;
			break;
		}

		/* Set numa allocation policy based on index */
		hugetlb_set_vma_policy(&pseudo_vma, inode, index);

		/* addr is the offset within the file (zero based) */
		addr = index * hpage_size;

		/* mutex taken here, fault path and hole punch */
		hash = hugetlb_fault_mutex_hash(h, mm, &pseudo_vma, mapping,
						index, addr);
		mutex_lock(&hugetlb_fault_mutex_table[hash]);

		/* See if already present in mapping to avoid alloc/free */
		page = find_get_page(mapping, index);
		if (page) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			hugetlb_drop_vma_policy(&pseudo_vma);
			continue;
		}

		/* Allocate page and add to page cache */
		page = alloc_huge_page(&pseudo_vma, addr, avoid_reserve);
		hugetlb_drop_vma_policy(&pseudo_vma);
		if (IS_ERR(page)) {
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			error = PTR_ERR(page);
			goto out;
		}
		clear_huge_page(page, addr, pages_per_huge_page(h));
		__SetPageUptodate(page);
		error = huge_add_to_page_cache(page, mapping, index);
		if (unlikely(error)) {
			put_page(page);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			goto out;
		}

		mutex_unlock(&hugetlb_fault_mutex_table[hash]);

		/*
		 * unlock_page because locked by add_to_page_cache()
		 * put_page due to reference from alloc_huge_page()
		 */
		unlock_page(page);
		put_page(page);
	}

	if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
		i_size_write(inode, offset + len);
	inode->i_ctime = current_time(inode);
out:
	inode_unlock(inode);
	return error;
}

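/*
 * Illustrative call (assumption, not part of this file): preallocating
 * the first four 2MB huge pages of a file without growing i_size:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 4 * (2 << 20));
 */
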
static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct hstate *h = hstate_inode(inode);
	int error;
	unsigned int ia_valid = attr->ia_valid;

	BUG_ON(!inode);

	error = setattr_prepare(dentry, attr);
	if (error)
		return error;

	if (ia_valid & ATTR_SIZE) {
		if (attr->ia_size & ~huge_page_mask(h))
			return -EINVAL;
		error = hugetlb_vmtruncate(inode, attr->ia_size);
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}

static struct inode *hugetlbfs_get_root(struct super_block *sb,
					struct hugetlbfs_config *config)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = S_IFDIR | config->mode;
		inode->i_uid = config->uid;
		inode->i_gid = config->gid;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_op = &hugetlbfs_dir_inode_operations;
		inode->i_fop = &simple_dir_operations;
		/* directory inodes start off with i_nlink == 2 (for "." entry) */
		inc_nlink(inode);
		lockdep_annotate_inode_mutex_key(inode);
	}
	return inode;
}

/*
 * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
 * be taken from reclaim -- unlike regular filesystems.  This needs an
 * annotation because huge_pmd_share() does an allocation under hugetlb's
 * i_mmap_rwsem.
 */
static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;

static struct inode *hugetlbfs_get_inode(struct super_block *sb,
					struct inode *dir,
					umode_t mode, dev_t dev)
{
	struct inode *inode;
	struct resv_map *resv_map;

	resv_map = resv_map_alloc();
	if (!resv_map)
		return NULL;

	inode = new_inode(sb);
	if (inode) {
		inode->i_ino = get_next_ino();
		inode_init_owner(inode, dir, mode);
		lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
				&hugetlbfs_i_mmap_rwsem_key);
		inode->i_mapping->a_ops = &hugetlbfs_aops;
		inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
		inode->i_mapping->private_data = resv_map;
		switch (mode & S_IFMT) {
		default:
			init_special_inode(inode, mode, dev);
			break;
		case S_IFREG:
			inode->i_op = &hugetlbfs_inode_operations;
			inode->i_fop = &hugetlbfs_file_operations;
			break;
		case S_IFDIR:
			inode->i_op = &hugetlbfs_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;

			/* directory inodes start off with i_nlink == 2 (for "." entry) */
			inc_nlink(inode);
			break;
		case S_IFLNK:
			inode->i_op = &page_symlink_inode_operations;
			inode_nohighmem(inode);
			break;
		}
		lockdep_annotate_inode_mutex_key(inode);
	} else
		kref_put(&resv_map->refs, resv_map_release);

	return inode;
}

/*
 * File creation.  Allocate an inode, and we're done..
 */
static int hugetlbfs_mknod(struct inode *dir,
			struct dentry *dentry, umode_t mode, dev_t dev)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
	if (inode) {
		dir->i_ctime = dir->i_mtime = current_time(dir);
		d_instantiate(dentry, inode);
		dget(dentry);	/* Extra count - pin the dentry in core */
		error = 0;
	}
	return error;
}

static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
	if (!retval)
		inc_nlink(dir);
	return retval;
}

static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
{
	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
}

static int hugetlbfs_symlink(struct inode *dir,
			struct dentry *dentry, const char *symname)
{
	struct inode *inode;
	int error = -ENOSPC;

	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
	if (inode) {
		int l = strlen(symname)+1;
		error = page_symlink(inode, symname, l);
		if (!error) {
			d_instantiate(dentry, inode);
			dget(dentry);
		} else
			iput(inode);
	}
	dir->i_ctime = dir->i_mtime = current_time(dir);

	return error;
}

/*
 * mark the head page dirty
 */
static int hugetlbfs_set_page_dirty(struct page *page)
{
	struct page *head = compound_head(page);

	SetPageDirty(head);
	return 0;
}

static int hugetlbfs_migrate_page(struct address_space *mapping,
				struct page *newpage, struct page *page,
				enum migrate_mode mode)
{
	int rc;

	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;
	if (mode != MIGRATE_SYNC_NO_COPY)
		migrate_page_copy(newpage, page);
	else
		migrate_page_states(newpage, page);

	return MIGRATEPAGE_SUCCESS;
}

static int hugetlbfs_error_remove_page(struct address_space *mapping,
				struct page *page)
{
	struct inode *inode = mapping->host;
	pgoff_t index = page->index;

	remove_huge_page(page);
	if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
		hugetlb_fix_reserve_counts(inode);

	return 0;
}

/*
 * Display the mount options in /proc/mounts.
 */
static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
	struct hugepage_subpool *spool = sbinfo->spool;
	unsigned long hpage_size = huge_page_size(sbinfo->hstate);
	unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
	char mod;

	if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
		seq_printf(m, ",uid=%u",
			   from_kuid_munged(&init_user_ns, sbinfo->uid));
	if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
		seq_printf(m, ",gid=%u",
			   from_kgid_munged(&init_user_ns, sbinfo->gid));
	if (sbinfo->mode != 0755)
		seq_printf(m, ",mode=%o", sbinfo->mode);
	if (sbinfo->max_inodes != -1)
		seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);

	hpage_size /= 1024;
	mod = 'K';
	if (hpage_size >= 1024) {
		hpage_size /= 1024;
		mod = 'M';
	}
	seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
	if (spool) {
		if (spool->max_hpages != -1)
			seq_printf(m, ",size=%llu",
				   (unsigned long long)spool->max_hpages << hpage_shift);
		if (spool->min_hpages != -1)
			seq_printf(m, ",min_size=%llu",
				   (unsigned long long)spool->min_hpages << hpage_shift);
	}
	return 0;
}

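/*
 * Example /proc/mounts line built with the helper above (illustrative):
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824 0 0
 *
 * pagesize is printed in K or M units; size and min_size are printed in
 * bytes regardless of how they were specified at mount time.
 */
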
static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
	struct hstate *h = hstate_inode(d_inode(dentry));

	buf->f_type = HUGETLBFS_MAGIC;
	buf->f_bsize = huge_page_size(h);
	if (sbinfo) {
		spin_lock(&sbinfo->stat_lock);
		/* If no limits set, just report 0 for max/free/used
		 * blocks, like simple_statfs() */
		if (sbinfo->spool) {
			long free_pages;

			spin_lock(&sbinfo->spool->lock);
			buf->f_blocks = sbinfo->spool->max_hpages;
			free_pages = sbinfo->spool->max_hpages
				- sbinfo->spool->used_hpages;
			buf->f_bavail = buf->f_bfree = free_pages;
			spin_unlock(&sbinfo->spool->lock);
			buf->f_files = sbinfo->max_inodes;
			buf->f_ffree = sbinfo->free_inodes;
		}
		spin_unlock(&sbinfo->stat_lock);
	}
	buf->f_namelen = NAME_MAX;
	return 0;
}

static void hugetlbfs_put_super(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);

	if (sbi) {
		sb->s_fs_info = NULL;

		if (sbi->spool)
			hugepage_put_subpool(sbi->spool);

		kfree(sbi);
	}
}

static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		if (unlikely(!sbinfo->free_inodes)) {
			spin_unlock(&sbinfo->stat_lock);
			return 0;
		}
		sbinfo->free_inodes--;
		spin_unlock(&sbinfo->stat_lock);
	}

	return 1;
}

static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
{
	if (sbinfo->free_inodes >= 0) {
		spin_lock(&sbinfo->stat_lock);
		sbinfo->free_inodes++;
		spin_unlock(&sbinfo->stat_lock);
	}
}

static struct kmem_cache *hugetlbfs_inode_cachep;

static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
{
	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
	struct hugetlbfs_inode_info *p;

	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
		return NULL;
	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
	if (unlikely(!p)) {
		hugetlbfs_inc_free_inodes(sbinfo);
		return NULL;
	}

	/*
	 * Any time after allocation, hugetlbfs_destroy_inode can be called
	 * for the inode.  mpol_free_shared_policy is unconditionally called
	 * as part of hugetlbfs_destroy_inode.  So, initialize policy here
	 * in case of a quick call to destroy.
	 *
	 * Note that the policy is initialized even if we are creating a
	 * private inode.  This simplifies hugetlbfs_destroy_inode.
	 */
	mpol_shared_policy_init(&p->policy, NULL);

	return &p->vfs_inode;
}

static void hugetlbfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);
	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
}

static void hugetlbfs_destroy_inode(struct inode *inode)
{
	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
}

static const struct address_space_operations hugetlbfs_aops = {
	.write_begin		= hugetlbfs_write_begin,
	.write_end		= hugetlbfs_write_end,
	.set_page_dirty		= hugetlbfs_set_page_dirty,
	.migratepage		= hugetlbfs_migrate_page,
	.error_remove_page	= hugetlbfs_error_remove_page,
};

static void init_once(void *foo)
{
	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;

	inode_init_once(&ei->vfs_inode);
}

const struct file_operations hugetlbfs_file_operations = {
	.read_iter		= hugetlbfs_read_iter,
	.mmap			= hugetlbfs_file_mmap,
	.fsync			= noop_fsync,
	.get_unmapped_area	= hugetlb_get_unmapped_area,
	.llseek			= default_llseek,
	.fallocate		= hugetlbfs_fallocate,
};

static const struct inode_operations hugetlbfs_dir_inode_operations = {
	.create		= hugetlbfs_create,
	.lookup		= simple_lookup,
	.link		= simple_link,
	.unlink		= simple_unlink,
	.symlink	= hugetlbfs_symlink,
	.mkdir		= hugetlbfs_mkdir,
	.rmdir		= simple_rmdir,
	.mknod		= hugetlbfs_mknod,
	.rename		= simple_rename,
	.setattr	= hugetlbfs_setattr,
};

static const struct inode_operations hugetlbfs_inode_operations = {
	.setattr	= hugetlbfs_setattr,
};

static const struct super_operations hugetlbfs_ops = {
	.alloc_inode	= hugetlbfs_alloc_inode,
	.destroy_inode	= hugetlbfs_destroy_inode,
	.evict_inode	= hugetlbfs_evict_inode,
	.statfs		= hugetlbfs_statfs,
	.put_super	= hugetlbfs_put_super,
	.show_options	= hugetlbfs_show_options,
};

enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };

/*
 * Convert size option passed from command line to number of huge pages
 * in the pool specified by hstate.  Size option could be in bytes
 * (val_type == SIZE_STD) or percentage of the pool (val_type == SIZE_PERCENT).
 */
static long
hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
			 enum hugetlbfs_size_type val_type)
{
	if (val_type == NO_SIZE)
		return -1;

	if (val_type == SIZE_PERCENT) {
		size_opt <<= huge_page_shift(h);
		size_opt *= h->max_huge_pages;
		do_div(size_opt, 100);
	}

	size_opt >>= huge_page_shift(h);
	return size_opt;
}

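/*
 * Worked example (illustrative): with 2MB huge pages and a pool of
 * max_huge_pages == 512, "size=50%" arrives as size_opt == 50 with
 * SIZE_PERCENT: 50 << 21, times 512, divided by 100, shifted back down
 * by 21, yields 256 huge pages.
 */
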
static int
hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
{
	char *p, *rest;
	substring_t args[MAX_OPT_ARGS];
	int option;
	unsigned long long max_size_opt = 0, min_size_opt = 0;
	enum hugetlbfs_size_type max_val_type = NO_SIZE, min_val_type = NO_SIZE;

	if (!options)
		return 0;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_uid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->uid = make_kuid(current_user_ns(), option);
			if (!uid_valid(pconfig->uid))
				goto bad_val;
			break;

		case Opt_gid:
			if (match_int(&args[0], &option))
				goto bad_val;
			pconfig->gid = make_kgid(current_user_ns(), option);
			if (!gid_valid(pconfig->gid))
				goto bad_val;
			break;

		case Opt_mode:
			if (match_octal(&args[0], &option))
				goto bad_val;
			pconfig->mode = option & 01777U;
			break;

		case Opt_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			max_size_opt = memparse(args[0].from, &rest);
			max_val_type = SIZE_STD;
			if (*rest == '%')
				max_val_type = SIZE_PERCENT;
			break;
		}

		case Opt_nr_inodes:
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			pconfig->nr_inodes = memparse(args[0].from, &rest);
			break;

		case Opt_pagesize: {
			unsigned long ps;
			ps = memparse(args[0].from, &rest);
			pconfig->hstate = size_to_hstate(ps);
			if (!pconfig->hstate) {
				pr_err("Unsupported page size %lu MB\n",
					ps >> 20);
				return -EINVAL;
			}
			break;
		}

		case Opt_min_size: {
			/* memparse() will accept a K/M/G without a digit */
			if (!isdigit(*args[0].from))
				goto bad_val;
			min_size_opt = memparse(args[0].from, &rest);
			min_val_type = SIZE_STD;
			if (*rest == '%')
				min_val_type = SIZE_PERCENT;
			break;
		}

		default:
			pr_err("Bad mount option: \"%s\"\n", p);
			return -EINVAL;
			break;
		}
	}

	/*
	 * Use huge page pool size (in hstate) to convert the size
	 * options to number of huge pages.  If NO_SIZE, -1 is returned.
	 */
	pconfig->max_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						max_size_opt, max_val_type);
	pconfig->min_hpages = hugetlbfs_size_to_hpages(pconfig->hstate,
						min_size_opt, min_val_type);

	/*
	 * If max_size was specified, then min_size must be smaller
	 */
	if (max_val_type > NO_SIZE &&
	    pconfig->min_hpages > pconfig->max_hpages) {
		pr_err("minimum size can not be greater than maximum size\n");
		return -EINVAL;
	}

	return 0;

bad_val:
	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
	return -EINVAL;
}

static int
hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int ret;
	struct hugetlbfs_config config;
	struct hugetlbfs_sb_info *sbinfo;

	config.max_hpages = -1;	/* No limit on size by default */
	config.nr_inodes = -1;	/* No limit on number of inodes by default */
	config.uid = current_fsuid();
	config.gid = current_fsgid();
	config.mode = 0755;
	config.hstate = &default_hstate;
	config.min_hpages = -1;	/* No default minimum size */
	ret = hugetlbfs_parse_options(data, &config);
	if (ret)
		return ret;

	sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
	if (!sbinfo)
		return -ENOMEM;
	sb->s_fs_info = sbinfo;
	sbinfo->hstate = config.hstate;
	spin_lock_init(&sbinfo->stat_lock);
	sbinfo->max_inodes = config.nr_inodes;
	sbinfo->free_inodes = config.nr_inodes;
	sbinfo->spool = NULL;
	sbinfo->uid = config.uid;
	sbinfo->gid = config.gid;
	sbinfo->mode = config.mode;

	/*
	 * Allocate and initialize subpool if maximum or minimum size is
	 * specified.  Any needed reservations (for minimum size) are taken
	 * when the subpool is created.
	 */
	if (config.max_hpages != -1 || config.min_hpages != -1) {
		sbinfo->spool = hugepage_new_subpool(config.hstate,
						     config.max_hpages,
						     config.min_hpages);
		if (!sbinfo->spool)
			goto out_free;
	}
	sb->s_maxbytes = MAX_LFS_FILESIZE;
	sb->s_blocksize = huge_page_size(config.hstate);
	sb->s_blocksize_bits = huge_page_shift(config.hstate);
	sb->s_magic = HUGETLBFS_MAGIC;
	sb->s_op = &hugetlbfs_ops;
	sb->s_time_gran = 1;
	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
	if (!sb->s_root)
		goto out_free;
	return 0;
out_free:
	kfree(sbinfo->spool);
	kfree(sbinfo);
	return -ENOMEM;
}

static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
}

static struct file_system_type hugetlbfs_fs_type = {
	.name		= "hugetlbfs",
	.mount		= hugetlbfs_mount,
	.kill_sb	= kill_litter_super,
};

static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];

static int can_do_hugetlb_shm(void)
{
	kgid_t shm_group;
	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
}

static int get_hstate_idx(int page_size_log)
{
	struct hstate *h = hstate_sizelog(page_size_log);

	if (!h)
		return -1;
	return h - hstates;
}

static const struct dentry_operations anon_ops = {
	.d_dname = simple_dname
};

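/*
 * Note (illustrative): page_size_log is log2 of the requested page size;
 * 0 selects the default hstate in hstate_sizelog().  For example, a
 * SHM_HUGETLB caller asking for 2MB pages passes page_size_log == 21.
 */
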
/*
 * Note that size should be aligned to proper hugepage size in caller side,
 * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
 */
struct file *hugetlb_file_setup(const char *name, size_t size,
				vm_flags_t acctflag, struct user_struct **user,
				int creat_flags, int page_size_log)
{
	struct file *file = ERR_PTR(-ENOMEM);
	struct inode *inode;
	struct path path;
	struct super_block *sb;
	struct qstr quick_string;
	int hstate_idx;

	hstate_idx = get_hstate_idx(page_size_log);
	if (hstate_idx < 0)
		return ERR_PTR(-ENODEV);

	*user = NULL;
	if (!hugetlbfs_vfsmount[hstate_idx])
		return ERR_PTR(-ENOENT);

	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
		*user = current_user();
		if (user_shm_lock(size, *user)) {
			task_lock(current);
			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
				current->comm, current->pid);
			task_unlock(current);
		} else {
			*user = NULL;
			return ERR_PTR(-EPERM);
		}
	}

	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
	quick_string.name = name;
	quick_string.len = strlen(quick_string.name);
	quick_string.hash = 0;
	path.dentry = d_alloc_pseudo(sb, &quick_string);
	if (!path.dentry)
		goto out_shm_unlock;

	d_set_d_op(path.dentry, &anon_ops);
	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
	file = ERR_PTR(-ENOSPC);
	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
	if (!inode)
		goto out_dentry;
	if (creat_flags == HUGETLB_SHMFS_INODE)
		inode->i_flags |= S_PRIVATE;

	file = ERR_PTR(-ENOMEM);
	if (hugetlb_reserve_pages(inode, 0,
			size >> huge_page_shift(hstate_inode(inode)), NULL,
			acctflag))
		goto out_inode;

	d_instantiate(path.dentry, inode);
	inode->i_size = size;
	clear_nlink(inode);

	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
			&hugetlbfs_file_operations);
	if (IS_ERR(file))
		goto out_dentry; /* inode is already attached */

	return file;

out_inode:
	iput(inode);
out_dentry:
	path_put(&path);
out_shm_unlock:
	if (*user) {
		user_shm_unlock(size, *user);
		*user = NULL;
	}
	return file;
}

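/*
 * Illustrative caller (assumption, not part of this file): the SysV
 * shared memory path ends up in hugetlb_file_setup() for
 *
 *	shmget(IPC_PRIVATE, 8 * 1024 * 1024, SHM_HUGETLB | IPC_CREAT | 0600);
 *
 * with size already rounded up to the huge page size by the caller, as
 * the comment above hugetlb_file_setup() requires.
 */
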
static int __init init_hugetlbfs_fs(void)
{
	struct hstate *h;
	int error;
	int i;

	if (!hugepages_supported()) {
		pr_info("disabling because there are no supported hugepage sizes\n");
		return -ENOTSUPP;
	}

	error = -ENOMEM;
	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
					sizeof(struct hugetlbfs_inode_info),
					0, SLAB_ACCOUNT, init_once);
	if (hugetlbfs_inode_cachep == NULL)
		goto out2;

	error = register_filesystem(&hugetlbfs_fs_type);
	if (error)
		goto out;

	i = 0;
	for_each_hstate(h) {
		char buf[50];
		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);

		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
							buf);

		if (IS_ERR(hugetlbfs_vfsmount[i])) {
			pr_err("Cannot mount internal hugetlbfs for "
				"page size %uK", ps_kb);
			error = PTR_ERR(hugetlbfs_vfsmount[i]);
			hugetlbfs_vfsmount[i] = NULL;
		}
		i++;
	}
	/* Non default hstates are optional */
	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
		return 0;

 out:
	kmem_cache_destroy(hugetlbfs_inode_cachep);
 out2:
	return error;
}
fs_initcall(init_hugetlbfs_fs)