mm/hugetlb.c

/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

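/*
 * Clear/copy a hugepage one base page at a time, with a cond_resched()
 * between pages: these loops run for HPAGE_SIZE/PAGE_SIZE iterations,
 * so give the scheduler a chance in between.
 */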
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr, struct vm_area_struct *vma)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE, vma);
	}
}

static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

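/*
 * Take a free hugepage off the freelist of the first node in the
 * mempolicy zonelist for this VMA/address whose zone the cpuset
 * allows and which actually has a free hugepage.
 */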
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = zone_to_nid(*z);
		if (cpuset_zone_allowed_softwall(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

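/*
 * Compound-page destructor: runs when the last reference to a
 * hugepage is dropped and returns the page to its node's freelist.
 */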
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

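/*
 * Allocate a new hugepage from the buddy allocator, round-robining
 * across the online nodes, and release it into the hugepage pool.
 * Returns 1 on success, 0 if the allocation failed.
 */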
static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
					HUGETLB_PAGE_ORDER);
	nid = next_node(nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	if (page) {
		set_compound_page_dtor(page, free_huge_page);
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

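/*
 * Allocate a hugepage for the given mapping. Shared (VM_MAYSHARE)
 * mappings consume one of their reserved pages; private mappings may
 * only take pages not needed to back existing reservations.
 */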
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				unsigned long addr)
{
	struct page *page;

	spin_lock(&hugetlb_lock);
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages--;
	else if (free_huge_pages <= resv_huge_pages)
		goto fail;

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	if (vma->vm_flags & VM_MAYSHARE)
		resv_huge_pages++;
	spin_unlock(&hugetlb_lock);
	return NULL;
}

static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk("Total HugeTLB memory allocated, %ld\n", free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
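/*
 * Give a hugepage back to the buddy allocator: fix up the pool
 * counters, scrub the per-page flags, clear the compound-page
 * destructor and free the page as an order-HUGETLB_PAGE_ORDER block.
 */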
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_to_nid(page)]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error | 1 << PG_referenced |
				1 << PG_dirty | 1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
static void try_to_free_low(unsigned long count)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			update_and_free_page(page);
			free_huge_pages--;
			free_huge_pages_node[page_to_nid(page)]--;
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

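/*
 * Resize the hugepage pool to @count pages: allocate fresh pages to
 * grow, or free surplus ones to shrink, never shrinking below the
 * number of reserved pages. Returns the resulting pool size.
 */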
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	count = max(count, resv_huge_pages);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			resv_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all. They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA. do_page_fault() is supposed to trap this, so BUG if we get
 * this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

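/*
 * Construct a huge pte for @page with the VMA's page protections;
 * the pte is made writable and dirty only when @writable is set.
 */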
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

static void set_huge_ptep_writable(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}

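/*
 * Duplicate the hugepage ptes of @src into @dst at fork time. For
 * COW-able (private, writable) mappings the parent's ptes are
 * write-protected as well, so the first write in either process
 * goes through hugetlb_cow().
 */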
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			    unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;
	struct page *tmp;
	/*
	 * A page gathering list, protected by per file i_mmap_lock. The
	 * lock is used to avoid list corruption from multiple unmapping
	 * of the same page since we are using page->lru.
	 */
	LIST_HEAD(page_list);

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);
	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		if (huge_pmd_unshare(mm, &address, ptep))
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (pte_dirty(pte))
			set_page_dirty(page);
		list_add(&page->lru, &page_list);
	}
	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
	list_for_each_entry_safe(page, tmp, &page_list, lru) {
		list_del(&page->lru);
		put_page(page);
	}
}

void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	/*
	 * It is undesirable to test vma->vm_file as it should be non-null
	 * for a valid hugetlb area. However, vm_file will be NULL in the
	 * error cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap
	 * method fails, do_mmap_pgoff() nullifies vma->vm_file before
	 * calling this function to clean up. Since no pte has actually
	 * been set up, it is safe to do nothing in this case.
	 */
	if (vma->vm_file) {
		spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
		__unmap_hugepage_range(vma, start, end);
		spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
	}
}

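/*
 * Handle a write fault on a write-protected hugepage: if we hold the
 * only reference, just make the pte writable; otherwise allocate a
 * fresh hugepage, copy into it and install it in place of the old
 * one. Called with mm->page_table_lock held; drops and reacquires it
 * around the copy.
 */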
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address, vma);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

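/*
 * Fault in a hugepage that has no pte yet: look it up in the page
 * cache, allocating a zeroed hugepage if none exists (and, for shared
 * mappings, inserting it into the page cache), then install the pte
 * under page_table_lock.
 */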
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		size = i_size_read(mapping->host) >> HPAGE_SHIFT;
		if (idx >= size)
			goto out;
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

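/*
 * Main hugetlb fault handler: faults on a missing pte are sent to
 * hugetlb_no_page(); write faults on a present, read-only pte are
 * sent to hugetlb_cow().
 */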
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

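/*
 * get_user_pages() support for hugetlb VMAs: walk the huge ptes,
 * faulting pages in as needed, and fill *pages/*vmas with one entry
 * per base page.
 */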
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage. We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		if (pages) {
			get_page(page);
			pages[i] = page + pfn_offset;
		}

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

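/*
 * Used by the mprotect() path: rewrite every huge pte in the range
 * with the new protections and flush the TLB for the range.
 */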
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (huge_pmd_unshare(mm, &address, ptep))
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);
	spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);

	flush_tlb_range(vma, start, end);
}

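/*
 * The reservation map: a per-inode list of [from, to) file regions
 * for which hugepages have been reserved, kept sorted and
 * non-overlapping on inode->i_mapping->private_list. Reservations are
 * made in two steps: region_chg() computes how many new pages a
 * reservation needs, and region_add() commits it once the accounting
 * has succeeded.
 */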
struct file_region {
	struct list_head link;
	long from;
	long to;
};

static long region_add(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg, *trg;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;

	/* Check for and consume any regions we now overlap with. */
	nrg = rg;
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			break;

		/* If this area reaches higher then extend our area to
		 * include it completely. If this is not the first area
		 * which we intend to reuse, free it. */
		if (rg->to > t)
			t = rg->to;
		if (rg != nrg) {
			list_del(&rg->link);
			kfree(rg);
		}
	}
	nrg->from = f;
	nrg->to = t;
	return 0;
}

static long region_chg(struct list_head *head, long f, long t)
{
	struct file_region *rg, *nrg;
	long chg = 0;

	/* Locate the region we are before or in. */
	list_for_each_entry(rg, head, link)
		if (f <= rg->to)
			break;

	/* If we are below the current region then a new region is required.
	 * Subtle, allocate a new region at the position but make it zero
	 * size such that we can guarantee to record the reservation. */
	if (&rg->link == head || t < rg->from) {
		nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
		if (!nrg)
			return -ENOMEM;
		nrg->from = f;
		nrg->to = f;
		INIT_LIST_HEAD(&nrg->link);
		list_add(&nrg->link, rg->link.prev);

		return t - f;
	}

	/* Round our left edge to the current segment if it encloses us. */
	if (f > rg->from)
		f = rg->from;
	chg = t - f;

	/* Check for and consume any regions we now overlap with. */
	list_for_each_entry(rg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		if (rg->from > t)
			return chg;

		/* We overlap with this area, if it extends further than
		 * us then we must extend ourselves. Account for its
		 * existing reservation. */
		if (rg->to > t) {
			chg += rg->to - t;
			t = rg->to;
		}
		chg -= rg->to - rg->from;
	}
	return chg;
}

static long region_truncate(struct list_head *head, long end)
{
	struct file_region *rg, *trg;
	long chg = 0;

	/* Locate the region we are either in or before. */
	list_for_each_entry(rg, head, link)
		if (end <= rg->to)
			break;
	if (&rg->link == head)
		return 0;

	/* If we are in the middle of a region then adjust it. */
	if (end > rg->from) {
		chg = rg->to - end;
		rg->to = end;
		rg = list_entry(rg->link.next, typeof(*rg), link);
	}

	/* Drop any remaining regions. */
	list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
		if (&rg->link == head)
			break;
		chg += rg->to - rg->from;
		list_del(&rg->link);
		kfree(rg);
	}
	return chg;
}

static int hugetlb_acct_memory(long delta)
{
	int ret = -ENOMEM;

	spin_lock(&hugetlb_lock);
	if ((delta + resv_huge_pages) <= free_huge_pages) {
		resv_huge_pages += delta;
		ret = 0;
	}
	spin_unlock(&hugetlb_lock);
	return ret;
}

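/*
 * Reserve hugepages for the file range [from, to): work out how many
 * pages the range adds over existing reservations, charge them
 * against the free pool, and record the region on success.
 */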
int hugetlb_reserve_pages(struct inode *inode, long from, long to)
{
	long ret, chg;

	chg = region_chg(&inode->i_mapping->private_list, from, to);
	if (chg < 0)
		return chg;
	ret = hugetlb_acct_memory(chg);
	if (ret < 0)
		return ret;
	region_add(&inode->i_mapping->private_list, from, to);
	return 0;
}

void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
{
	long chg = region_truncate(&inode->i_mapping->private_list, offset);
	hugetlb_acct_memory(freed - chg);
}