/*
 * Generic hugetlb support.
 * (C) William Irwin, April 2004
 */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/cpuset.h>
#include <linux/mutex.h>

#include <asm/page.h>
#include <asm/pgtable.h>

#include <linux/hugetlb.h>
#include "internal.h"

const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
static unsigned long nr_huge_pages, free_huge_pages, reserved_huge_pages;
unsigned long max_huge_pages;
static struct list_head hugepage_freelists[MAX_NUMNODES];
static unsigned int nr_huge_pages_node[MAX_NUMNODES];
static unsigned int free_huge_pages_node[MAX_NUMNODES];
/*
 * Protects updates to hugepage_freelists, nr_huge_pages, and free_huge_pages
 */
static DEFINE_SPINLOCK(hugetlb_lock);

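/*
 * Clear a huge page one base page at a time.  A huge page covers many
 * base pages, so give other tasks a chance to run between pages rather
 * than clearing the whole thing in one stretch.
 */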
static void clear_huge_page(struct page *page, unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < (HPAGE_SIZE/PAGE_SIZE); i++) {
		cond_resched();
		clear_user_highpage(page + i, addr);
	}
}

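/*
 * Copy a huge page base page by base page.  Like clear_huge_page(),
 * this may sleep and reschedules between pages.
 */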
static void copy_huge_page(struct page *dst, struct page *src,
			   unsigned long addr)
{
	int i;

	might_sleep();
	for (i = 0; i < HPAGE_SIZE/PAGE_SIZE; i++) {
		cond_resched();
		copy_user_highpage(dst + i, src + i, addr + i*PAGE_SIZE);
	}
}

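/*
 * Put a huge page back on its node's free list.  Caller must hold
 * hugetlb_lock.
 */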
static void enqueue_huge_page(struct page *page)
{
	int nid = page_to_nid(page);
	list_add(&page->lru, &hugepage_freelists[nid]);
	free_huge_pages++;
	free_huge_pages_node[nid]++;
}

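/*
 * Take a free huge page from the first zone in the mempolicy zonelist
 * for this VMA and fault address that the cpuset allows and that has
 * free huge pages.  Caller must hold hugetlb_lock.
 */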
static struct page *dequeue_huge_page(struct vm_area_struct *vma,
				unsigned long address)
{
	int nid = numa_node_id();
	struct page *page = NULL;
	struct zonelist *zonelist = huge_zonelist(vma, address);
	struct zone **z;

	for (z = zonelist->zones; *z; z++) {
		nid = (*z)->zone_pgdat->node_id;
		if (cpuset_zone_allowed(*z, GFP_HIGHUSER) &&
		    !list_empty(&hugepage_freelists[nid]))
			break;
	}

	if (*z) {
		page = list_entry(hugepage_freelists[nid].next,
				  struct page, lru);
		list_del(&page->lru);
		free_huge_pages--;
		free_huge_pages_node[nid]--;
	}
	return page;
}

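/*
 * Compound page destructor, installed by alloc_fresh_huge_page():
 * run when the last reference to a huge page is dropped, so the page
 * goes back on the hugepage free lists rather than to the buddy
 * allocator.
 */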
static void free_huge_page(struct page *page)
{
	BUG_ON(page_count(page));

	INIT_LIST_HEAD(&page->lru);

	spin_lock(&hugetlb_lock);
	enqueue_huge_page(page);
	spin_unlock(&hugetlb_lock);
}

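/*
 * Allocate a fresh huge page from the buddy allocator, round-robin
 * across the online nodes, and release it into the hugepage pool.
 * Returns 1 on success, 0 on failure.
 */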
static int alloc_fresh_huge_page(void)
{
	static int nid = 0;
	struct page *page;
	page = alloc_pages_node(nid, GFP_HIGHUSER|__GFP_COMP|__GFP_NOWARN,
				HUGETLB_PAGE_ORDER);
	nid = next_node(nid, node_online_map);
	if (nid == MAX_NUMNODES)
		nid = first_node(node_online_map);
	if (page) {
		page[1].lru.next = (void *)free_huge_page;	/* dtor */
		spin_lock(&hugetlb_lock);
		nr_huge_pages++;
		nr_huge_pages_node[page_to_nid(page)]++;
		spin_unlock(&hugetlb_lock);
		put_page(page); /* free it into the hugepage allocator */
		return 1;
	}
	return 0;
}

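/*
 * Allocate a huge page for a fault in the given VMA, drawing on the
 * pre-reserved pool when the fault falls inside the inode's
 * prereserved region and on the truly free pool otherwise.
 */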
static struct page *alloc_huge_page(struct vm_area_struct *vma,
				    unsigned long addr)
{
	struct inode *inode = vma->vm_file->f_dentry->d_inode;
	struct page *page;
	int use_reserve = 0;
	unsigned long idx;

	spin_lock(&hugetlb_lock);

	if (vma->vm_flags & VM_MAYSHARE) {

		/* idx = radix tree index, i.e. offset into file in
		 * HPAGE_SIZE units */
		idx = ((addr - vma->vm_start) >> HPAGE_SHIFT)
			+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

		/* The hugetlbfs specific inode info stores the number
		 * of "guaranteed available" (huge) pages.  That is,
		 * the first 'prereserved_hpages' pages of the inode
		 * are either already instantiated, or have been
		 * pre-reserved (by hugetlb_reserve_for_inode()). Here
		 * we're in the process of instantiating the page, so
		 * we use this to determine whether to draw from the
		 * pre-reserved pool or the truly free pool. */
		if (idx < HUGETLBFS_I(inode)->prereserved_hpages)
			use_reserve = 1;
	}

	if (!use_reserve) {
		if (free_huge_pages <= reserved_huge_pages)
			goto fail;
	} else {
		BUG_ON(reserved_huge_pages == 0);
		reserved_huge_pages--;
	}

	page = dequeue_huge_page(vma, addr);
	if (!page)
		goto fail;

	spin_unlock(&hugetlb_lock);
	set_page_refcounted(page);
	return page;

fail:
	WARN_ON(use_reserve);	/* reserved allocations shouldn't fail */
	spin_unlock(&hugetlb_lock);
	return NULL;
}

/* hugetlb_extend_reservation()
 *
 * Ensure that at least 'atleast' hugepages are, and will remain,
 * available to instantiate the first 'atleast' pages of the given
 * inode. If the inode doesn't already have this many pages reserved
 * or instantiated, set aside some hugepages in the reserved pool to
 * satisfy later faults (or fail now if there aren't enough, rather
 * than getting the SIGBUS later).
 */
int hugetlb_extend_reservation(struct hugetlbfs_inode_info *info,
			       unsigned long atleast)
{
	struct inode *inode = &info->vfs_inode;
	unsigned long change_in_reserve = 0;
	int ret = 0;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages >= atleast)
		goto out;

	/* Because we always call this on shared mappings, none of the
	 * pages beyond info->prereserved_hpages can have been
	 * instantiated, so we need to reserve all of them now. */
	change_in_reserve = atleast - info->prereserved_hpages;

	if ((reserved_huge_pages + change_in_reserve) > free_huge_pages) {
		ret = -ENOMEM;
		goto out;
	}

	reserved_huge_pages += change_in_reserve;
	info->prereserved_hpages = atleast;

out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);

	return ret;
}

/* hugetlb_truncate_reservation()
 *
 * This returns pages reserved for the given inode to the general free
 * hugepage pool. If the inode has any pages prereserved, but not
 * instantiated, beyond offset (atmost << HPAGE_SHIFT), then release
 * them.
 */
void hugetlb_truncate_reservation(struct hugetlbfs_inode_info *info,
				  unsigned long atmost)
{
	struct inode *inode = &info->vfs_inode;
	struct address_space *mapping = inode->i_mapping;
	unsigned long idx;
	unsigned long change_in_reserve = 0;
	struct page *page;

	spin_lock(&hugetlb_lock);
	read_lock_irq(&inode->i_mapping->tree_lock);

	if (info->prereserved_hpages <= atmost)
		goto out;

	/* Count pages which were reserved, but not instantiated, and
	 * which we can now release. */
	for (idx = atmost; idx < info->prereserved_hpages; idx++) {
		page = radix_tree_lookup(&mapping->page_tree, idx);
		if (!page)
			/* Pages which are already instantiated can't
			 * be unreserved (and in fact have already
			 * been removed from the reserved pool) */
			change_in_reserve++;
	}

	BUG_ON(reserved_huge_pages < change_in_reserve);
	reserved_huge_pages -= change_in_reserve;
	info->prereserved_hpages = atmost;

out:
	read_unlock_irq(&inode->i_mapping->tree_lock);
	spin_unlock(&hugetlb_lock);
}

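/*
 * Boot-time initialization: set up the per-node free lists and
 * allocate the pool requested with the hugepages= boot parameter.
 */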
static int __init hugetlb_init(void)
{
	unsigned long i;

	if (HPAGE_SHIFT == 0)
		return 0;

	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&hugepage_freelists[i]);

	for (i = 0; i < max_huge_pages; ++i) {
		if (!alloc_fresh_huge_page())
			break;
	}
	max_huge_pages = free_huge_pages = nr_huge_pages = i;
	printk(KERN_INFO "Total HugeTLB memory allocated, %lu\n",
	       free_huge_pages);
	return 0;
}
module_init(hugetlb_init);

static int __init hugetlb_setup(char *s)
{
	if (sscanf(s, "%lu", &max_huge_pages) <= 0)
		max_huge_pages = 0;
	return 1;
}
__setup("hugepages=", hugetlb_setup);

#ifdef CONFIG_SYSCTL
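/*
 * Hand a huge page back to the buddy allocator: clear any leftover
 * page flags on each base page, remove the compound page destructor
 * and free the whole HUGETLB_PAGE_ORDER block.  Caller must hold
 * hugetlb_lock.
 */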
static void update_and_free_page(struct page *page)
{
	int i;
	nr_huge_pages--;
	nr_huge_pages_node[page_zone(page)->zone_pgdat->node_id]--;
	for (i = 0; i < (HPAGE_SIZE / PAGE_SIZE); i++) {
		page[i].flags &= ~(1 << PG_locked | 1 << PG_error |
				1 << PG_referenced | 1 << PG_dirty |
				1 << PG_active | 1 << PG_reserved |
				1 << PG_private | 1 << PG_writeback);
	}
	page[1].lru.next = NULL;
	set_page_refcounted(page);
	__free_pages(page, HUGETLB_PAGE_ORDER);
}

#ifdef CONFIG_HIGHMEM
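/*
 * When shrinking the pool with CONFIG_HIGHMEM, free lowmem huge pages
 * first: lowmem is the scarcer resource, and a highmem huge page
 * serves the remaining pool just as well.
 */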
static void try_to_free_low(unsigned long count)
{
	int i, nid;
	for (i = 0; i < MAX_NUMNODES; ++i) {
		struct page *page, *next;
		list_for_each_entry_safe(page, next, &hugepage_freelists[i], lru) {
			if (PageHighMem(page))
				continue;
			list_del(&page->lru);
			nid = page_zone(page)->zone_pgdat->node_id;
			free_huge_pages--;
			free_huge_pages_node[nid]--;
			update_and_free_page(page);
			if (count >= nr_huge_pages)
				return;
		}
	}
}
#else
static inline void try_to_free_low(unsigned long count)
{
}
#endif

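/*
 * Grow or shrink the hugepage pool to 'count' pages and return the
 * resulting pool size, which may fall short of the request if fresh
 * huge pages cannot be allocated or if in-use pages cannot be freed.
 */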
static unsigned long set_max_huge_pages(unsigned long count)
{
	while (count > nr_huge_pages) {
		if (!alloc_fresh_huge_page())
			return nr_huge_pages;
	}
	if (count >= nr_huge_pages)
		return nr_huge_pages;

	spin_lock(&hugetlb_lock);
	try_to_free_low(count);
	while (count < nr_huge_pages) {
		struct page *page = dequeue_huge_page(NULL, 0);
		if (!page)
			break;
		update_and_free_page(page);
	}
	spin_unlock(&hugetlb_lock);
	return nr_huge_pages;
}

int hugetlb_sysctl_handler(struct ctl_table *table, int write,
			   struct file *file, void __user *buffer,
			   size_t *length, loff_t *ppos)
{
	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
	max_huge_pages = set_max_huge_pages(max_huge_pages);
	return 0;
}
#endif /* CONFIG_SYSCTL */

int hugetlb_report_meminfo(char *buf)
{
	return sprintf(buf,
			"HugePages_Total: %5lu\n"
			"HugePages_Free:  %5lu\n"
			"HugePages_Rsvd:  %5lu\n"
			"Hugepagesize:    %5lu kB\n",
			nr_huge_pages,
			free_huge_pages,
			reserved_huge_pages,
			HPAGE_SIZE/1024);
}

int hugetlb_report_node_meminfo(int nid, char *buf)
{
	return sprintf(buf,
		"Node %d HugePages_Total: %5u\n"
		"Node %d HugePages_Free:  %5u\n",
		nid, nr_huge_pages_node[nid],
		nid, free_huge_pages_node[nid]);
}

/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
unsigned long hugetlb_total_pages(void)
{
	return nr_huge_pages * (HPAGE_SIZE / PAGE_SIZE);
}

/*
 * We cannot handle pagefaults against hugetlb pages at all.  They cause
 * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we
 * get this far.
 */
static struct page *hugetlb_nopage(struct vm_area_struct *vma,
				unsigned long address, int *unused)
{
	BUG();
	return NULL;
}

struct vm_operations_struct hugetlb_vm_ops = {
	.nopage = hugetlb_nopage,
};

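/*
 * Build a huge-page PTE for the given page with the VMA's protection,
 * marked young and either writable+dirty or write-protected as
 * requested.
 */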
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
				int writable)
{
	pte_t entry;

	if (writable) {
		entry =
		    pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
	} else {
		entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot));
	}
	entry = pte_mkyoung(entry);
	entry = pte_mkhuge(entry);

	return entry;
}

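/*
 * Make an existing huge PTE writable and dirty, and tell the arch code
 * about the update.  Used when a COW fault finds the page is not
 * actually shared.
 */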
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = pte_mkwrite(pte_mkdirty(*ptep));
	ptep_set_access_flags(vma, address, ptep, entry, 1);
	update_mmu_cache(vma, address, entry);
	lazy_mmu_prot_update(entry);
}

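/*
 * Copy the hugepage portion of the page tables at fork().  For
 * private mappings with VM_MAYWRITE, also write-protect the source
 * PTEs so that parent and child will both COW on their next write.
 */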
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct page *ptepage;
	unsigned long addr;
	int cow;

	cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) {
		src_pte = huge_pte_offset(src, addr);
		if (!src_pte)
			continue;
		dst_pte = huge_pte_alloc(dst, addr);
		if (!dst_pte)
			goto nomem;
		spin_lock(&dst->page_table_lock);
		spin_lock(&src->page_table_lock);
		if (!pte_none(*src_pte)) {
			if (cow)
				ptep_set_wrprotect(src, addr, src_pte);
			entry = *src_pte;
			ptepage = pte_page(entry);
			get_page(ptepage);
			add_mm_counter(dst, file_rss, HPAGE_SIZE / PAGE_SIZE);
			set_huge_pte_at(dst, addr, dst_pte, entry);
		}
		spin_unlock(&src->page_table_lock);
		spin_unlock(&dst->page_table_lock);
	}
	return 0;

nomem:
	return -ENOMEM;
}

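/*
 * Tear down the huge PTEs in [start, end): clear each PTE, drop the
 * page reference it held and lower the file RSS accordingly.  start
 * and end must be hugepage aligned.
 */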
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	struct page *page;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~HPAGE_MASK);
	BUG_ON(end & ~HPAGE_MASK);

	spin_lock(&mm->page_table_lock);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (address = start; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;

		pte = huge_ptep_get_and_clear(mm, address, ptep);
		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		put_page(page);
		add_mm_counter(mm, file_rss, (int) -(HPAGE_SIZE / PAGE_SIZE));
	}

	spin_unlock(&mm->page_table_lock);
	flush_tlb_range(vma, start, end);
}

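/*
 * Handle a write fault on a read-only huge PTE.  If we hold the only
 * reference to the page, just make the PTE writable; otherwise copy
 * the contents into a freshly allocated huge page and point the PTE
 * at that instead.  Called and returns with mm->page_table_lock held,
 * though the lock is dropped around the copy.
 */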
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, pte_t pte)
{
	struct page *old_page, *new_page;
	int avoidcopy;

	old_page = pte_page(pte);

	/* If no-one else is actually using this page, avoid the copy
	 * and just make the page writable */
	avoidcopy = (page_count(old_page) == 1);
	if (avoidcopy) {
		set_huge_ptep_writable(vma, address, ptep);
		return VM_FAULT_MINOR;
	}

	page_cache_get(old_page);
	new_page = alloc_huge_page(vma, address);

	if (!new_page) {
		page_cache_release(old_page);
		return VM_FAULT_OOM;
	}

	spin_unlock(&mm->page_table_lock);
	copy_huge_page(new_page, old_page, address);
	spin_lock(&mm->page_table_lock);

	ptep = huge_pte_offset(mm, address & HPAGE_MASK);
	if (likely(pte_same(*ptep, pte))) {
		/* Break COW */
		set_huge_pte_at(mm, address, ptep,
				make_huge_pte(vma, new_page, 1));
		/* Make the old page be freed below */
		new_page = old_page;
	}
	page_cache_release(new_page);
	page_cache_release(old_page);
	return VM_FAULT_MINOR;
}

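/*
 * Fault in a huge page that has no PTE yet: find it in the page
 * cache, or allocate and zero a new one (adding it to the page cache
 * for shared mappings), then map it.  The page lock guards against
 * racing truncation until we take mm->page_table_lock.
 */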
int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, pte_t *ptep, int write_access)
{
	int ret = VM_FAULT_SIGBUS;
	unsigned long idx;
	unsigned long size;
	struct page *page;
	struct address_space *mapping;
	pte_t new_pte;

	mapping = vma->vm_file->f_mapping;
	idx = ((address - vma->vm_start) >> HPAGE_SHIFT)
		+ (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT));

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
retry:
	page = find_lock_page(mapping, idx);
	if (!page) {
		if (hugetlb_get_quota(mapping))
			goto out;
		page = alloc_huge_page(vma, address);
		if (!page) {
			hugetlb_put_quota(mapping);
			ret = VM_FAULT_OOM;
			goto out;
		}
		clear_huge_page(page, address);

		if (vma->vm_flags & VM_SHARED) {
			int err;

			err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
			if (err) {
				put_page(page);
				hugetlb_put_quota(mapping);
				if (err == -EEXIST)
					goto retry;
				goto out;
			}
		} else
			lock_page(page);
	}

	spin_lock(&mm->page_table_lock);
	size = i_size_read(mapping->host) >> HPAGE_SHIFT;
	if (idx >= size)
		goto backout;

	ret = VM_FAULT_MINOR;
	if (!pte_none(*ptep))
		goto backout;

	add_mm_counter(mm, file_rss, HPAGE_SIZE / PAGE_SIZE);
	new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
				&& (vma->vm_flags & VM_SHARED)));
	set_huge_pte_at(mm, address, ptep, new_pte);

	if (write_access && !(vma->vm_flags & VM_SHARED)) {
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_cow(mm, vma, address, ptep, new_pte);
	}

	spin_unlock(&mm->page_table_lock);
	unlock_page(page);
out:
	return ret;

backout:
	spin_unlock(&mm->page_table_lock);
	hugetlb_put_quota(mapping);
	unlock_page(page);
	put_page(page);
	goto out;
}

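/*
 * Top-level hugetlb fault handler: dispatch to hugetlb_no_page() when
 * no PTE exists yet, or to hugetlb_cow() for a write fault on a
 * read-only PTE.
 */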
int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, int write_access)
{
	pte_t *ptep;
	pte_t entry;
	int ret;
	static DEFINE_MUTEX(hugetlb_instantiation_mutex);

	ptep = huge_pte_alloc(mm, address);
	if (!ptep)
		return VM_FAULT_OOM;

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mutex_lock(&hugetlb_instantiation_mutex);
	entry = *ptep;
	if (pte_none(entry)) {
		ret = hugetlb_no_page(mm, vma, address, ptep, write_access);
		mutex_unlock(&hugetlb_instantiation_mutex);
		return ret;
	}

	ret = VM_FAULT_MINOR;

	spin_lock(&mm->page_table_lock);
	/* Check for a racing update before calling hugetlb_cow */
	if (likely(pte_same(entry, *ptep)))
		if (write_access && !pte_write(entry))
			ret = hugetlb_cow(mm, vma, address, ptep, entry);
	spin_unlock(&mm->page_table_lock);
	mutex_unlock(&hugetlb_instantiation_mutex);

	return ret;
}

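/*
 * get_user_pages() back end for hugetlb VMAs: walk the huge PTEs from
 * *position, faulting pages in as needed, and fill the pages[] and
 * vmas[] arrays one base page at a time.
 */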
int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			struct page **pages, struct vm_area_struct **vmas,
			unsigned long *position, int *length, int i)
{
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
	int remainder = *length;

	spin_lock(&mm->page_table_lock);
	while (vaddr < vma->vm_end && remainder) {
		pte_t *pte;
		struct page *page;

		/*
		 * Some archs (sparc64, sh*) have multiple pte_ts to
		 * each hugepage.  We have to make sure we get the
		 * first, for the page indexing below to work.
		 */
		pte = huge_pte_offset(mm, vaddr & HPAGE_MASK);

		if (!pte || pte_none(*pte)) {
			int ret;

			spin_unlock(&mm->page_table_lock);
			ret = hugetlb_fault(mm, vma, vaddr, 0);
			spin_lock(&mm->page_table_lock);
			if (ret == VM_FAULT_MINOR)
				continue;

			remainder = 0;
			if (!i)
				i = -EFAULT;
			break;
		}

		pfn_offset = (vaddr & ~HPAGE_MASK) >> PAGE_SHIFT;
		page = pte_page(*pte);
same_page:
		get_page(page);
		if (pages)
			pages[i] = page + pfn_offset;

		if (vmas)
			vmas[i] = vma;

		vaddr += PAGE_SIZE;
		++pfn_offset;
		--remainder;
		++i;
		if (vaddr < vma->vm_end && remainder &&
				pfn_offset < HPAGE_SIZE/PAGE_SIZE) {
			/*
			 * We use pfn_offset to avoid touching the pageframes
			 * of this compound page.
			 */
			goto same_page;
		}
	}
	spin_unlock(&mm->page_table_lock);
	*length = remainder;
	*position = vaddr;

	return i;
}

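/*
 * mprotect() back end for hugetlb VMAs: rewrite every populated huge
 * PTE in [address, end) with the new protection and flush the TLB for
 * the whole range.
 */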
void hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end, pgprot_t newprot)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;

	BUG_ON(address >= end);
	flush_cache_range(vma, address, end);

	spin_lock(&mm->page_table_lock);
	for (; address < end; address += HPAGE_SIZE) {
		ptep = huge_pte_offset(mm, address);
		if (!ptep)
			continue;
		if (!pte_none(*ptep)) {
			pte = huge_ptep_get_and_clear(mm, address, ptep);
			pte = pte_mkhuge(pte_modify(pte, newprot));
			set_huge_pte_at(mm, address, ptep, pte);
			lazy_mmu_prot_update(pte);
		}
	}
	spin_unlock(&mm->page_table_lock);

	flush_tlb_range(vma, start, end);
}