/*
 * arch/x86/mm/pageattr.c
 *
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
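/*
 * Sketch of struct cpa_data, reconstructed from the fields used below
 * (vaddr, numpages, mask_set, mask_clr, pfn and the flush flag); the
 * exact layout of the original may differ.
 */
struct cpa_data {
	unsigned long	vaddr;		/* virtual start address */
	pgprot_t	mask_set;	/* pgprot bits to set */
	pgprot_t	mask_clr;	/* pgprot bits to clear */
	int		numpages;	/* number of pages to process */
	int		flushtlb;	/* set when a TLB flush is needed */
	unsigned long	pfn;		/* pfn of the last changed page */
};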
static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}
static int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}
static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t
static_protections(pgprot_t prot, unsigned long address, unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext since that is gone later on. On
	 * 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
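/*
 * Illustrative example (not from the original file): a caller that asks
 * for a writable mapping of a .rodata pfn, e.g.
 *
 *	new_prot = static_protections(__pgprot(_PAGE_PRESENT | _PAGE_RW),
 *				      rodata_addr, rodata_pfn);
 *
 * gets _PAGE_RW stripped again, while a request that tries to set
 * _PAGE_NX on a pfn in the 640k-1Mb BIOS window has _PAGE_NX removed,
 * so that area stays executable for PCI BIOS config access.
 */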
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
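/*
 * Usage sketch (not part of the original file): callers check the
 * returned level before interpreting the entry, e.g.
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_2M)
 *		handle_large_mapping(pte);
 *
 * where handle_large_mapping() is a hypothetical caller-side helper.
 */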
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);

	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
}
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protection() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been updated
	 * already:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}
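/*
 * Worked example for the alignment check above (assuming 4k base pages
 * and a 2M large page, i.e. 512 base pages per PMD): a request that
 * starts at the PMD-aligned address with cpa->numpages >= 512 is clipped
 * to 512 pages and handled by rewriting the single PMD entry in place;
 * a request for, say, 16 pages in the middle of the 2M region fails the
 * alignment/coverage test and the large page has to be split instead.
 */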
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed, pool_refill;
static void cpa_fill_pool(void)
{
	gfp_t gfp = GFP_KERNEL;

	/* Do not allocate from interrupt context */
	if (in_irq() || irqs_disabled())
		return;
	/*
	 * Check unlocked. It does not matter when we have one more
	 * page in the pool. The bit lock avoids recursive pool
	 * allocations:
	 */
	if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
		return;

#ifdef CONFIG_DEBUG_PAGEALLOC
	/*
	 * We could do:
	 * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
	 * but this fails on !PREEMPT kernels
	 */
	gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
#endif

	while (pool_pages < pool_size) {
		struct page *p;

		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}

		spin_lock_irq(&pgd_lock);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irq(&pgd_lock);
	}

	clear_bit_unlock(0, &pool_refill);
}
#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16
void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
	gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
	pool_size = POOL_PAGES_PER_GB * gb;
	pool_low = pool_size;

	cpa_fill_pool();
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}
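/*
 * Worked example for the calculation above (assuming 4k pages, so
 * SHIFT_MB = 8): with si.totalram = 524288 pages (2 GiB of RAM),
 * 524288 >> 8 = 2048 MiB, (2048 + 1023) >> 10 = 2 GiB and
 * pool_size = POOL_PAGES_PER_GB * 2 = 32 pages.
 */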
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		return -ENOMEM;
	}

	base = list_first_entry(&page_pool, struct page, lru);
	list_del(&base->lru);
	pool_pages--;

	if (pool_pages < pool_low)
		pool_low = pool_pages;

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	struct page *kpte_page;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		WARN_ON(1);
		return -EINVAL;
	}

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);
	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err)
		goto repeat;

	return err;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!within(cpa->vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

	if (ret)
		return ret;

	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	alias_cpa.vaddr = (cpa->pfn << PAGE_SHIFT) +
			  __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);

	return ret;
}
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}
static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
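/*
 * Example (not from the original file): set_memory_uc() below passes
 * __pgprot(_PAGE_PCD | _PAGE_PWT) as mask_set, so cache_attr(mask_set)
 * is non-zero and the attribute change is followed by a cache flush
 * (clflush or wbinvd), whereas set_memory_nx() only touches _PAGE_NX
 * and no cache flush is needed.
 */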
static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check, if we are requested to change a not supported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool();

	return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
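/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver that needs to access a kernel buffer uncached would typically
 * do something like
 *
 *	set_memory_uc((unsigned long)vaddr, nrpages);
 *	... let the device work on the buffer ...
 *	set_memory_wb((unsigned long)vaddr, nrpages);
 *
 * restoring write-back caching before the pages are reused or freed.
 */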
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa, 1);
}
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock->flush only current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
	cpa_fill_pool();
}
#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif