git.proxmox.com: mirror_ubuntu-kernels.git / arch/x86/mm/pageattr.c
x86: CPA: avoid double checking of alias ranges
1 /*
2 * Copyright 2002 Andi Kleen, SuSE Labs.
3 * Thanks to Ben LaHaise for precious feedback.
4 */
5 #include <linux/highmem.h>
6 #include <linux/bootmem.h>
7 #include <linux/module.h>
8 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/mm.h>
11 #include <linux/interrupt.h>
12
13 #include <asm/e820.h>
14 #include <asm/processor.h>
15 #include <asm/tlbflush.h>
16 #include <asm/sections.h>
17 #include <asm/uaccess.h>
18 #include <asm/pgalloc.h>
19 #include <asm/proto.h>
20
21 /*
22 * The current flushing context - we pass it instead of 5 arguments:
23 */
24 struct cpa_data {
25 unsigned long vaddr;
26 pgprot_t mask_set;
27 pgprot_t mask_clr;
28 int numpages;
29 int flushtlb;
30 unsigned long pfn;
31 };
32
33 #ifdef CONFIG_X86_64
34
35 static inline unsigned long highmap_start_pfn(void)
36 {
37 return __pa(_text) >> PAGE_SHIFT;
38 }
39
40 static inline unsigned long highmap_end_pfn(void)
41 {
42 return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
43 }
44
45 #endif
46
47 static inline int
48 within(unsigned long addr, unsigned long start, unsigned long end)
49 {
50 return addr >= start && addr < end;
51 }
52
53 /*
54 * Flushing functions
55 */
56
57 /**
58 * clflush_cache_range - flush a cache range with clflush
59 * @addr: virtual start address
60 * @size: number of bytes to flush
61 *
62 * clflush is an unordered instruction which needs fencing with mfence
63 * to avoid ordering issues.
64 */
65 void clflush_cache_range(void *vaddr, unsigned int size)
66 {
67 void *vend = vaddr + size - 1;
68
69 mb();
70
71 for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
72 clflush(vaddr);
73 /*
74 * Flush any possible final partial cacheline:
75 */
76 clflush(vend);
77
78 mb();
79 }
80
81 static void __cpa_flush_all(void *arg)
82 {
83 unsigned long cache = (unsigned long)arg;
84
85 /*
86 * Flush all to work around errata in early Athlons regarding
87 * large page flushing.
88 */
89 __flush_tlb_all();
90
91 if (cache && boot_cpu_data.x86_model >= 4)
92 wbinvd();
93 }
94
95 static void cpa_flush_all(unsigned long cache)
96 {
97 BUG_ON(irqs_disabled());
98
99 on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
100 }
101
102 static void __cpa_flush_range(void *arg)
103 {
104 /*
105 * We could optimize that further and do individual per page
106 * tlb invalidates for a low number of pages. Caveat: we must
107 * flush the high aliases on 64bit as well.
108 */
109 __flush_tlb_all();
110 }
111
112 static void cpa_flush_range(unsigned long start, int numpages, int cache)
113 {
114 unsigned int i, level;
115 unsigned long addr;
116
117 BUG_ON(irqs_disabled());
118 WARN_ON(PAGE_ALIGN(start) != start);
119
120 on_each_cpu(__cpa_flush_range, NULL, 1, 1);
121
122 if (!cache)
123 return;
124
125 /*
126 * We only need to flush on one CPU;
127 * clflush is a MESI-coherent instruction that
128 * will cause all other CPUs to flush the same
129 * cachelines:
130 */
131 for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
132 pte_t *pte = lookup_address(addr, &level);
133
134 /*
135 * Only flush present addresses:
136 */
137 if (pte && (pte_val(*pte) & _PAGE_PRESENT))
138 clflush_cache_range((void *) addr, PAGE_SIZE);
139 }
140 }
141
142 /*
143 * Certain areas of memory on x86 require very specific protection flags,
144 * for example the BIOS area or kernel text. Callers don't always get this
145 * right (again, ioremap() on BIOS memory is not uncommon), so this function
146 * checks and fixes these known, statically required protection bits.
147 */
148 static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
149 unsigned long pfn)
150 {
151 pgprot_t forbidden = __pgprot(0);
152
153 /*
154 * The BIOS area between 640K and 1MB needs to be executable for
155 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
156 */
157 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
158 pgprot_val(forbidden) |= _PAGE_NX;
159
160 /*
161 * The kernel text needs to be executable for obvious reasons.
162 * Does not cover __inittext since that is gone later on. On
163 * 64bit we do not enforce !NX on the low mapping
164 */
165 if (within(address, (unsigned long)_text, (unsigned long)_etext))
166 pgprot_val(forbidden) |= _PAGE_NX;
167
168 /*
169 * The .rodata section needs to be read-only. Using the pfn
170 * catches all aliases.
171 */
172 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
173 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
174 pgprot_val(forbidden) |= _PAGE_RW;
175
176 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
177
178 return prot;
179 }
180
181 /*
182 * Lookup the page table entry for a virtual address. Return a pointer
183 * to the entry and the level of the mapping.
184 *
185 * Note: We return pud and pmd either when the entry is marked large
186 * or when the present bit is not set. Otherwise we would return a
187 * pointer to a nonexistent mapping.
188 */
189 pte_t *lookup_address(unsigned long address, unsigned int *level)
190 {
191 pgd_t *pgd = pgd_offset_k(address);
192 pud_t *pud;
193 pmd_t *pmd;
194
195 *level = PG_LEVEL_NONE;
196
197 if (pgd_none(*pgd))
198 return NULL;
199
200 pud = pud_offset(pgd, address);
201 if (pud_none(*pud))
202 return NULL;
203
204 *level = PG_LEVEL_1G;
205 if (pud_large(*pud) || !pud_present(*pud))
206 return (pte_t *)pud;
207
208 pmd = pmd_offset(pud, address);
209 if (pmd_none(*pmd))
210 return NULL;
211
212 *level = PG_LEVEL_2M;
213 if (pmd_large(*pmd) || !pmd_present(*pmd))
214 return (pte_t *)pmd;
215
216 *level = PG_LEVEL_4K;
217
218 return pte_offset_kernel(pmd, address);
219 }
220
221 /*
222 * Set the new pmd in all the pgds we know about:
223 */
224 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
225 {
226 /* change init_mm */
227 set_pte_atomic(kpte, pte);
228 #ifdef CONFIG_X86_32
229 if (!SHARED_KERNEL_PMD) {
230 struct page *page;
231
232 list_for_each_entry(page, &pgd_list, lru) {
233 pgd_t *pgd;
234 pud_t *pud;
235 pmd_t *pmd;
236
237 pgd = (pgd_t *)page_address(page) + pgd_index(address);
238 pud = pud_offset(pgd, address);
239 pmd = pmd_offset(pud, address);
240 set_pte_atomic((pte_t *)pmd, pte);
241 }
242 }
243 #endif
244 }
245
246 static int
247 try_preserve_large_page(pte_t *kpte, unsigned long address,
248 struct cpa_data *cpa)
249 {
250 unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
251 pte_t new_pte, old_pte, *tmp;
252 pgprot_t old_prot, new_prot;
253 int i, do_split = 1;
254 unsigned int level;
255
256 spin_lock_irqsave(&pgd_lock, flags);
257 /*
258 * Check for races, another CPU might have split this page
259 * up already:
260 */
261 tmp = lookup_address(address, &level);
262 if (tmp != kpte)
263 goto out_unlock;
264
265 switch (level) {
266 case PG_LEVEL_2M:
267 psize = PMD_PAGE_SIZE;
268 pmask = PMD_PAGE_MASK;
269 break;
270 #ifdef CONFIG_X86_64
271 case PG_LEVEL_1G:
272 psize = PUD_PAGE_SIZE;
273 pmask = PUD_PAGE_MASK;
274 break;
275 #endif
276 default:
277 do_split = -EINVAL;
278 goto out_unlock;
279 }
280
281 /*
282 * Calculate the number of pages which fit into this large
283 * page, starting at address:
284 */
285 nextpage_addr = (address + psize) & pmask;
286 numpages = (nextpage_addr - address) >> PAGE_SHIFT;
287 if (numpages < cpa->numpages)
288 cpa->numpages = numpages;
289
290 /*
291 * We are safe now. Check whether the new pgprot is the same:
292 */
293 old_pte = *kpte;
294 old_prot = new_prot = pte_pgprot(old_pte);
295
296 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
297 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
298
299 /*
300 * old_pte points to the large page base address. So we need
301 * to add the offset of the virtual address:
302 */
303 pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
304 cpa->pfn = pfn;
305
306 new_prot = static_protections(new_prot, address, pfn);
307
308 /*
309 * We need to check the full range: whether
310 * static_protections() requires a different pgprot for one of
311 * the pages in the range we try to preserve:
312 */
313 addr = address + PAGE_SIZE;
314 pfn++;
315 for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
316 pgprot_t chk_prot = static_protections(new_prot, addr, pfn);
317
318 if (pgprot_val(chk_prot) != pgprot_val(new_prot))
319 goto out_unlock;
320 }
321
322 /*
323 * If there are no changes, return. cpa->numpages has been updated
324 * above:
325 */
326 if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
327 do_split = 0;
328 goto out_unlock;
329 }
330
331 /*
332 * We need to change the attributes. Check whether we can
333 * change the large page in one go. We request a split when
334 * the address is not aligned or the number of pages is
335 * smaller than the number of pages in the large page. Note
336 * that we limited the number of possible pages already to
337 * the number of pages in the large page.
338 */
339 if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
340 /*
341 * The address is aligned and the number of pages
342 * covers the full page.
343 */
344 new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
345 __set_pmd_pte(kpte, address, new_pte);
346 cpa->flushtlb = 1;
347 do_split = 0;
348 }
349
350 out_unlock:
351 spin_unlock_irqrestore(&pgd_lock, flags);
352
353 return do_split;
354 }
355
356 static LIST_HEAD(page_pool);
357 static unsigned long pool_size, pool_pages, pool_low;
358 static unsigned long pool_used, pool_failed, pool_refill;
359
360 static void cpa_fill_pool(void)
361 {
362 struct page *p;
363 gfp_t gfp = GFP_KERNEL;
364
365 /* Do not allocate from interrupt context */
366 if (in_irq() || irqs_disabled())
367 return;
368 /*
369 * Check unlocked. It does not matter if we have one more
370 * page in the pool. The bit lock avoids recursive pool
371 * allocations:
372 */
373 if (pool_pages >= pool_size || test_and_set_bit_lock(0, &pool_refill))
374 return;
375
376 #ifdef CONFIG_DEBUG_PAGEALLOC
377 /*
378 * We could do:
379 * gfp = in_atomic() ? GFP_ATOMIC : GFP_KERNEL;
380 * but this fails on !PREEMPT kernels
381 */
382 gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
383 #endif
384
385 while (pool_pages < pool_size) {
386 p = alloc_pages(gfp, 0);
387 if (!p) {
388 pool_failed++;
389 break;
390 }
391 spin_lock_irq(&pgd_lock);
392 list_add(&p->lru, &page_pool);
393 pool_pages++;
394 spin_unlock_irq(&pgd_lock);
395 }
396 clear_bit_unlock(0, &pool_refill);
397 }
398
399 #define SHIFT_MB (20 - PAGE_SHIFT)
400 #define ROUND_MB_GB ((1 << 10) - 1)
401 #define SHIFT_MB_GB 10
402 #define POOL_PAGES_PER_GB 16
403
404 void __init cpa_init(void)
405 {
406 struct sysinfo si;
407 unsigned long gb;
408
409 si_meminfo(&si);
410 /*
411 * Calculate the number of pool pages:
412 *
413 * Convert totalram (nr of pages) to MiB and round to the next
414 * GiB. Shift MiB to GiB and multiply the result by
415 * POOL_PAGES_PER_GB:
416 */
417 gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
418 pool_size = POOL_PAGES_PER_GB * gb;
419 pool_low = pool_size;
420
421 cpa_fill_pool();
422 printk(KERN_DEBUG
423 "CPA: page pool initialized %lu of %lu pages preallocated\n",
424 pool_pages, pool_size);
425 }
426
427 static int split_large_page(pte_t *kpte, unsigned long address)
428 {
429 unsigned long flags, pfn, pfninc = 1;
430 unsigned int i, level;
431 pte_t *pbase, *tmp;
432 pgprot_t ref_prot;
433 struct page *base;
434
435 /*
436 * Get a page from the pool. The pool list is protected by the
437 * pgd_lock, which we have to take anyway for the split
438 * operation:
439 */
440 spin_lock_irqsave(&pgd_lock, flags);
441 if (list_empty(&page_pool)) {
442 spin_unlock_irqrestore(&pgd_lock, flags);
443 return -ENOMEM;
444 }
445
446 base = list_first_entry(&page_pool, struct page, lru);
447 list_del(&base->lru);
448 pool_pages--;
449
450 if (pool_pages < pool_low)
451 pool_low = pool_pages;
452
453 /*
454 * Check for races, another CPU might have split this page
455 * up for us already:
456 */
457 tmp = lookup_address(address, &level);
458 if (tmp != kpte)
459 goto out_unlock;
460
461 pbase = (pte_t *)page_address(base);
462 #ifdef CONFIG_X86_32
463 paravirt_alloc_pt(&init_mm, page_to_pfn(base));
464 #endif
465 ref_prot = pte_pgprot(pte_clrhuge(*kpte));
466
467 #ifdef CONFIG_X86_64
468 if (level == PG_LEVEL_1G) {
469 pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
470 pgprot_val(ref_prot) |= _PAGE_PSE;
471 }
472 #endif
473
474 /*
475 * Get the target pfn from the original entry:
476 */
477 pfn = pte_pfn(*kpte);
478 for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
479 set_pte(&pbase[i], pfn_pte(pfn, ref_prot));
480
481 /*
482 * Install the new, split up pagetable. Important details here:
483 *
484 * On Intel the NX bit of all levels must be cleared to make a
485 * page executable. See section 4.13.2 of the Intel 64 and IA-32
486 * Architectures Software Developer's Manual.
487 *
488 * Mark the entry present. The current mapping might be
489 * set to not present, which we preserved above.
490 */
491 ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
492 pgprot_val(ref_prot) |= _PAGE_PRESENT;
493 __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
494 base = NULL;
495
496 out_unlock:
497 /*
498 * If we dropped out via the lookup_address check under
499 * pgd_lock then stick the page back into the pool:
500 */
501 if (base) {
502 list_add(&base->lru, &page_pool);
503 pool_pages++;
504 } else
505 pool_used++;
506 spin_unlock_irqrestore(&pgd_lock, flags);
507
508 return 0;
509 }
510
511 static int __change_page_attr(struct cpa_data *cpa, int primary)
512 {
513 unsigned long address = cpa->vaddr;
514 int do_split, err;
515 unsigned int level;
516 struct page *kpte_page;
517 pte_t *kpte, old_pte;
518
519 repeat:
520 kpte = lookup_address(address, &level);
521 if (!kpte)
522 return primary ? -EINVAL : 0;
523
524 old_pte = *kpte;
525 if (!pte_val(old_pte)) {
526 if (!primary)
527 return 0;
528 printk(KERN_WARNING "CPA: called for zero pte. "
529 "vaddr = %lx cpa->vaddr = %lx\n", address,
530 cpa->vaddr);
531 WARN_ON(1);
532 return -EINVAL;
533 }
534
535 kpte_page = virt_to_page(kpte);
536 BUG_ON(PageLRU(kpte_page));
537 BUG_ON(PageCompound(kpte_page));
538
539 if (level == PG_LEVEL_4K) {
540 pte_t new_pte;
541 pgprot_t new_prot = pte_pgprot(old_pte);
542 unsigned long pfn = pte_pfn(old_pte);
543
544 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
545 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);
546
547 new_prot = static_protections(new_prot, address, pfn);
548
549 /*
550 * We need to keep the pfn from the existing PTE;
551 * after all, we're only going to change its attributes,
552 * not the memory it points to.
553 */
554 new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
555 cpa->pfn = pfn;
556 /*
557 * Do we really change anything?
558 */
559 if (pte_val(old_pte) != pte_val(new_pte)) {
560 set_pte_atomic(kpte, new_pte);
561 cpa->flushtlb = 1;
562 }
563 cpa->numpages = 1;
564 return 0;
565 }
566
567 /*
568 * Check whether we can keep the large page intact
569 * and just change the pte:
570 */
571 do_split = try_preserve_large_page(kpte, address, cpa);
572 /*
573 * When the range fits into the existing large page,
574 * return. cpa->numpages and cpa->flushtlb have been updated in
575 * try_preserve_large_page():
576 */
577 if (do_split <= 0)
578 return do_split;
579
580 /*
581 * We have to split the large page:
582 */
583 err = split_large_page(kpte, address);
584 if (!err) {
585 cpa->flushtlb = 1;
586 goto repeat;
587 }
588
589 return err;
590 }
591
592 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
593
594 static int cpa_process_alias(struct cpa_data *cpa)
595 {
596 struct cpa_data alias_cpa;
597 int ret = 0;
598
599 if (cpa->pfn > max_pfn_mapped)
600 return 0;
601
602 /*
603 * No need to redo when the primary call touched the direct
604 * mapping already:
605 */
606 if (!within(cpa->vaddr, PAGE_OFFSET,
607 PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
608
609 alias_cpa = *cpa;
610 alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);
611
612 ret = __change_page_attr_set_clr(&alias_cpa, 0);
613 }
614
615 #ifdef CONFIG_X86_64
616 if (ret)
617 return ret;
618 /*
619 * No need to redo when the primary call touched the high
620 * mapping already:
621 */
622 if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
623 return 0;
624
625 /*
626 * If the physical address is inside the kernel map, we need
627 * to touch the high mapped kernel as well:
628 */
629 if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
630 return 0;
631
632 alias_cpa = *cpa;
633 alias_cpa.vaddr =
634 (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;
635
636 /*
637 * The high mapping range is imprecise, so ignore the return value.
638 */
639 __change_page_attr_set_clr(&alias_cpa, 0);
640 #endif
641 return ret;
642 }
643
644 static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
645 {
646 int ret, numpages = cpa->numpages;
647
648 while (numpages) {
649 /*
650 * Store the remaining nr of pages for the large page
651 * preservation check.
652 */
653 cpa->numpages = numpages;
654
655 ret = __change_page_attr(cpa, checkalias);
656 if (ret)
657 return ret;
658
659 if (checkalias) {
660 ret = cpa_process_alias(cpa);
661 if (ret)
662 return ret;
663 }
664
665 /*
666 * Adjust the number of pages with the result of the
667 * CPA operation. Either a large page has been
668 * preserved or a single page update happened.
669 */
670 BUG_ON(cpa->numpages > numpages);
671 numpages -= cpa->numpages;
672 cpa->vaddr += cpa->numpages * PAGE_SIZE;
673 }
674 return 0;
675 }
676
677 static inline int cache_attr(pgprot_t attr)
678 {
679 return pgprot_val(attr) &
680 (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
681 }
682
683 static int change_page_attr_set_clr(unsigned long addr, int numpages,
684 pgprot_t mask_set, pgprot_t mask_clr)
685 {
686 struct cpa_data cpa;
687 int ret, cache, checkalias;
688
689 /*
690 * Check if we are requested to change an unsupported
691 * feature:
692 */
693 mask_set = canon_pgprot(mask_set);
694 mask_clr = canon_pgprot(mask_clr);
695 if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
696 return 0;
697
698 /* Ensure we are PAGE_SIZE aligned */
699 if (addr & ~PAGE_MASK) {
700 addr &= PAGE_MASK;
701 /*
702 * People should not be passing in unaligned addresses:
703 */
704 WARN_ON_ONCE(1);
705 }
706
707 cpa.vaddr = addr;
708 cpa.numpages = numpages;
709 cpa.mask_set = mask_set;
710 cpa.mask_clr = mask_clr;
711 cpa.flushtlb = 0;
712
713 /* No alias checking for _NX bit modifications */
714 checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;
715
716 ret = __change_page_attr_set_clr(&cpa, checkalias);
717
718 /*
719 * Check whether we really changed something:
720 */
721 if (!cpa.flushtlb)
722 goto out;
723
724 /*
725 * No need to flush when we did not set any of the caching
726 * attributes:
727 */
728 cache = cache_attr(mask_set);
729
730 /*
731 * On success we use clflush, when the CPU supports it, to
732 * avoid the wbinvd. If the CPU does not support clflush, or in
733 * the error case, we fall back to cpa_flush_all() (which uses
734 * wbinvd):
735 */
736 if (!ret && cpu_has_clflush)
737 cpa_flush_range(addr, numpages, cache);
738 else
739 cpa_flush_all(cache);
740
741 out:
742 cpa_fill_pool();
743 return ret;
744 }
745
746 static inline int change_page_attr_set(unsigned long addr, int numpages,
747 pgprot_t mask)
748 {
749 return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
750 }
751
752 static inline int change_page_attr_clear(unsigned long addr, int numpages,
753 pgprot_t mask)
754 {
755 return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
756 }
757
758 int set_memory_uc(unsigned long addr, int numpages)
759 {
760 return change_page_attr_set(addr, numpages,
761 __pgprot(_PAGE_PCD | _PAGE_PWT));
762 }
763 EXPORT_SYMBOL(set_memory_uc);
764
765 int set_memory_wb(unsigned long addr, int numpages)
766 {
767 return change_page_attr_clear(addr, numpages,
768 __pgprot(_PAGE_PCD | _PAGE_PWT));
769 }
770 EXPORT_SYMBOL(set_memory_wb);
771
772 int set_memory_x(unsigned long addr, int numpages)
773 {
774 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
775 }
776 EXPORT_SYMBOL(set_memory_x);
777
778 int set_memory_nx(unsigned long addr, int numpages)
779 {
780 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
781 }
782 EXPORT_SYMBOL(set_memory_nx);
783
784 int set_memory_ro(unsigned long addr, int numpages)
785 {
786 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
787 }
788
789 int set_memory_rw(unsigned long addr, int numpages)
790 {
791 return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
792 }
793
794 int set_memory_np(unsigned long addr, int numpages)
795 {
796 return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
797 }
798
799 int set_pages_uc(struct page *page, int numpages)
800 {
801 unsigned long addr = (unsigned long)page_address(page);
802
803 return set_memory_uc(addr, numpages);
804 }
805 EXPORT_SYMBOL(set_pages_uc);
806
807 int set_pages_wb(struct page *page, int numpages)
808 {
809 unsigned long addr = (unsigned long)page_address(page);
810
811 return set_memory_wb(addr, numpages);
812 }
813 EXPORT_SYMBOL(set_pages_wb);
814
815 int set_pages_x(struct page *page, int numpages)
816 {
817 unsigned long addr = (unsigned long)page_address(page);
818
819 return set_memory_x(addr, numpages);
820 }
821 EXPORT_SYMBOL(set_pages_x);
822
823 int set_pages_nx(struct page *page, int numpages)
824 {
825 unsigned long addr = (unsigned long)page_address(page);
826
827 return set_memory_nx(addr, numpages);
828 }
829 EXPORT_SYMBOL(set_pages_nx);
830
831 int set_pages_ro(struct page *page, int numpages)
832 {
833 unsigned long addr = (unsigned long)page_address(page);
834
835 return set_memory_ro(addr, numpages);
836 }
837
838 int set_pages_rw(struct page *page, int numpages)
839 {
840 unsigned long addr = (unsigned long)page_address(page);
841
842 return set_memory_rw(addr, numpages);
843 }
844
845 #ifdef CONFIG_DEBUG_PAGEALLOC
846
847 static int __set_pages_p(struct page *page, int numpages)
848 {
849 struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
850 .numpages = numpages,
851 .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
852 .mask_clr = __pgprot(0)};
853
854 return __change_page_attr_set_clr(&cpa, 1);
855 }
856
857 static int __set_pages_np(struct page *page, int numpages)
858 {
859 struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
860 .numpages = numpages,
861 .mask_set = __pgprot(0),
862 .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};
863
864 return __change_page_attr_set_clr(&cpa, 1);
865 }
866
867 void kernel_map_pages(struct page *page, int numpages, int enable)
868 {
869 if (PageHighMem(page))
870 return;
871 if (!enable) {
872 debug_check_no_locks_freed(page_address(page),
873 numpages * PAGE_SIZE);
874 }
875
876 /*
877 * If the page allocator is not up yet then do not call c_p_a():
878 */
879 if (!debug_pagealloc_enabled)
880 return;
881
882 /*
883 * The return value is ignored as the calls cannot fail.
884 * Large pages are kept enabled at boot time, and are
885 * split up quickly with DEBUG_PAGEALLOC. If a splitup
886 * fails here (due to temporary memory shortage) no damage
887 * is done because we just keep the largepage intact up
888 * to the next attempt when it will likely be split up:
889 */
890 if (enable)
891 __set_pages_p(page, numpages);
892 else
893 __set_pages_np(page, numpages);
894
895 /*
896 * We should perform an IPI and flush all tlbs,
897 * but that can deadlock, so flush only the current CPU:
898 */
899 __flush_tlb_all();
900
901 /*
902 * Try to refill the page pool here. We can do this only after
903 * the tlb flush.
904 */
905 cpa_fill_pool();
906 }
907 #endif
908
909 /*
910 * The testcases use internal knowledge of the implementation that shouldn't
911 * be exposed to the rest of the kernel. Include these directly here.
912 */
913 #ifdef CONFIG_CPA_DEBUG
914 #include "pageattr-test.c"
915 #endif
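
/*
 * Illustrative usage sketch (hypothetical, not part of pageattr.c):
 * shows how a caller might use the set_memory_*() interface defined
 * above to make a freshly allocated page uncacheable for device
 * accesses and restore write-back caching before freeing it again.
 * The function name is made up for this example and error handling
 * is minimal.
 */
static int __maybe_unused example_make_buffer_uncached(void)
{
	unsigned long addr;
	int ret;

	addr = get_zeroed_page(GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* Set _PAGE_PCD | _PAGE_PWT on the single page: */
	ret = set_memory_uc(addr, 1);
	if (ret) {
		free_page(addr);
		return ret;
	}

	/* ... the device would access the buffer here ... */

	/* Restore write-back caching before returning the page: */
	set_memory_wb(addr, 1);
	free_page(addr);

	return 0;
}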