/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

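/*
 * Field notes (an added summary, not part of the original comment):
 * vaddr points to the (first) virtual address to change; mask_set and
 * mask_clr are the pgprot bits to set respectively clear; numpages is
 * the number of pages still to process; pfn tracks the physical frame
 * for alias fixups; force_split forbids large page preservation;
 * curpage indexes vaddr/pages when CPA_ARRAY/CPA_PAGES_ARRAY is used.
 */
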
/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change a page attribute in parallel while another CPU is
 * splitting a large page entry and changing the attribute.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        unsigned long flags;

        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

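/*
 * Illustrative /proc/meminfo excerpt produced by arch_report_meminfo()
 * (the numbers are made up for the example):
 *
 *	DirectMap4k:     12288 kB
 *	DirectMap2M:   2084864 kB
 */
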
#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

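/*
 * Illustrative use of clflush_cache_range() (a sketch, not a call site
 * in this file): flushing one page worth of cachelines, e.g. after
 * modifying data that a non-snooping device will read:
 *
 *	clflush_cache_range(page_address(page), PAGE_SIZE);
 */
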
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around Errata in early athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

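/*
 * (Added note) The do_wbinvd cutoff above assumes that clflushing more
 * than 1024 individual 4K pages (4M worth of data) costs more than one
 * global wbinvd, hence cpa_flush_array() falls back to the full flush.
 */
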
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext since that is gone after boot.
         * On 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

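/*
 * Illustrative use of lookup_address() (a sketch): querying at which
 * page table level a kernel virtual address is currently mapped:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_2M)
 *		... addr is covered by a 2M mapping ...
 */
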
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

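/*
 * Return convention of try_preserve_large_page() (an added summary):
 * 1 means the caller must split the large page, 0 means the range was
 * handled while keeping the large page intact, and a negative value
 * signals an error (e.g. -EINVAL for an unknown mapping level).
 */
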
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
         * If we ever want to utilize the PAT bit, we need to
         * update this function to make sure it's converted from
         * bit 12 to bit 7 when we cross from the 2MB level to
         * the 4K level:
         */
        WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
                address < (unsigned long)__va(max_low_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);

#ifdef CONFIG_X86_64
        if (address >= (unsigned long)__va(1UL<<32) &&
                address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT))
                split_page_count(level);
#endif

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();

        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base)
                __free_page(base);
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non-primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed the cpa
         * request for one virtual address page and its pfn. TBD: numpages
         * can be set based on the initial value and the level returned by
         * lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY)
                address = (unsigned long)page_address(cpa->pages[cpa->curpage]);
        else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flags have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, which says
                 * "The TLBs may contain both ordinary and large-page
                 * translations for a 4-KByte range of linear addresses. This
                 * may occur if software modifies the paging structures so that
                 * the page size used for the address range changes. If the two
                 * translations differ with respect to page frame or attributes
                 * (e.g., permissions), processor behavior is undefined and may
                 * be implementation-specific."
                 *
                 * We do this global tlb flush inside the cpa_lock, so that we
                 * don't allow any other cpu, with stale tlb entries, to change
                 * the page attribute in parallel for an address that also falls
                 * into the just split large page entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

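/*
 * (Added summary) cpa_process_alias() keeps the other mappings of the
 * same physical range coherent: when the primary call changed e.g. a
 * vmalloc/ioremap address, the low direct mapping and, on 64-bit, the
 * high kernel text mapping are adjusted as well.
 */
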
static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (cpa->pfn >= max_pfn_mapped)
                return 0;

#ifdef CONFIG_X86_64
        if (cpa->pfn >= max_low_pfn_mapped && cpa->pfn < (1UL<<(32-PAGE_SHIFT)))
                return 0;
#endif
        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY)
                vaddr = (unsigned long)page_address(cpa->pages[cpa->curpage]);
        else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;

        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

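/*
 * For example (illustrative): cache_attr(__pgprot(_PAGE_CACHE_WC)) is
 * nonzero because WC is encoded via the PWT/PCD/PAT bits, so changing
 * to WC triggers a cache flush, while a pure _PAGE_RW change does not.
 */
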
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check, if we are requested to change a feature that is not
         * supported:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case.
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it to
         * avoid the wbinvd. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
                        cpa_flush_range(*addr, numpages, cache);
        } else
                cpa_flush_all(cache);

out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);

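/*
 * Illustrative use (a sketch, not a call site from this file): a driver
 * that needs a kernel buffer mapped uncached, restoring write-back on
 * teardown:
 *
 *	if (set_memory_uc((unsigned long)buf, nrpages))
 *		goto error;
 *	...
 *	set_memory_wb((unsigned long)buf, nrpages);
 */
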
int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        int i, j;
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                        _PAGE_CACHE_UC_MINUS, NULL);
                if (ret)
                        goto out_free;
        }

        ret = change_page_attr_set(addr, addrinarray,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 1);
        if (ret)
                goto out_free;

        return 0;

out_free:
        for (j = 0; j < i; j++)
                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

        return ret;
}
EXPORT_SYMBOL(set_memory_array_uc);

int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;
        ret = change_page_attr_set(&addr, numpages,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);

        if (!ret) {
                ret = change_page_attr_set(&addr, numpages,
                                           __pgprot(_PAGE_CACHE_WC), 0);
        }
        return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        if (!pat_enabled)
                return set_memory_uc(addr, numpages);

        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                _PAGE_CACHE_WC, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_wc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
        int ret;

        ret = _set_memory_wb(addr, numpages);
        if (ret)
                return ret;

        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
        int i;
        int ret;

        ret = change_page_attr_clear(addr, addrinarray,
                                      __pgprot(_PAGE_CACHE_MASK), 1);
        if (ret)
                return ret;

        for (i = 0; i < addrinarray; i++)
                free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

        return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        unsigned long start;
        unsigned long end;
        int i;
        int free_idx;

        for (i = 0; i < addrinarray; i++) {
                start = (unsigned long)page_address(pages[i]);
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, _PAGE_CACHE_UC_MINUS, NULL))
                        goto err_out;
        }

        if (cpa_set_pages_array(pages, addrinarray,
                        __pgprot(_PAGE_CACHE_UC_MINUS)) == 0) {
                return 0; /* Success */
        }
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
                start = (unsigned long)page_address(pages[i]);
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
        return -EINVAL;
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        int retval;
        unsigned long start;
        unsigned long end;
        int i;

        retval = cpa_clear_pages_array(pages, addrinarray,
                        __pgprot(_PAGE_CACHE_MASK));
        if (retval)
                return retval;

        for (i = 0; i < addrinarray; i++) {
                start = (unsigned long)page_address(pages[i]);
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }

        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0),
                                .flags = 0};

        /*
         * No alias checking needed for setting the present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .flags = 0};

        /*
         * No alias checking needed for clearing the present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages for identity mappings are not used at boot time
         * and hence no memory allocations during large page split.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock - flush only the current cpu:
         */
        __flush_tlb_all();
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif