/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flushtlb;
        unsigned long   pfn;
        unsigned        force_split : 1;
};

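/*
 * Example (illustrative): change_page_attr_set_clr() below fills one
 * of these in for a set_memory_rw() style request roughly as
 *
 *      cpa.vaddr       = addr;                 (page aligned start)
 *      cpa.numpages    = numpages;             (length of the range)
 *      cpa.mask_set    = __pgprot(_PAGE_RW);   (bits to set)
 *      cpa.mask_clr    = __pgprot(0);          (bits to clear)
 *      cpa.flushtlb    = 0;                    (set by the core code)
 *      cpa.force_split = 0;
 *
 * and hands &cpa to __change_page_attr_set_clr().
 */
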
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void __meminit update_page_count(int level, unsigned long pages)
{
#ifdef CONFIG_PROC_FS
        unsigned long flags;
        /* Protect against CPA */
        spin_lock_irqsave(&pgd_lock, flags);
        direct_pages_count[level] += pages;
        spin_unlock_irqrestore(&pgd_lock, flags);
#endif
}

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:      virtual start address
 * @size:       number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

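/*
 * Example (illustrative): cpa_flush_range() below uses this helper to
 * flush one page worth of cachelines at a time:
 *
 *      clflush_cache_range((void *) addr, PAGE_SIZE);
 *
 * The size argument is in bytes; the extra clflush(vend) above covers
 * a trailing partial cacheline.
 */
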
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640K and 1MB needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         * On 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

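/*
 * Example (illustrative): a request to set _PAGE_NX on a range whose
 * pages overlap the kernel text ends up with _PAGE_NX in 'forbidden',
 * so the bit is masked out of the returned pgprot and the text stays
 * executable. Likewise a request for _PAGE_RW on a pfn inside .rodata
 * is silently filtered, which also catches writable aliases of
 * .rodata in the direct mapping.
 */
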
/*
 * Look up the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}

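/*
 * Example (illustrative): callers check the returned level and the
 * present bit before treating the result as a 4k pte, following the
 * pattern used by cpa_flush_range() above:
 *
 *      unsigned int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *              ... a mapping exists at 'level' ...
 */
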
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        if (cpa->force_split)
                return 1;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

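/*
 * Worked example (illustrative) for the math above, on a 2M page
 * (psize == 0x200000), for a request starting one 4k page into the
 * large page:
 *
 *      address       = base + 0x1000
 *      nextpage_addr = (address + psize) & pmask = base + 0x200000
 *      numpages      = (nextpage_addr - address) >> PAGE_SHIFT = 511
 *
 * At most 511 pages of the request fit into this large page, the
 * alignment check at the end fails, and a split is requested only
 * when the pgprot actually has to change.
 */
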
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        continue;
                }
                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB                (20 - PAGE_SHIFT)
#define ROUND_MB_GB             ((1 << 10) - 1)
#define SHIFT_MB_GB             10
#define POOL_PAGES_PER_GB       16

void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}

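/*
 * Worked example (illustrative): with 4k pages (SHIFT_MB == 8) on a
 * machine with 4GiB of RAM, si.totalram is roughly 1048576 pages:
 *
 *      1048576 >> SHIFT_MB             = 4096 MiB
 *      (4096 + ROUND_MB_GB) >> 10      = 4 (GiB, rounded up)
 *      4 * POOL_PAGES_PER_GB           = 64 pool pages
 *
 * Without DEBUG_PAGEALLOC only a single page is kept preallocated.
 */
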
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;
                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        if (address >= (unsigned long)__va(0) &&
            address < (unsigned long)__va(max_pfn_mapped << PAGE_SHIFT)) {
                direct_pages_count[level]--;
                direct_pages_count[level - 1] += PTRS_PER_PTE;
        }

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual).
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else
                pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

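/*
 * Example (illustrative): splitting a 2M page installs PTRS_PER_PTE
 * smaller entries with pfninc == 1, all inheriting the pgprot of the
 * original large page. On 64bit, splitting a 1G page installs 512 2M
 * entries instead: pfninc becomes PMD_PAGE_SIZE >> PAGE_SHIFT and
 * _PAGE_PSE stays set in ref_prot.
 */
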
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                printk(KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                WARN_ON(1);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and cpa->flushtlb have been updated in
         * try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                cpa->flushtlb = 1;
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn > max_pfn_mapped)
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (!within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

#ifdef CONFIG_X86_64
        if (ret)
                return ret;
        /*
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);
#endif
        return ret;
}

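/*
 * Example (illustrative): changing attributes on an address outside
 * the direct mapping (a vmalloc area address, say) changes that
 * mapping itself; cpa_process_alias() then repeats the change on
 * __va(pfn << PAGE_SHIFT), the direct mapping alias of the same
 * physical page, and on 64bit also on the high kernel mapping when
 * the pfn falls inside the kernel image. That way no alias with
 * conflicting attributes is left behind.
 */
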
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

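/*
 * Example (illustrative): set_memory_uc() sets _PAGE_CACHE_UC_MINUS,
 * which includes a PCD component, so cache_attr() is non-zero and the
 * caches get flushed as well. set_memory_ro() only touches _PAGE_RW,
 * so cache_attr() returns 0 and a TLB flush is sufficient.
 */
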
static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check if we were requested to change a feature that is not
         * supported:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;
        cpa.force_split = force_split;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, and
         * in the error case, we fall back to cpa_flush_all (which
         * uses wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0);
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0);
}

int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS));
}

int set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
                            _PAGE_CACHE_UC_MINUS, NULL))
                return -EINVAL;

        return _set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_uc);

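/*
 * Example usage (illustrative): a driver that needs an uncached view
 * of a page aligned kernel buffer could do
 *
 *      if (set_memory_uc(addr, numpages))
 *              goto err;
 *      ...
 *      set_memory_wb(addr, numpages);
 *
 * set_memory_uc() fails with -EINVAL when the memtype reservation
 * conflicts with an existing reservation for the range.
 */
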
int _set_memory_wc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_CACHE_WC));
}

int set_memory_wc(unsigned long addr, int numpages)
{
        if (!pat_wc_enabled)
                return set_memory_uc(addr, numpages);

        if (reserve_memtype(addr, addr + numpages * PAGE_SIZE,
                            _PAGE_CACHE_WC, NULL))
                return -EINVAL;

        return _set_memory_wc(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK));
}

int set_memory_wb(unsigned long addr, int numpages)
{
        free_memtype(addr, addr + numpages * PAGE_SIZE);

        return _set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0),
                                        __pgprot(0), 1);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs, but that can
         * deadlock, so flush only the current cpu:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}

#ifdef CONFIG_DEBUG_FS
static int dpa_show(struct seq_file *m, void *v)
{
        seq_puts(m, "DEBUG_PAGEALLOC\n");
        seq_printf(m, "pool_size : %lu\n", pool_size);
        seq_printf(m, "pool_pages : %lu\n", pool_pages);
        seq_printf(m, "pool_low : %lu\n", pool_low);
        seq_printf(m, "pool_used : %lu\n", pool_used);
        seq_printf(m, "pool_failed : %lu\n", pool_failed);

        return 0;
}

static int dpa_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, dpa_show, NULL);
}

static const struct file_operations dpa_fops = {
        .open           = dpa_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init debug_pagealloc_proc_init(void)
{
        struct dentry *de;

        de = debugfs_create_file("debug_pagealloc", 0600, NULL, NULL,
                                 &dpa_fops);
        if (!de)
                return -ENOMEM;

        return 0;
}
__initcall(debug_pagealloc_proc_init);
#endif

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

#ifdef CONFIG_PROC_FS
int arch_report_meminfo(char *page)
{
        int n;
        n = sprintf(page, "DirectMap4k: %8lu\n"
                          "DirectMap2M: %8lu\n",
                    direct_pages_count[PG_LEVEL_4K],
                    direct_pages_count[PG_LEVEL_2M]);
#ifdef CONFIG_X86_64
        n += sprintf(page + n, "DirectMap1G: %8lu\n",
                     direct_pages_count[PG_LEVEL_1G]);
#endif
        return n;
}
#endif

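/*
 * Illustrative output (values made up), as typically surfaced through
 * /proc/meminfo:
 *
 *      DirectMap4k:      2048
 *      DirectMap2M:      2044
 *      DirectMap1G:         0
 */
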
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif