/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgd_t		*pgd;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split : 1;
	int		curpage;
	struct page	**pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC, which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU with stale large TLB
 * entries to change the page attributes in parallel while another CPU is
 * splitting a large page entry and changing the attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	/* Protect against CPA */
	spin_lock(&pgd_lock);
	direct_pages_count[level] += pages;
	spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k: %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
	seq_printf(m, "DirectMap2M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
#else
	seq_printf(m, "DirectMap4M: %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
	if (direct_gbpages)
		seq_printf(m, "DirectMap1G: %8lu kB\n",
			direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
119 | ||
120 | /* | |
121 | * Flushing functions | |
122 | */ | |
123 | ||
124 | /** | |
125 | * clflush_cache_range - flush a cache range with clflush | |
126 | * @vaddr: virtual start address | |
127 | * @size: number of bytes to flush | |
128 | * | |
129 | * clflush is an unordered instruction which needs fencing with mfence | |
130 | * to avoid ordering issues. | |
131 | */ | |
132 | void clflush_cache_range(void *vaddr, unsigned int size) | |
133 | { | |
134 | void *vend = vaddr + size - 1; | |
135 | ||
136 | mb(); | |
137 | ||
138 | for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size) | |
139 | clflush(vaddr); | |
140 | /* | |
141 | * Flush any possible final partial cacheline: | |
142 | */ | |
143 | clflush(vend); | |
144 | ||
145 | mb(); | |
146 | } | |
147 | EXPORT_SYMBOL_GPL(clflush_cache_range); | |
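
/*
 * Illustrative usage sketch (not part of the original file): a driver that
 * has modified a buffer which a device will observe through an uncached
 * alias might write back the affected cachelines before starting the
 * transfer. buf and len are hypothetical names:
 *
 *	memset(buf, 0, len);
 *	clflush_cache_range(buf, len);
 *
 * The mb() pair inside clflush_cache_range() orders the flushes against
 * surrounding memory operations, so no extra fencing is needed here.
 */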
148 | ||
149 | static void __cpa_flush_all(void *arg) | |
150 | { | |
151 | unsigned long cache = (unsigned long)arg; | |
152 | ||
153 | /* | |
154 | * Flush all to work around Errata in early athlons regarding | |
155 | * large page flushing. | |
156 | */ | |
157 | __flush_tlb_all(); | |
158 | ||
159 | if (cache && boot_cpu_data.x86 >= 4) | |
160 | wbinvd(); | |
161 | } | |
162 | ||
163 | static void cpa_flush_all(unsigned long cache) | |
164 | { | |
165 | BUG_ON(irqs_disabled()); | |
166 | ||
167 | on_each_cpu(__cpa_flush_all, (void *) cache, 1); | |
168 | } | |
169 | ||
170 | static void __cpa_flush_range(void *arg) | |
171 | { | |
172 | /* | |
173 | * We could optimize that further and do individual per page | |
174 | * tlb invalidates for a low number of pages. Caveat: we must | |
175 | * flush the high aliases on 64bit as well. | |
176 | */ | |
177 | __flush_tlb_all(); | |
178 | } | |
179 | ||
180 | static void cpa_flush_range(unsigned long start, int numpages, int cache) | |
181 | { | |
182 | unsigned int i, level; | |
183 | unsigned long addr; | |
184 | ||
185 | BUG_ON(irqs_disabled()); | |
186 | WARN_ON(PAGE_ALIGN(start) != start); | |
187 | ||
188 | on_each_cpu(__cpa_flush_range, NULL, 1); | |
189 | ||
190 | if (!cache) | |
191 | return; | |
192 | ||
193 | /* | |
194 | * We only need to flush on one CPU, | |
195 | * clflush is a MESI-coherent instruction that | |
196 | * will cause all other CPUs to flush the same | |
197 | * cachelines: | |
198 | */ | |
199 | for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) { | |
200 | pte_t *pte = lookup_address(addr, &level); | |
201 | ||
202 | /* | |
203 | * Only flush present addresses: | |
204 | */ | |
205 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) | |
206 | clflush_cache_range((void *) addr, PAGE_SIZE); | |
207 | } | |
208 | } | |
209 | ||
210 | static void cpa_flush_array(unsigned long *start, int numpages, int cache, | |
211 | int in_flags, struct page **pages) | |
212 | { | |
213 | unsigned int i, level; | |
214 | unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ | |
215 | ||
216 | BUG_ON(irqs_disabled()); | |
217 | ||
218 | on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1); | |
219 | ||
220 | if (!cache || do_wbinvd) | |
221 | return; | |
222 | ||
223 | /* | |
224 | * We only need to flush on one CPU, | |
225 | * clflush is a MESI-coherent instruction that | |
226 | * will cause all other CPUs to flush the same | |
227 | * cachelines: | |
228 | */ | |
229 | for (i = 0; i < numpages; i++) { | |
230 | unsigned long addr; | |
231 | pte_t *pte; | |
232 | ||
233 | if (in_flags & CPA_PAGES_ARRAY) | |
234 | addr = (unsigned long)page_address(pages[i]); | |
235 | else | |
236 | addr = start[i]; | |
237 | ||
238 | pte = lookup_address(addr, &level); | |
239 | ||
240 | /* | |
241 | * Only flush present addresses: | |
242 | */ | |
243 | if (pte && (pte_val(*pte) & _PAGE_PRESENT)) | |
244 | clflush_cache_range((void *)addr, PAGE_SIZE); | |
245 | } | |
246 | } | |
247 | ||
248 | /* | |
249 | * Certain areas of memory on x86 require very specific protection flags, | |
250 | * for example the BIOS area or kernel text. Callers don't always get this | |
251 | * right (again, ioremap() on BIOS memory is not uncommon) so this function | |
252 | * checks and fixes these known static required protection bits. | |
253 | */ | |
254 | static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, | |
255 | unsigned long pfn) | |
256 | { | |
257 | pgprot_t forbidden = __pgprot(0); | |
258 | ||
259 | /* | |
260 | * The BIOS area between 640k and 1Mb needs to be executable for | |
261 | * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. | |
262 | */ | |
263 | #ifdef CONFIG_PCI_BIOS | |
264 | if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) | |
265 | pgprot_val(forbidden) |= _PAGE_NX; | |
266 | #endif | |
267 | ||
	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 * On 64-bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
		   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
	/*
	 * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
	 * the kernel text mappings for the large-page-aligned text and rodata
	 * sections will always be read-only. The kernel identity mappings
	 * covering the holes caused by this alignment can be anything the
	 * caller asks for.
	 *
	 * This preserves the large page mappings for kernel text/data
	 * at no extra cost.
	 */
	if (kernel_set_to_readonly &&
	    within(address, (unsigned long)_text,
		   (unsigned long)__end_rodata_hpage_align)) {
		unsigned int level;

		/*
		 * Don't enforce the !RW mapping for the kernel text mapping
		 * if the current mapping is already using small pages.
		 * There is no need to work hard to preserve large page
		 * mappings in this case.
		 *
		 * This also fixes the Linux Xen paravirt guest boot failure
		 * caused by unexpected read-only mappings for kernel identity
		 * mappings. In this paravirt guest case, the kernel text
		 * mapping and the kernel identity mapping share the same
		 * page-table pages, so we can't really use different
		 * protections for the kernel text and identity mappings.
		 * Also, these shared mappings are made of small page
		 * mappings. Thus, not enforcing the !RW mapping for small
		 * page kernel text mappings helps Linux Xen paravirt guests
		 * boot as well.
		 */
		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
			pgprot_val(forbidden) |= _PAGE_RW;
	}
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
325 | ||
326 | static pte_t *__lookup_address_in_pgd(pgd_t *pgd, unsigned long address, | |
327 | unsigned int *level) | |
328 | { | |
329 | pud_t *pud; | |
330 | pmd_t *pmd; | |
331 | ||
332 | *level = PG_LEVEL_NONE; | |
333 | ||
334 | if (pgd_none(*pgd)) | |
335 | return NULL; | |
336 | ||
337 | pud = pud_offset(pgd, address); | |
338 | if (pud_none(*pud)) | |
339 | return NULL; | |
340 | ||
341 | *level = PG_LEVEL_1G; | |
342 | if (pud_large(*pud) || !pud_present(*pud)) | |
343 | return (pte_t *)pud; | |
344 | ||
345 | pmd = pmd_offset(pud, address); | |
346 | if (pmd_none(*pmd)) | |
347 | return NULL; | |
348 | ||
349 | *level = PG_LEVEL_2M; | |
350 | if (pmd_large(*pmd) || !pmd_present(*pmd)) | |
351 | return (pte_t *)pmd; | |
352 | ||
353 | *level = PG_LEVEL_4K; | |
354 | ||
355 | return pte_offset_kernel(pmd, address); | |
356 | } | |
357 | ||
358 | /* | |
359 | * Lookup the page table entry for a virtual address. Return a pointer | |
360 | * to the entry and the level of the mapping. | |
361 | * | |
362 | * Note: We return pud and pmd either when the entry is marked large | |
363 | * or when the present bit is not set. Otherwise we would return a | |
364 | * pointer to a nonexisting mapping. | |
365 | */ | |
366 | pte_t *lookup_address(unsigned long address, unsigned int *level) | |
367 | { | |
368 | return __lookup_address_in_pgd(pgd_offset_k(address), address, level); | |
369 | } | |
370 | EXPORT_SYMBOL_GPL(lookup_address); | |
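
/*
 * Illustrative sketch (not part of the original file): checking whether a
 * kernel virtual address is currently mapped by a 2M page. addr is a
 * hypothetical kernel address:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT) && level == PG_LEVEL_2M)
 *		pr_info("%lx is mapped by a 2M page\n", addr);
 *
 * As noted above, pte points at the pud/pmd entry itself when
 * level != PG_LEVEL_4K, so callers must check the level before
 * interpreting the entry.
 */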
371 | ||
372 | static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address, | |
373 | unsigned int *level) | |
374 | { | |
375 | if (cpa->pgd) | |
376 | return __lookup_address_in_pgd(cpa->pgd + pgd_index(address), | |
377 | address, level); | |
378 | ||
379 | return lookup_address(address, level); | |
380 | } | |
381 | ||
/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
	unsigned long virt_addr = (unsigned long)__virt_addr;
	phys_addr_t phys_addr;
	unsigned long offset;
	enum pg_level level;
	unsigned long psize;
	unsigned long pmask;
	pte_t *pte;

	pte = lookup_address(virt_addr, &level);
	BUG_ON(!pte);
	psize = page_level_size(level);
	pmask = page_level_mask(level);
	offset = virt_addr & ~pmask;
	phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
	return (phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);
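
/*
 * Illustrative sketch (not part of the original file): resolving the
 * physical address of a percpu variable, which may live in memory where
 * __pa() is not valid. some_percpu_var is a hypothetical name:
 *
 *	phys_addr_t pa = slow_virt_to_phys(this_cpu_ptr(&some_percpu_var));
 */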
412 | ||
413 | /* | |
414 | * Set the new pmd in all the pgds we know about: | |
415 | */ | |
416 | static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) | |
417 | { | |
418 | /* change init_mm */ | |
419 | set_pte_atomic(kpte, pte); | |
420 | #ifdef CONFIG_X86_32 | |
421 | if (!SHARED_KERNEL_PMD) { | |
422 | struct page *page; | |
423 | ||
424 | list_for_each_entry(page, &pgd_list, lru) { | |
425 | pgd_t *pgd; | |
426 | pud_t *pud; | |
427 | pmd_t *pmd; | |
428 | ||
429 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | |
430 | pud = pud_offset(pgd, address); | |
431 | pmd = pmd_offset(pud, address); | |
432 | set_pte_atomic((pte_t *)pmd, pte); | |
433 | } | |
434 | } | |
435 | #endif | |
436 | } | |
437 | ||
438 | static int | |
439 | try_preserve_large_page(pte_t *kpte, unsigned long address, | |
440 | struct cpa_data *cpa) | |
441 | { | |
442 | unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; | |
443 | pte_t new_pte, old_pte, *tmp; | |
444 | pgprot_t old_prot, new_prot, req_prot; | |
445 | int i, do_split = 1; | |
446 | enum pg_level level; | |
447 | ||
448 | if (cpa->force_split) | |
449 | return 1; | |
450 | ||
451 | spin_lock(&pgd_lock); | |
452 | /* | |
453 | * Check for races, another CPU might have split this page | |
454 | * up already: | |
455 | */ | |
456 | tmp = lookup_address(address, &level); | |
457 | if (tmp != kpte) | |
458 | goto out_unlock; | |
459 | ||
460 | switch (level) { | |
461 | case PG_LEVEL_2M: | |
462 | #ifdef CONFIG_X86_64 | |
463 | case PG_LEVEL_1G: | |
464 | #endif | |
465 | psize = page_level_size(level); | |
466 | pmask = page_level_mask(level); | |
467 | break; | |
468 | default: | |
469 | do_split = -EINVAL; | |
470 | goto out_unlock; | |
471 | } | |
472 | ||
473 | /* | |
474 | * Calculate the number of pages, which fit into this large | |
475 | * page starting at address: | |
476 | */ | |
477 | nextpage_addr = (address + psize) & pmask; | |
478 | numpages = (nextpage_addr - address) >> PAGE_SHIFT; | |
479 | if (numpages < cpa->numpages) | |
480 | cpa->numpages = numpages; | |
481 | ||
482 | /* | |
483 | * We are safe now. Check whether the new pgprot is the same: | |
484 | */ | |
485 | old_pte = *kpte; | |
486 | old_prot = req_prot = pte_pgprot(old_pte); | |
487 | ||
488 | pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); | |
489 | pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); | |
490 | ||
491 | /* | |
492 | * Set the PSE and GLOBAL flags only if the PRESENT flag is | |
493 | * set otherwise pmd_present/pmd_huge will return true even on | |
494 | * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL | |
495 | * for the ancient hardware that doesn't support it. | |
496 | */ | |
497 | if (pgprot_val(req_prot) & _PAGE_PRESENT) | |
498 | pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL; | |
499 | else | |
500 | pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL); | |
501 | ||
502 | req_prot = canon_pgprot(req_prot); | |
503 | ||
504 | /* | |
505 | * old_pte points to the large page base address. So we need | |
506 | * to add the offset of the virtual address: | |
507 | */ | |
508 | pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT); | |
509 | cpa->pfn = pfn; | |
510 | ||
511 | new_prot = static_protections(req_prot, address, pfn); | |
512 | ||
513 | /* | |
514 | * We need to check the full range, whether | |
515 | * static_protection() requires a different pgprot for one of | |
516 | * the pages in the range we try to preserve: | |
517 | */ | |
518 | addr = address & pmask; | |
519 | pfn = pte_pfn(old_pte); | |
520 | for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) { | |
521 | pgprot_t chk_prot = static_protections(req_prot, addr, pfn); | |
522 | ||
523 | if (pgprot_val(chk_prot) != pgprot_val(new_prot)) | |
524 | goto out_unlock; | |
525 | } | |
526 | ||
527 | /* | |
528 | * If there are no changes, return. maxpages has been updated | |
529 | * above: | |
530 | */ | |
531 | if (pgprot_val(new_prot) == pgprot_val(old_prot)) { | |
532 | do_split = 0; | |
533 | goto out_unlock; | |
534 | } | |
535 | ||
536 | /* | |
537 | * We need to change the attributes. Check, whether we can | |
538 | * change the large page in one go. We request a split, when | |
539 | * the address is not aligned and the number of pages is | |
540 | * smaller than the number of pages in the large page. Note | |
541 | * that we limited the number of possible pages already to | |
542 | * the number of pages in the large page. | |
543 | */ | |
544 | if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) { | |
545 | /* | |
546 | * The address is aligned and the number of pages | |
547 | * covers the full page. | |
548 | */ | |
549 | new_pte = pfn_pte(pte_pfn(old_pte), new_prot); | |
550 | __set_pmd_pte(kpte, address, new_pte); | |
551 | cpa->flags |= CPA_FLUSHTLB; | |
552 | do_split = 0; | |
553 | } | |
554 | ||
555 | out_unlock: | |
556 | spin_unlock(&pgd_lock); | |
557 | ||
558 | return do_split; | |
559 | } | |
560 | ||
561 | static int | |
562 | __split_large_page(pte_t *kpte, unsigned long address, struct page *base) | |
563 | { | |
564 | pte_t *pbase = (pte_t *)page_address(base); | |
565 | unsigned long pfn, pfninc = 1; | |
566 | unsigned int i, level; | |
567 | pte_t *tmp; | |
568 | pgprot_t ref_prot; | |
569 | ||
570 | spin_lock(&pgd_lock); | |
571 | /* | |
572 | * Check for races, another CPU might have split this page | |
573 | * up for us already: | |
574 | */ | |
575 | tmp = lookup_address(address, &level); | |
576 | if (tmp != kpte) { | |
577 | spin_unlock(&pgd_lock); | |
578 | return 1; | |
579 | } | |
580 | ||
581 | paravirt_alloc_pte(&init_mm, page_to_pfn(base)); | |
582 | ref_prot = pte_pgprot(pte_clrhuge(*kpte)); | |
583 | /* | |
584 | * If we ever want to utilize the PAT bit, we need to | |
585 | * update this function to make sure it's converted from | |
586 | * bit 12 to bit 7 when we cross from the 2MB level to | |
587 | * the 4K level: | |
588 | */ | |
589 | WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE); | |
590 | ||
591 | #ifdef CONFIG_X86_64 | |
592 | if (level == PG_LEVEL_1G) { | |
593 | pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT; | |
594 | /* | |
595 | * Set the PSE flags only if the PRESENT flag is set | |
596 | * otherwise pmd_present/pmd_huge will return true | |
597 | * even on a non present pmd. | |
598 | */ | |
599 | if (pgprot_val(ref_prot) & _PAGE_PRESENT) | |
600 | pgprot_val(ref_prot) |= _PAGE_PSE; | |
601 | else | |
602 | pgprot_val(ref_prot) &= ~_PAGE_PSE; | |
603 | } | |
604 | #endif | |
605 | ||
606 | /* | |
607 | * Set the GLOBAL flags only if the PRESENT flag is set | |
608 | * otherwise pmd/pte_present will return true even on a non | |
609 | * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL | |
610 | * for the ancient hardware that doesn't support it. | |
611 | */ | |
612 | if (pgprot_val(ref_prot) & _PAGE_PRESENT) | |
613 | pgprot_val(ref_prot) |= _PAGE_GLOBAL; | |
614 | else | |
615 | pgprot_val(ref_prot) &= ~_PAGE_GLOBAL; | |
616 | ||
617 | /* | |
618 | * Get the target pfn from the original entry: | |
619 | */ | |
620 | pfn = pte_pfn(*kpte); | |
621 | for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc) | |
622 | set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot))); | |
623 | ||
624 | if (pfn_range_is_mapped(PFN_DOWN(__pa(address)), | |
625 | PFN_DOWN(__pa(address)) + 1)) | |
626 | split_page_count(level); | |
627 | ||
628 | /* | |
629 | * Install the new, split up pagetable. | |
630 | * | |
631 | * We use the standard kernel pagetable protections for the new | |
632 | * pagetable protections, the actual ptes set above control the | |
633 | * primary protection behavior: | |
634 | */ | |
635 | __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE))); | |
636 | ||
637 | /* | |
638 | * Intel Atom errata AAH41 workaround. | |
639 | * | |
640 | * The real fix should be in hw or in a microcode update, but | |
641 | * we also probabilistically try to reduce the window of having | |
642 | * a large TLB mixed with 4K TLBs while instruction fetches are | |
643 | * going on. | |
644 | */ | |
645 | __flush_tlb_all(); | |
646 | spin_unlock(&pgd_lock); | |
647 | ||
648 | return 0; | |
649 | } | |
650 | ||
651 | static int split_large_page(pte_t *kpte, unsigned long address) | |
652 | { | |
653 | struct page *base; | |
654 | ||
655 | if (!debug_pagealloc) | |
656 | spin_unlock(&cpa_lock); | |
657 | base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0); | |
658 | if (!debug_pagealloc) | |
659 | spin_lock(&cpa_lock); | |
660 | if (!base) | |
661 | return -ENOMEM; | |
662 | ||
663 | if (__split_large_page(kpte, address, base)) | |
664 | __free_page(base); | |
665 | ||
666 | return 0; | |
667 | } | |
668 | ||
669 | #define populate_pud(cpa, addr, pgd, pgprot) (-1) | |
670 | ||
/*
 * Restrictions for kernel page table do not necessarily apply when mapping in
 * an alternate PGD.
 */
static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
{
	pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
	bool allocd_pgd = false;
	pgd_t *pgd_entry;
	pud_t *pud = NULL;	/* shut up gcc */
	int ret;

	pgd_entry = cpa->pgd + pgd_index(addr);

	/*
	 * Allocate a PUD page and hand it down for mapping.
	 */
	if (pgd_none(*pgd_entry)) {
		pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
		if (!pud)
			return -1;

		set_pgd(pgd_entry, __pgd(__pa(pud) | _KERNPG_TABLE));
		allocd_pgd = true;
	}

	pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(pgprot) |= pgprot_val(cpa->mask_set);

	ret = populate_pud(cpa, addr, pgd_entry, pgprot);
	if (ret < 0)
		return ret;

	cpa->numpages = ret;
	return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	/*
	 * Ignore all non primary paths.
	 */
	if (!primary)
		return 0;

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}
737 | ||
738 | static int __change_page_attr(struct cpa_data *cpa, int primary) | |
739 | { | |
740 | unsigned long address; | |
741 | int do_split, err; | |
742 | unsigned int level; | |
743 | pte_t *kpte, old_pte; | |
744 | ||
745 | if (cpa->flags & CPA_PAGES_ARRAY) { | |
746 | struct page *page = cpa->pages[cpa->curpage]; | |
747 | if (unlikely(PageHighMem(page))) | |
748 | return 0; | |
749 | address = (unsigned long)page_address(page); | |
750 | } else if (cpa->flags & CPA_ARRAY) | |
751 | address = cpa->vaddr[cpa->curpage]; | |
752 | else | |
753 | address = *cpa->vaddr; | |
754 | repeat: | |
755 | kpte = lookup_address(address, &level); | |
756 | if (!kpte) | |
757 | return __cpa_process_fault(cpa, address, primary); | |
758 | ||
759 | old_pte = *kpte; | |
760 | if (!pte_val(old_pte)) | |
761 | return __cpa_process_fault(cpa, address, primary); | |
762 | ||
763 | if (level == PG_LEVEL_4K) { | |
764 | pte_t new_pte; | |
765 | pgprot_t new_prot = pte_pgprot(old_pte); | |
766 | unsigned long pfn = pte_pfn(old_pte); | |
767 | ||
768 | pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); | |
769 | pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); | |
770 | ||
771 | new_prot = static_protections(new_prot, address, pfn); | |
772 | ||
773 | /* | |
774 | * Set the GLOBAL flags only if the PRESENT flag is | |
775 | * set otherwise pte_present will return true even on | |
776 | * a non present pte. The canon_pgprot will clear | |
777 | * _PAGE_GLOBAL for the ancient hardware that doesn't | |
778 | * support it. | |
779 | */ | |
780 | if (pgprot_val(new_prot) & _PAGE_PRESENT) | |
781 | pgprot_val(new_prot) |= _PAGE_GLOBAL; | |
782 | else | |
783 | pgprot_val(new_prot) &= ~_PAGE_GLOBAL; | |
784 | ||
785 | /* | |
786 | * We need to keep the pfn from the existing PTE, | |
787 | * after all we're only going to change it's attributes | |
788 | * not the memory it points to | |
789 | */ | |
790 | new_pte = pfn_pte(pfn, canon_pgprot(new_prot)); | |
791 | cpa->pfn = pfn; | |
792 | /* | |
793 | * Do we really change anything ? | |
794 | */ | |
795 | if (pte_val(old_pte) != pte_val(new_pte)) { | |
796 | set_pte_atomic(kpte, new_pte); | |
797 | cpa->flags |= CPA_FLUSHTLB; | |
798 | } | |
799 | cpa->numpages = 1; | |
800 | return 0; | |
801 | } | |
802 | ||
803 | /* | |
804 | * Check, whether we can keep the large page intact | |
805 | * and just change the pte: | |
806 | */ | |
807 | do_split = try_preserve_large_page(kpte, address, cpa); | |
808 | /* | |
809 | * When the range fits into the existing large page, | |
810 | * return. cp->numpages and cpa->tlbflush have been updated in | |
811 | * try_large_page: | |
812 | */ | |
813 | if (do_split <= 0) | |
814 | return do_split; | |
815 | ||
816 | /* | |
817 | * We have to split the large page: | |
818 | */ | |
819 | err = split_large_page(kpte, address); | |
820 | if (!err) { | |
821 | /* | |
822 | * Do a global flush tlb after splitting the large page | |
823 | * and before we do the actual change page attribute in the PTE. | |
824 | * | |
825 | * With out this, we violate the TLB application note, that says | |
826 | * "The TLBs may contain both ordinary and large-page | |
827 | * translations for a 4-KByte range of linear addresses. This | |
828 | * may occur if software modifies the paging structures so that | |
829 | * the page size used for the address range changes. If the two | |
830 | * translations differ with respect to page frame or attributes | |
831 | * (e.g., permissions), processor behavior is undefined and may | |
832 | * be implementation-specific." | |
833 | * | |
834 | * We do this global tlb flush inside the cpa_lock, so that we | |
835 | * don't allow any other cpu, with stale tlb entries change the | |
836 | * page attribute in parallel, that also falls into the | |
837 | * just split large page entry. | |
838 | */ | |
839 | flush_tlb_all(); | |
840 | goto repeat; | |
841 | } | |
842 | ||
843 | return err; | |
844 | } | |
845 | ||
846 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias); | |
847 | ||
848 | static int cpa_process_alias(struct cpa_data *cpa) | |
849 | { | |
850 | struct cpa_data alias_cpa; | |
851 | unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); | |
852 | unsigned long vaddr; | |
853 | int ret; | |
854 | ||
855 | if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1)) | |
856 | return 0; | |
857 | ||
858 | /* | |
859 | * No need to redo, when the primary call touched the direct | |
860 | * mapping already: | |
861 | */ | |
862 | if (cpa->flags & CPA_PAGES_ARRAY) { | |
863 | struct page *page = cpa->pages[cpa->curpage]; | |
864 | if (unlikely(PageHighMem(page))) | |
865 | return 0; | |
866 | vaddr = (unsigned long)page_address(page); | |
867 | } else if (cpa->flags & CPA_ARRAY) | |
868 | vaddr = cpa->vaddr[cpa->curpage]; | |
869 | else | |
870 | vaddr = *cpa->vaddr; | |
871 | ||
872 | if (!(within(vaddr, PAGE_OFFSET, | |
873 | PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) { | |
874 | ||
875 | alias_cpa = *cpa; | |
876 | alias_cpa.vaddr = &laddr; | |
877 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | |
878 | ||
879 | ret = __change_page_attr_set_clr(&alias_cpa, 0); | |
880 | if (ret) | |
881 | return ret; | |
882 | } | |
883 | ||
884 | #ifdef CONFIG_X86_64 | |
885 | /* | |
886 | * If the primary call didn't touch the high mapping already | |
887 | * and the physical address is inside the kernel map, we need | |
888 | * to touch the high mapped kernel as well: | |
889 | */ | |
890 | if (!within(vaddr, (unsigned long)_text, _brk_end) && | |
891 | within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) { | |
892 | unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + | |
893 | __START_KERNEL_map - phys_base; | |
894 | alias_cpa = *cpa; | |
895 | alias_cpa.vaddr = &temp_cpa_vaddr; | |
896 | alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY); | |
897 | ||
898 | /* | |
899 | * The high mapping range is imprecise, so ignore the | |
900 | * return value. | |
901 | */ | |
902 | __change_page_attr_set_clr(&alias_cpa, 0); | |
903 | } | |
904 | #endif | |
905 | ||
906 | return 0; | |
907 | } | |
908 | ||
909 | static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias) | |
910 | { | |
911 | int ret, numpages = cpa->numpages; | |
912 | ||
913 | while (numpages) { | |
914 | /* | |
915 | * Store the remaining nr of pages for the large page | |
916 | * preservation check. | |
917 | */ | |
918 | cpa->numpages = numpages; | |
919 | /* for array changes, we can't use large page */ | |
920 | if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) | |
921 | cpa->numpages = 1; | |
922 | ||
923 | if (!debug_pagealloc) | |
924 | spin_lock(&cpa_lock); | |
925 | ret = __change_page_attr(cpa, checkalias); | |
926 | if (!debug_pagealloc) | |
927 | spin_unlock(&cpa_lock); | |
928 | if (ret) | |
929 | return ret; | |
930 | ||
931 | if (checkalias) { | |
932 | ret = cpa_process_alias(cpa); | |
933 | if (ret) | |
934 | return ret; | |
935 | } | |
936 | ||
937 | /* | |
938 | * Adjust the number of pages with the result of the | |
939 | * CPA operation. Either a large page has been | |
940 | * preserved or a single page update happened. | |
941 | */ | |
942 | BUG_ON(cpa->numpages > numpages); | |
943 | numpages -= cpa->numpages; | |
944 | if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) | |
945 | cpa->curpage++; | |
946 | else | |
947 | *cpa->vaddr += cpa->numpages * PAGE_SIZE; | |
948 | ||
949 | } | |
950 | return 0; | |
951 | } | |
952 | ||
953 | static inline int cache_attr(pgprot_t attr) | |
954 | { | |
955 | return pgprot_val(attr) & | |
956 | (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD); | |
957 | } | |
958 | ||
959 | static int change_page_attr_set_clr(unsigned long *addr, int numpages, | |
960 | pgprot_t mask_set, pgprot_t mask_clr, | |
961 | int force_split, int in_flag, | |
962 | struct page **pages) | |
963 | { | |
964 | struct cpa_data cpa; | |
965 | int ret, cache, checkalias; | |
966 | unsigned long baddr = 0; | |
967 | ||
968 | /* | |
969 | * Check, if we are requested to change a not supported | |
970 | * feature: | |
971 | */ | |
972 | mask_set = canon_pgprot(mask_set); | |
973 | mask_clr = canon_pgprot(mask_clr); | |
974 | if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split) | |
975 | return 0; | |
976 | ||
977 | /* Ensure we are PAGE_SIZE aligned */ | |
978 | if (in_flag & CPA_ARRAY) { | |
979 | int i; | |
980 | for (i = 0; i < numpages; i++) { | |
981 | if (addr[i] & ~PAGE_MASK) { | |
982 | addr[i] &= PAGE_MASK; | |
983 | WARN_ON_ONCE(1); | |
984 | } | |
985 | } | |
986 | } else if (!(in_flag & CPA_PAGES_ARRAY)) { | |
987 | /* | |
988 | * in_flag of CPA_PAGES_ARRAY implies it is aligned. | |
989 | * No need to cehck in that case | |
990 | */ | |
991 | if (*addr & ~PAGE_MASK) { | |
992 | *addr &= PAGE_MASK; | |
993 | /* | |
994 | * People should not be passing in unaligned addresses: | |
995 | */ | |
996 | WARN_ON_ONCE(1); | |
997 | } | |
998 | /* | |
999 | * Save address for cache flush. *addr is modified in the call | |
1000 | * to __change_page_attr_set_clr() below. | |
1001 | */ | |
1002 | baddr = *addr; | |
1003 | } | |
1004 | ||
1005 | /* Must avoid aliasing mappings in the highmem code */ | |
1006 | kmap_flush_unused(); | |
1007 | ||
1008 | vm_unmap_aliases(); | |
1009 | ||
1010 | cpa.vaddr = addr; | |
1011 | cpa.pages = pages; | |
1012 | cpa.numpages = numpages; | |
1013 | cpa.mask_set = mask_set; | |
1014 | cpa.mask_clr = mask_clr; | |
1015 | cpa.flags = 0; | |
1016 | cpa.curpage = 0; | |
1017 | cpa.force_split = force_split; | |
1018 | ||
1019 | if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY)) | |
1020 | cpa.flags |= in_flag; | |
1021 | ||
1022 | /* No alias checking for _NX bit modifications */ | |
1023 | checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX; | |
1024 | ||
1025 | ret = __change_page_attr_set_clr(&cpa, checkalias); | |
1026 | ||
1027 | /* | |
1028 | * Check whether we really changed something: | |
1029 | */ | |
1030 | if (!(cpa.flags & CPA_FLUSHTLB)) | |
1031 | goto out; | |
1032 | ||
1033 | /* | |
1034 | * No need to flush, when we did not set any of the caching | |
1035 | * attributes: | |
1036 | */ | |
1037 | cache = cache_attr(mask_set); | |
1038 | ||
1039 | /* | |
1040 | * On success we use clflush, when the CPU supports it to | |
1041 | * avoid the wbindv. If the CPU does not support it and in the | |
1042 | * error case we fall back to cpa_flush_all (which uses | |
1043 | * wbindv): | |
1044 | */ | |
1045 | if (!ret && cpu_has_clflush) { | |
1046 | if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { | |
1047 | cpa_flush_array(addr, numpages, cache, | |
1048 | cpa.flags, pages); | |
1049 | } else | |
1050 | cpa_flush_range(baddr, numpages, cache); | |
1051 | } else | |
1052 | cpa_flush_all(cache); | |
1053 | ||
1054 | out: | |
1055 | return ret; | |
1056 | } | |
1057 | ||
1058 | static inline int change_page_attr_set(unsigned long *addr, int numpages, | |
1059 | pgprot_t mask, int array) | |
1060 | { | |
1061 | return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0, | |
1062 | (array ? CPA_ARRAY : 0), NULL); | |
1063 | } | |
1064 | ||
1065 | static inline int change_page_attr_clear(unsigned long *addr, int numpages, | |
1066 | pgprot_t mask, int array) | |
1067 | { | |
1068 | return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0, | |
1069 | (array ? CPA_ARRAY : 0), NULL); | |
1070 | } | |
1071 | ||
1072 | static inline int cpa_set_pages_array(struct page **pages, int numpages, | |
1073 | pgprot_t mask) | |
1074 | { | |
1075 | return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0, | |
1076 | CPA_PAGES_ARRAY, pages); | |
1077 | } | |
1078 | ||
1079 | static inline int cpa_clear_pages_array(struct page **pages, int numpages, | |
1080 | pgprot_t mask) | |
1081 | { | |
1082 | return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0, | |
1083 | CPA_PAGES_ARRAY, pages); | |
1084 | } | |
1085 | ||
1086 | int _set_memory_uc(unsigned long addr, int numpages) | |
1087 | { | |
1088 | /* | |
1089 | * for now UC MINUS. see comments in ioremap_nocache() | |
1090 | */ | |
1091 | return change_page_attr_set(&addr, numpages, | |
1092 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); | |
1093 | } | |
1094 | ||
1095 | int set_memory_uc(unsigned long addr, int numpages) | |
1096 | { | |
1097 | int ret; | |
1098 | ||
1099 | /* | |
1100 | * for now UC MINUS. see comments in ioremap_nocache() | |
1101 | */ | |
1102 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | |
1103 | _PAGE_CACHE_UC_MINUS, NULL); | |
1104 | if (ret) | |
1105 | goto out_err; | |
1106 | ||
1107 | ret = _set_memory_uc(addr, numpages); | |
1108 | if (ret) | |
1109 | goto out_free; | |
1110 | ||
1111 | return 0; | |
1112 | ||
1113 | out_free: | |
1114 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1115 | out_err: | |
1116 | return ret; | |
1117 | } | |
1118 | EXPORT_SYMBOL(set_memory_uc); | |
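
/*
 * Illustrative sketch (not part of the original file) of the expected
 * calling pattern: set_memory_uc() reserves a memtype for the range, so
 * callers are expected to undo it with set_memory_wb() when done. buf and
 * nrpages are hypothetical names:
 *
 *	if (set_memory_uc((unsigned long)buf, nrpages))
 *		goto err;
 *	...use the uncached mapping...
 *	set_memory_wb((unsigned long)buf, nrpages);
 */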
1119 | ||
1120 | static int _set_memory_array(unsigned long *addr, int addrinarray, | |
1121 | unsigned long new_type) | |
1122 | { | |
1123 | int i, j; | |
1124 | int ret; | |
1125 | ||
1126 | /* | |
1127 | * for now UC MINUS. see comments in ioremap_nocache() | |
1128 | */ | |
1129 | for (i = 0; i < addrinarray; i++) { | |
1130 | ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE, | |
1131 | new_type, NULL); | |
1132 | if (ret) | |
1133 | goto out_free; | |
1134 | } | |
1135 | ||
1136 | ret = change_page_attr_set(addr, addrinarray, | |
1137 | __pgprot(_PAGE_CACHE_UC_MINUS), 1); | |
1138 | ||
1139 | if (!ret && new_type == _PAGE_CACHE_WC) | |
1140 | ret = change_page_attr_set_clr(addr, addrinarray, | |
1141 | __pgprot(_PAGE_CACHE_WC), | |
1142 | __pgprot(_PAGE_CACHE_MASK), | |
1143 | 0, CPA_ARRAY, NULL); | |
1144 | if (ret) | |
1145 | goto out_free; | |
1146 | ||
1147 | return 0; | |
1148 | ||
1149 | out_free: | |
1150 | for (j = 0; j < i; j++) | |
1151 | free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE); | |
1152 | ||
1153 | return ret; | |
1154 | } | |
1155 | ||
1156 | int set_memory_array_uc(unsigned long *addr, int addrinarray) | |
1157 | { | |
1158 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS); | |
1159 | } | |
1160 | EXPORT_SYMBOL(set_memory_array_uc); | |
1161 | ||
1162 | int set_memory_array_wc(unsigned long *addr, int addrinarray) | |
1163 | { | |
1164 | return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC); | |
1165 | } | |
1166 | EXPORT_SYMBOL(set_memory_array_wc); | |
1167 | ||
1168 | int _set_memory_wc(unsigned long addr, int numpages) | |
1169 | { | |
1170 | int ret; | |
1171 | unsigned long addr_copy = addr; | |
1172 | ||
1173 | ret = change_page_attr_set(&addr, numpages, | |
1174 | __pgprot(_PAGE_CACHE_UC_MINUS), 0); | |
1175 | if (!ret) { | |
1176 | ret = change_page_attr_set_clr(&addr_copy, numpages, | |
1177 | __pgprot(_PAGE_CACHE_WC), | |
1178 | __pgprot(_PAGE_CACHE_MASK), | |
1179 | 0, 0, NULL); | |
1180 | } | |
1181 | return ret; | |
1182 | } | |
1183 | ||
1184 | int set_memory_wc(unsigned long addr, int numpages) | |
1185 | { | |
1186 | int ret; | |
1187 | ||
1188 | if (!pat_enabled) | |
1189 | return set_memory_uc(addr, numpages); | |
1190 | ||
1191 | ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE, | |
1192 | _PAGE_CACHE_WC, NULL); | |
1193 | if (ret) | |
1194 | goto out_err; | |
1195 | ||
1196 | ret = _set_memory_wc(addr, numpages); | |
1197 | if (ret) | |
1198 | goto out_free; | |
1199 | ||
1200 | return 0; | |
1201 | ||
1202 | out_free: | |
1203 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1204 | out_err: | |
1205 | return ret; | |
1206 | } | |
1207 | EXPORT_SYMBOL(set_memory_wc); | |
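
/*
 * Illustrative sketch (not part of the original file): write-combining
 * suits framebuffer-style memory dominated by streaming writes. Note that
 * set_memory_wc() falls back to UC- when PAT is disabled, and, as with UC,
 * the attribute should be undone with set_memory_wb(). fb and fb_pages are
 * hypothetical names:
 *
 *	if (!set_memory_wc((unsigned long)fb, fb_pages)) {
 *		...stream pixels to fb...
 *		set_memory_wb((unsigned long)fb, fb_pages);
 *	}
 */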
1208 | ||
1209 | int _set_memory_wb(unsigned long addr, int numpages) | |
1210 | { | |
1211 | return change_page_attr_clear(&addr, numpages, | |
1212 | __pgprot(_PAGE_CACHE_MASK), 0); | |
1213 | } | |
1214 | ||
1215 | int set_memory_wb(unsigned long addr, int numpages) | |
1216 | { | |
1217 | int ret; | |
1218 | ||
1219 | ret = _set_memory_wb(addr, numpages); | |
1220 | if (ret) | |
1221 | return ret; | |
1222 | ||
1223 | free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE); | |
1224 | return 0; | |
1225 | } | |
1226 | EXPORT_SYMBOL(set_memory_wb); | |
1227 | ||
1228 | int set_memory_array_wb(unsigned long *addr, int addrinarray) | |
1229 | { | |
1230 | int i; | |
1231 | int ret; | |
1232 | ||
1233 | ret = change_page_attr_clear(addr, addrinarray, | |
1234 | __pgprot(_PAGE_CACHE_MASK), 1); | |
1235 | if (ret) | |
1236 | return ret; | |
1237 | ||
1238 | for (i = 0; i < addrinarray; i++) | |
1239 | free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE); | |
1240 | ||
1241 | return 0; | |
1242 | } | |
1243 | EXPORT_SYMBOL(set_memory_array_wb); | |
1244 | ||
1245 | int set_memory_x(unsigned long addr, int numpages) | |
1246 | { | |
1247 | if (!(__supported_pte_mask & _PAGE_NX)) | |
1248 | return 0; | |
1249 | ||
1250 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0); | |
1251 | } | |
1252 | EXPORT_SYMBOL(set_memory_x); | |
1253 | ||
1254 | int set_memory_nx(unsigned long addr, int numpages) | |
1255 | { | |
1256 | if (!(__supported_pte_mask & _PAGE_NX)) | |
1257 | return 0; | |
1258 | ||
1259 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0); | |
1260 | } | |
1261 | EXPORT_SYMBOL(set_memory_nx); | |
1262 | ||
1263 | int set_memory_ro(unsigned long addr, int numpages) | |
1264 | { | |
1265 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0); | |
1266 | } | |
1267 | EXPORT_SYMBOL_GPL(set_memory_ro); | |
1268 | ||
1269 | int set_memory_rw(unsigned long addr, int numpages) | |
1270 | { | |
1271 | return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0); | |
1272 | } | |
1273 | EXPORT_SYMBOL_GPL(set_memory_rw); | |
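
/*
 * Illustrative sketch (not part of the original file): temporarily making a
 * read-only page writable and restoring it, assuming addr is page aligned
 * and the static_protections() rules above permit the change:
 *
 *	set_memory_rw(addr, 1);
 *	...patch the page...
 *	set_memory_ro(addr, 1);
 */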
1274 | ||
1275 | int set_memory_np(unsigned long addr, int numpages) | |
1276 | { | |
1277 | return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0); | |
1278 | } | |
1279 | ||
1280 | int set_memory_4k(unsigned long addr, int numpages) | |
1281 | { | |
1282 | return change_page_attr_set_clr(&addr, numpages, __pgprot(0), | |
1283 | __pgprot(0), 1, 0, NULL); | |
1284 | } | |
1285 | ||
1286 | int set_pages_uc(struct page *page, int numpages) | |
1287 | { | |
1288 | unsigned long addr = (unsigned long)page_address(page); | |
1289 | ||
1290 | return set_memory_uc(addr, numpages); | |
1291 | } | |
1292 | EXPORT_SYMBOL(set_pages_uc); | |
1293 | ||
1294 | static int _set_pages_array(struct page **pages, int addrinarray, | |
1295 | unsigned long new_type) | |
1296 | { | |
1297 | unsigned long start; | |
1298 | unsigned long end; | |
1299 | int i; | |
1300 | int free_idx; | |
1301 | int ret; | |
1302 | ||
1303 | for (i = 0; i < addrinarray; i++) { | |
1304 | if (PageHighMem(pages[i])) | |
1305 | continue; | |
1306 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
1307 | end = start + PAGE_SIZE; | |
1308 | if (reserve_memtype(start, end, new_type, NULL)) | |
1309 | goto err_out; | |
1310 | } | |
1311 | ||
1312 | ret = cpa_set_pages_array(pages, addrinarray, | |
1313 | __pgprot(_PAGE_CACHE_UC_MINUS)); | |
1314 | if (!ret && new_type == _PAGE_CACHE_WC) | |
1315 | ret = change_page_attr_set_clr(NULL, addrinarray, | |
1316 | __pgprot(_PAGE_CACHE_WC), | |
1317 | __pgprot(_PAGE_CACHE_MASK), | |
1318 | 0, CPA_PAGES_ARRAY, pages); | |
1319 | if (ret) | |
1320 | goto err_out; | |
1321 | return 0; /* Success */ | |
1322 | err_out: | |
1323 | free_idx = i; | |
1324 | for (i = 0; i < free_idx; i++) { | |
1325 | if (PageHighMem(pages[i])) | |
1326 | continue; | |
1327 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
1328 | end = start + PAGE_SIZE; | |
1329 | free_memtype(start, end); | |
1330 | } | |
1331 | return -EINVAL; | |
1332 | } | |
1333 | ||
1334 | int set_pages_array_uc(struct page **pages, int addrinarray) | |
1335 | { | |
1336 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS); | |
1337 | } | |
1338 | EXPORT_SYMBOL(set_pages_array_uc); | |
1339 | ||
1340 | int set_pages_array_wc(struct page **pages, int addrinarray) | |
1341 | { | |
1342 | return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC); | |
1343 | } | |
1344 | EXPORT_SYMBOL(set_pages_array_wc); | |
1345 | ||
1346 | int set_pages_wb(struct page *page, int numpages) | |
1347 | { | |
1348 | unsigned long addr = (unsigned long)page_address(page); | |
1349 | ||
1350 | return set_memory_wb(addr, numpages); | |
1351 | } | |
1352 | EXPORT_SYMBOL(set_pages_wb); | |
1353 | ||
1354 | int set_pages_array_wb(struct page **pages, int addrinarray) | |
1355 | { | |
1356 | int retval; | |
1357 | unsigned long start; | |
1358 | unsigned long end; | |
1359 | int i; | |
1360 | ||
1361 | retval = cpa_clear_pages_array(pages, addrinarray, | |
1362 | __pgprot(_PAGE_CACHE_MASK)); | |
1363 | if (retval) | |
1364 | return retval; | |
1365 | ||
1366 | for (i = 0; i < addrinarray; i++) { | |
1367 | if (PageHighMem(pages[i])) | |
1368 | continue; | |
1369 | start = page_to_pfn(pages[i]) << PAGE_SHIFT; | |
1370 | end = start + PAGE_SIZE; | |
1371 | free_memtype(start, end); | |
1372 | } | |
1373 | ||
1374 | return 0; | |
1375 | } | |
1376 | EXPORT_SYMBOL(set_pages_array_wb); | |
1377 | ||
1378 | int set_pages_x(struct page *page, int numpages) | |
1379 | { | |
1380 | unsigned long addr = (unsigned long)page_address(page); | |
1381 | ||
1382 | return set_memory_x(addr, numpages); | |
1383 | } | |
1384 | EXPORT_SYMBOL(set_pages_x); | |
1385 | ||
1386 | int set_pages_nx(struct page *page, int numpages) | |
1387 | { | |
1388 | unsigned long addr = (unsigned long)page_address(page); | |
1389 | ||
1390 | return set_memory_nx(addr, numpages); | |
1391 | } | |
1392 | EXPORT_SYMBOL(set_pages_nx); | |
1393 | ||
1394 | int set_pages_ro(struct page *page, int numpages) | |
1395 | { | |
1396 | unsigned long addr = (unsigned long)page_address(page); | |
1397 | ||
1398 | return set_memory_ro(addr, numpages); | |
1399 | } | |
1400 | ||
1401 | int set_pages_rw(struct page *page, int numpages) | |
1402 | { | |
1403 | unsigned long addr = (unsigned long)page_address(page); | |
1404 | ||
1405 | return set_memory_rw(addr, numpages); | |
1406 | } | |
1407 | ||
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0),
				.flags = 0};

	/*
	 * No alias checking needed for setting the present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long tempaddr = (unsigned long) page_address(page);
	struct cpa_data cpa = { .vaddr = &tempaddr,
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.flags = 0};

	/*
	 * No alias checking needed for clearing the present flag. Otherwise,
	 * we may need to break large pages for 64-bit kernel text
	 * mappings (this adds to complexity if we want to do this from
	 * atomic context especially). Let's keep it simple!
	 */
	return __change_page_attr_set_clr(&cpa, 0);
}
1445 | ||
1446 | void kernel_map_pages(struct page *page, int numpages, int enable) | |
1447 | { | |
1448 | if (PageHighMem(page)) | |
1449 | return; | |
1450 | if (!enable) { | |
1451 | debug_check_no_locks_freed(page_address(page), | |
1452 | numpages * PAGE_SIZE); | |
1453 | } | |
1454 | ||
1455 | /* | |
1456 | * The return value is ignored as the calls cannot fail. | |
1457 | * Large pages for identity mappings are not used at boot time | |
1458 | * and hence no memory allocations during large page split. | |
1459 | */ | |
1460 | if (enable) | |
1461 | __set_pages_p(page, numpages); | |
1462 | else | |
1463 | __set_pages_np(page, numpages); | |
1464 | ||
1465 | /* | |
1466 | * We should perform an IPI and flush all tlbs, | |
1467 | * but that can deadlock->flush only current cpu: | |
1468 | */ | |
1469 | __flush_tlb_all(); | |
1470 | ||
1471 | arch_flush_lazy_mmu_mode(); | |
1472 | } | |
1473 | ||
1474 | #ifdef CONFIG_HIBERNATION | |
1475 | ||
1476 | bool kernel_page_present(struct page *page) | |
1477 | { | |
1478 | unsigned int level; | |
1479 | pte_t *pte; | |
1480 | ||
1481 | if (PageHighMem(page)) | |
1482 | return false; | |
1483 | ||
1484 | pte = lookup_address((unsigned long)page_address(page), &level); | |
1485 | return (pte_val(*pte) & _PAGE_PRESENT); | |
1486 | } | |
1487 | ||
1488 | #endif /* CONFIG_HIBERNATION */ | |
1489 | ||
1490 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | |
1491 | ||
1492 | /* | |
1493 | * The testcases use internal knowledge of the implementation that shouldn't | |
1494 | * be exposed to the rest of the kernel. Include these directly here. | |
1495 | */ | |
1496 | #ifdef CONFIG_CPA_DEBUG | |
1497 | #include "pageattr-test.c" | |
1498 | #endif |