/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             processed;
        int             flushtlb;
        unsigned long   pfn;
};

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}

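/*
 * Illustrative usage sketch (not part of the original file; 'buf' and
 * 'len' are assumed names): a caller that wrote to a buffer and wants
 * the data pushed out of the cache hierarchy would do:
 *
 *	memset(buf, 0, len);
 *	clflush_cache_range(buf, len);
 *
 * Any alignment of buf/len works: the loop strides by
 * boot_cpu_data.x86_clflush_size and the final clflush(vend) covers a
 * trailing partial cacheline.
 */
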
static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext since that is gone later on. On
         * 64bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
                   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

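/*
 * Illustrative example (not part of the original file): if a caller
 * requests _PAGE_NX on a range overlapping the kernel text, the bit is
 * collected in 'forbidden' and masked out again, so the text stays
 * executable:
 *
 *	pgprot_t prot = __pgprot(_PAGE_NX);
 *
 *	prot = static_protections(prot, (unsigned long)_text,
 *				  __pa(_text) >> PAGE_SHIFT);
 *
 * afterwards pgprot_val(prot) no longer contains _PAGE_NX.
 */
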
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexistent mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}

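/*
 * Illustrative usage (not part of the original file): the level output
 * tells the caller whether the returned pointer is a real pte or a
 * large pmd/pud entry, e.g.:
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && level == PG_LEVEL_4K && pte_present(*pte))
 *		addr is backed by a regular 4k mapping;
 *	else if (pte && level != PG_LEVEL_4K)
 *		pte actually points at a large pmd/pud entry.
 */
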
/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot;
        int i, do_split = 1;
        unsigned int level;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
                psize = PMD_PAGE_SIZE;
                pmask = PMD_PAGE_MASK;
                break;
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
                psize = PUD_PAGE_SIZE;
                pmask = PUD_PAGE_MASK;
                break;
#endif
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->processed)
                cpa->processed = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = new_prot = pte_pgprot(old_pte);

        pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(new_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protections() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address + PAGE_SIZE;
        pfn++;
        for (i = 1; i < cpa->processed; i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->processed has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (nextpage_addr - psize) && cpa->processed == numpages) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flushtlb = 1;
                do_split = 0;
        }

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        return do_split;
}

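/*
 * Worked example (illustrative, with assumed numbers): for a 2M large
 * page, psize = 0x200000. If the request starts 3 pages into the large
 * page (address = base + 0x3000), then:
 *
 *	nextpage_addr = base + 0x200000
 *	numpages      = (0x200000 - 0x3000) >> 12 = 509
 *
 * so at most 509 of the requested pages can be processed within this
 * large page, and the address == (nextpage_addr - psize) check fails
 * (base + 0x3000 != base), which forces a split unless the pgprot is
 * unchanged.
 */
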
static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
        gfp_t gfp = GFP_KERNEL;
        unsigned long flags;
        struct page *p;

        /*
         * Avoid recursion (on debug-pagealloc) and also signal
         * our priority to get to these pagetables:
         */
        if (current->flags & PF_MEMALLOC)
                return;
        current->flags |= PF_MEMALLOC;

        /*
         * Allocate atomically from atomic contexts:
         */
        if (in_atomic() || irqs_disabled() || debug_pagealloc)
                gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

        while (pool_pages < pool_size || (ret && !*ret)) {
                p = alloc_pages(gfp, 0);
                if (!p) {
                        pool_failed++;
                        break;
                }
                /*
                 * If the call site needs a page right now, provide it:
                 */
                if (ret && !*ret) {
                        *ret = p;
                        continue;
                }
                spin_lock_irqsave(&pgd_lock, flags);
                list_add(&p->lru, &page_pool);
                pool_pages++;
                spin_unlock_irqrestore(&pgd_lock, flags);
        }

        current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
        struct sysinfo si;
        unsigned long gb;

        si_meminfo(&si);
        /*
         * Calculate the number of pool pages:
         *
         * Convert totalram (nr of pages) to MiB and round to the next
         * GiB. Shift MiB to GiB and multiply the result by
         * POOL_PAGES_PER_GB:
         */
        if (debug_pagealloc) {
                gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
                pool_size = POOL_PAGES_PER_GB * gb;
        } else {
                pool_size = 1;
        }
        pool_low = pool_size;

        cpa_fill_pool(NULL);
        printk(KERN_DEBUG
               "CPA: page pool initialized %lu of %lu pages preallocated\n",
               pool_pages, pool_size);
}

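/*
 * Worked example (illustrative, assuming 4k pages, i.e. PAGE_SHIFT == 12
 * and hence SHIFT_MB == 8): a machine with si.totalram = 1048576 pages
 * (4 GiB) under DEBUG_PAGEALLOC gives:
 *
 *	gb        = ((1048576 >> 8) + 1023) >> 10 = 4
 *	pool_size = 16 * 4 = 64 pages
 *
 * Without DEBUG_PAGEALLOC the pool holds a single page.
 */
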
static int split_large_page(pte_t *kpte, unsigned long address)
{
        unsigned long flags, pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *pbase, *tmp;
        pgprot_t ref_prot;
        struct page *base;

        /*
         * Get a page from the pool. The pool list is protected by the
         * pgd_lock, which we have to take anyway for the split
         * operation:
         */
        spin_lock_irqsave(&pgd_lock, flags);
        if (list_empty(&page_pool)) {
                spin_unlock_irqrestore(&pgd_lock, flags);
                base = NULL;
                cpa_fill_pool(&base);
                if (!base)
                        return -ENOMEM;
                spin_lock_irqsave(&pgd_lock, flags);
        } else {
                base = list_first_entry(&page_pool, struct page, lru);
                list_del(&base->lru);
                pool_pages--;

                if (pool_pages < pool_low)
                        pool_low = pool_pages;
        }

        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                pgprot_val(ref_prot) |= _PAGE_PSE;
        }
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        /*
         * Install the new, split up pagetable. Important details here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual).
         *
         * Mark the entry present. The current mapping might be
         * set to not present, which we preserved above.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        pgprot_val(ref_prot) |= _PAGE_PRESENT;
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        /*
         * If we dropped out via the lookup_address check under
         * pgd_lock then stick the page back into the pool:
         */
        if (base) {
                list_add(&base->lru, &page_pool);
                pool_pages++;
        } else
                pool_used++;
        spin_unlock_irqrestore(&pgd_lock, flags);

        return 0;
}

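/*
 * Scale of the split (descriptive note): a large page is replaced by one
 * page of PTRS_PER_PTE entries (512 with PAE/64-bit page tables, 1024 on
 * plain 32-bit), all inheriting the large page's protections. In the 1G
 * case pfninc is 512, so the pud is split into 512 2M pmds (which is why
 * _PAGE_PSE is kept set in ref_prot) rather than into 4k ptes.
 */
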
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address = cpa->vaddr;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return primary ? -EINVAL : 0;

        old_pte = *kpte;
        if (!pte_val(old_pte)) {
                if (!primary)
                        return 0;
                printk(KERN_WARNING "CPA: called for zero pte. "
                       "vaddr = %lx cpa->vaddr = %lx\n", address,
                       cpa->vaddr);
                WARN_ON(1);
                return -EINVAL;
        }

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flushtlb = 1;
                }
                cpa->processed = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->processed and cpa->flushtlb have been updated in
         * try_preserve_large_page:
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                cpa->flushtlb = 1;
                goto repeat;
        }

        return err;
}

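/*
 * Note: __change_page_attr() resolves one address per invocation, in one
 * of three ways: a 4k pte is rewritten in place, the enclosing large
 * page is preserved with the new protections (do_split == 0), or the
 * large page is split and the lookup is retried via the repeat label.
 */
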
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        int ret = 0;

        if (cpa->pfn > max_pfn_mapped)
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (!within(cpa->vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
        }

#ifdef CONFIG_X86_64
        if (ret)
                return ret;
        /*
         * No need to redo, when the primary call touched the high
         * mapping already:
         */
        if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
                return 0;

        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
                return 0;

        alias_cpa = *cpa;
        alias_cpa.vaddr =
                (cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

        /*
         * The high mapping range is imprecise, so ignore the return value.
         */
        __change_page_attr_set_clr(&alias_cpa, 0);
#endif
        return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = cpa->processed = numpages;

                ret = __change_page_attr(cpa, checkalias);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->processed > numpages);
                numpages -= cpa->processed;
                cpa->vaddr += cpa->processed * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;

        /*
         * Check, if we are requested to change a not supported
         * feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (addr & ~PAGE_MASK) {
                addr &= PAGE_MASK;
                /*
                 * People should not be passing in unaligned addresses:
                 */
                WARN_ON_ONCE(1);
        }

        cpa.vaddr = addr;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flushtlb = 0;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!cpa.flushtlb)
                goto out;

        /*
         * No need to flush when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, or in
         * the error case, we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages, cache);
        else
                cpa_flush_all(cache);

out:
        cpa_fill_pool(NULL);

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

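/*
 * Illustrative usage (not part of the original file; 'buf' and
 * 'nrpages' are assumed names): a driver that needs an uncached view of
 * a kernel buffer would typically bracket the access with the uc/wb
 * pair:
 *
 *	set_memory_uc((unsigned long)buf, nrpages);
 *	... device accesses the buffer uncached ...
 *	set_memory_wb((unsigned long)buf, nrpages);
 *
 * The address must be page aligned; see the PAGE_MASK check above.
 */
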
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0)};

        return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
        struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

        return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages are kept enabled at boot time, and are
         * split up quickly with DEBUG_PAGEALLOC. If a splitup
         * fails here (due to temporary memory shortage) no damage
         * is done because we just keep the largepage intact up
         * to the next attempt when it will likely be split up:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock -> flush only current cpu:
         */
        __flush_tlb_all();

        /*
         * Try to refill the page pool here. We can do this only after
         * the tlb flush.
         */
        cpa_fill_pool(NULL);
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif