/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

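/*
 * Test whether an address lies in the half-open range [start, end):
 */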
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */
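/*
 * clflush_cache_range() flushes every cache line that overlaps the
 * range [addr, addr + size), stepping by the cache line size the CPU
 * reports in boot_cpu_data.x86_clflush_size:
 */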
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr + i);
}

static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all TLBs to work around an erratum in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

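/*
 * global_flush_tlb() runs flush_kernel_map() on every CPU and waits
 * for completion, which is why it must not be called with interrupts
 * disabled:
 */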
static void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

struct clflush_data {
	unsigned long addr;
	int numpages;
};

static void __cpa_flush_range(void *arg)
{
	struct clflush_data *cld = arg;

	/*
	 * We could optimize this further and do individual per-page
	 * TLB invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64-bit as well.
	 */
	__flush_tlb_all();

	clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
}

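/*
 * cpa_flush_range() does a global TLB flush plus a clflush of the
 * changed range on every CPU; unlike global_flush_tlb() it avoids the
 * much more expensive wbinvd():
 */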
static void cpa_flush_range(unsigned long addr, int numpages)
{
	struct clflush_data cld;

	BUG_ON(irqs_disabled());

	cld.addr = addr;
	cld.numpages = numpages;

	on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon), so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after boot.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

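/*
 * lookup_address() returns the pte for a kernel virtual address (or
 * the pmd, cast to a pte, for a 2M large page) and reports the mapping
 * level via @level. NULL means the address is not mapped:
 */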
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

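/*
 * __set_pmd_pte() writes the new entry into init_mm and, on 32-bit
 * kernels that do not share the kernel pmd, mirrors the update into
 * every pgd on the pgd_list:
 */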
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

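/*
 * split_large_page() replaces one large kernel mapping with a freshly
 * allocated page table of PTRS_PER_PTE 4k entries that cover the same
 * range with the same protections:
 */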
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split-up page table. Important detail here:
	 *
	 * On Intel, the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

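/*
 * __change_page_attr() sets the protection of a single pte to @prot,
 * splitting a covering large page first when necessary. It does no
 * flushing; that is left to the callers:
 */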
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		/* Clear the PSE bit for the 4k level pages! */
		pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
	int err = 0, kernel_map = 0;
	unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	if (!kernel_map || pte_present(pfn_pte(0, prot))) {
		err = __change_page_attr(address, pfn, prot);
		if (err)
			return err;
	}

#ifdef CONFIG_X86_64
	/*
	 * Handle the kernel mapping too, which aliases part of
	 * lowmem:
	 */
	if (__pa(address) < KERNEL_TEXT_SIZE) {
		unsigned long addr2;
		pgprot_t prot2;

		addr2 = __START_KERNEL_map + __pa(address);
		/* Make sure the kernel mappings stay executable */
		prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
		err = __change_page_attr(addr2, pfn, prot2);
	}
#endif

	return err;
}

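/*
 * __change_page_attr_set_clr() applies two masks to every page in the
 * range: the bits in @mask_set are set in each pte and the bits in
 * @mask_clr are cleared. All the set_memory_*() helpers below are
 * built on top of it:
 */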
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	pgprot_t new_prot;
	int level;
	pte_t *pte;
	int i, ret;

	for (i = 0; i < numpages; i++) {

		pte = lookup_address(addr, &level);
		if (!pte)
			return -EINVAL;

		new_prot = pte_pgprot(*pte);

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		ret = change_page_attr_addr(addr, new_prot);
		if (ret)
			return ret;
		addr += PAGE_SIZE;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support clflush, or
	 * in the error case, we fall back to global_flush_tlb()
	 * (which uses wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		global_flush_tlb();

	return ret;
}

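/*
 * The set_memory_*() helpers below are the interface that modules and
 * drivers are supposed to use: each sets or clears one attribute on a
 * range of pages:
 */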
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

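/*
 * Example of a (hypothetical) caller: write-protect one page of
 * kernel data and later make it writable again:
 *
 *	set_memory_ro((unsigned long)addr, 1);
 *	...
 *	set_memory_rw((unsigned long)addr, 1);
 */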
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

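/*
 * The set_pages_*() variants below take a struct page instead of a
 * virtual address; they rely on page_address(), so they are only
 * meaningful for pages that have a kernel direct mapping:
 */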
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

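/*
 * The __ variants below go through __change_page_attr_set_clr()
 * directly and therefore skip the cache/TLB flush; they are only used
 * by the debugging code that follows:
 */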
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

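/*
 * kernel_map_pages() is the CONFIG_DEBUG_PAGEALLOC hook: pages are
 * unmapped when they are freed and mapped again when they are
 * allocated, so a use-after-free access faults immediately:
 */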
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all TLBs, but that can
	 * deadlock, so we only flush the current CPU:
	 */
	__flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif