#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
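
/*
 * Typical use (a sketch): a driver mapping device memory to userspace
 * marks the protection uncached before calling io_remap_pfn_range(), e.g.
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	io_remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */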
16 | ||
4614139c | 17 | #ifndef __ASSEMBLY__ |
195466dc | 18 | |
8405b122 JF |
19 | /* |
20 | * ZERO_PAGE is a global shared page that is always zero: used | |
21 | * for zero-mapped memory areas etc.. | |
22 | */ | |
3cbaeafe | 23 | extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; |
8405b122 JF |
24 | #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) |
25 | ||
e3ed910d JF |
26 | extern spinlock_t pgd_lock; |
27 | extern struct list_head pgd_list; | |
8405b122 | 28 | |
54321d94 JF |
29 | #ifdef CONFIG_PARAVIRT |
30 | #include <asm/paravirt.h> | |
31 | #else /* !CONFIG_PARAVIRT */ | |
32 | #define set_pte(ptep, pte) native_set_pte(ptep, pte) | |
33 | #define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte) | |
34 | ||
54321d94 JF |
35 | #define set_pte_atomic(ptep, pte) \ |
36 | native_set_pte_atomic(ptep, pte) | |
37 | ||
38 | #define set_pmd(pmdp, pmd) native_set_pmd(pmdp, pmd) | |
39 | ||
40 | #ifndef __PAGETABLE_PUD_FOLDED | |
41 | #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) | |
42 | #define pgd_clear(pgd) native_pgd_clear(pgd) | |
43 | #endif | |
44 | ||
45 | #ifndef set_pud | |
46 | # define set_pud(pudp, pud) native_set_pud(pudp, pud) | |
47 | #endif | |
48 | ||
49 | #ifndef __PAGETABLE_PMD_FOLDED | |
50 | #define pud_clear(pud) native_pud_clear(pud) | |
51 | #endif | |
52 | ||
53 | #define pte_clear(mm, addr, ptep) native_pte_clear(mm, addr, ptep) | |
54 | #define pmd_clear(pmd) native_pmd_clear(pmd) | |
55 | ||
56 | #define pte_update(mm, addr, ptep) do { } while (0) | |
57 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | |
58 | ||
54321d94 JF |
59 | #define pgd_val(x) native_pgd_val(x) |
60 | #define __pgd(x) native_make_pgd(x) | |
61 | ||
62 | #ifndef __PAGETABLE_PUD_FOLDED | |
63 | #define pud_val(x) native_pud_val(x) | |
64 | #define __pud(x) native_make_pud(x) | |
65 | #endif | |
66 | ||
67 | #ifndef __PAGETABLE_PMD_FOLDED | |
68 | #define pmd_val(x) native_pmd_val(x) | |
69 | #define __pmd(x) native_make_pmd(x) | |
70 | #endif | |
71 | ||
72 | #define pte_val(x) native_pte_val(x) | |
73 | #define __pte(x) native_make_pte(x) | |
74 | ||
224101ed JF |
75 | #define arch_end_context_switch(prev) do {} while(0) |
76 | ||
54321d94 JF |
77 | #endif /* CONFIG_PARAVIRT */ |
78 | ||
4614139c JF |
79 | /* |
80 | * The following only work if pte_present() is true. | |
81 | * Undefined behaviour if not.. | |
82 | */ | |
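/*
 * Sketch of the intended calling pattern: check pte_present() first and
 * only then query the individual flag accessors, e.g.
 *
 *	pte_t pte = *ptep;
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		set_page_dirty(pte_page(pte));
 */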
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
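
/*
 * Example (a sketch): on a CPU without NX support, __supported_pte_mask
 * has _PAGE_NX cleared, so massage_pgprot(PAGE_KERNEL) yields PAGE_KERNEL
 * minus the NX bit, while a pgprot lacking _PAGE_PRESENT passes through
 * untouched.
 */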
228 | ||
6fdc05d4 JF |
229 | static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot) |
230 | { | |
b534816b JF |
231 | return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) | |
232 | massage_pgprot(pgprot)); | |
6fdc05d4 JF |
233 | } |
234 | ||
235 | static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot) | |
236 | { | |
b534816b JF |
237 | return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) | |
238 | massage_pgprot(pgprot)); | |
6fdc05d4 JF |
239 | } |
240 | ||
38472311 IM |
241 | static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) |
242 | { | |
243 | pteval_t val = pte_val(pte); | |
244 | ||
245 | /* | |
246 | * Chop off the NX bit (if present), and add the NX portion of | |
247 | * the newprot (if present): | |
248 | */ | |
1c12c4cf | 249 | val &= _PAGE_CHG_MASK; |
b534816b | 250 | val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK; |
38472311 IM |
251 | |
252 | return __pte(val); | |
253 | } | |
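
/*
 * Typical use (a sketch): an mprotect()-style protection change rewrites
 * an existing pte against the new protection while _PAGE_CHG_MASK keeps
 * the pfn, the PAT/caching bits and the dirty/accessed state, e.g.
 *
 *	ptent = pte_modify(ptent, newprot);
 *	set_pte_at(mm, addr, ptep, ptent);
 */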
254 | ||
1c12c4cf VP |
255 | /* mprotect needs to preserve PAT bits when updating vm_page_prot */ |
256 | #define pgprot_modify pgprot_modify | |
257 | static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) | |
258 | { | |
259 | pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK; | |
260 | pgprotval_t addbits = pgprot_val(newprot); | |
261 | return __pgprot(preservebits | addbits); | |
262 | } | |
263 | ||
77be1fab | 264 | #define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK) |
c6ca18eb | 265 | |
b534816b | 266 | #define canon_pgprot(p) __pgprot(massage_pgprot(p)) |
1e8e23bc | 267 | |
1adcaafe SS |
268 | static inline int is_new_memtype_allowed(u64 paddr, unsigned long size, |
269 | unsigned long flags, | |
270 | unsigned long new_flags) | |
afc7d20c | 271 | { |
1adcaafe SS |
272 | /* |
273 | * PAT type is always WB for ISA. So no need to check. | |
274 | */ | |
275 | if (is_ISA_range(paddr, paddr + size - 1)) | |
276 | return 1; | |
277 | ||
afc7d20c | 278 | /* |
279 | * Certain new memtypes are not allowed with certain | |
280 | * requested memtype: | |
281 | * - request is uncached, return cannot be write-back | |
282 | * - request is write-combine, return cannot be write-back | |
283 | */ | |
284 | if ((flags == _PAGE_CACHE_UC_MINUS && | |
285 | new_flags == _PAGE_CACHE_WB) || | |
286 | (flags == _PAGE_CACHE_WC && | |
287 | new_flags == _PAGE_CACHE_WB)) { | |
288 | return 0; | |
289 | } | |
290 | ||
291 | return 1; | |
292 | } | |
293 | ||
458a3e64 TH |
294 | pmd_t *populate_extra_pmd(unsigned long vaddr); |
295 | pte_t *populate_extra_pte(unsigned long vaddr); | |
4614139c JF |
296 | #endif /* __ASSEMBLY__ */ |
297 | ||
96a388de TG |
298 | #ifdef CONFIG_X86_32 |
299 | # include "pgtable_32.h" | |
300 | #else | |
301 | # include "pgtable_64.h" | |
302 | #endif | |
6c386655 | 303 | |
aca159db | 304 | #ifndef __ASSEMBLY__ |
f476961c | 305 | #include <linux/mm_types.h> |
aca159db | 306 | |
static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
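
/*
 * Worked example (assuming x86-64 with 4KB pages, i.e. nine index bits
 * per level): a virtual address decomposes as
 *
 *	pgd_index = (addr >> 39) & 511
 *	pud_index = (addr >> 30) & 511
 *	pmd_index = (addr >> 21) & 511
 *	pte_index = (addr >> 12) & 511
 *	offset    =  addr        & 4095
 */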
381 | ||
3fbc2444 JF |
382 | static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) |
383 | { | |
384 | return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); | |
385 | } | |
386 | ||
99510238 JF |
387 | static inline int pmd_bad(pmd_t pmd) |
388 | { | |
18a7a199 | 389 | return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE; |
99510238 JF |
390 | } |
391 | ||
cc290ca3 JF |
392 | static inline unsigned long pages_to_mb(unsigned long npg) |
393 | { | |
394 | return npg >> (20 - PAGE_SHIFT); | |
395 | } | |
396 | ||
6cf71500 JF |
397 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
398 | remap_pfn_range(vma, vaddr, pfn, size, prot) | |
399 | ||
5ba7c913 | 400 | #if PAGETABLE_LEVELS > 2 |
deb79cfb JF |
401 | static inline int pud_none(pud_t pud) |
402 | { | |
26c8e317 | 403 | return native_pud_val(pud) == 0; |
deb79cfb JF |
404 | } |
405 | ||
5ba7c913 JF |
406 | static inline int pud_present(pud_t pud) |
407 | { | |
18a7a199 | 408 | return pud_flags(pud) & _PAGE_PRESENT; |
5ba7c913 | 409 | } |
6fff47e3 JF |
410 | |
411 | static inline unsigned long pud_page_vaddr(pud_t pud) | |
412 | { | |
413 | return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK); | |
414 | } | |
f476961c | 415 | |
e5f7f202 IM |
416 | /* |
417 | * Currently stuck as a macro due to indirect forward reference to | |
418 | * linux/mmzone.h's __section_mem_map_addr() definition: | |
419 | */ | |
420 | #define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT) | |
01ade20d JF |
421 | |
422 | /* Find an entry in the second-level page table.. */ | |
423 | static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) | |
424 | { | |
425 | return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address); | |
426 | } | |
3180fba0 | 427 | |
3f6cbef1 JF |
428 | static inline int pud_large(pud_t pud) |
429 | { | |
e2f5bda9 | 430 | return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) == |
3f6cbef1 JF |
431 | (_PAGE_PSE | _PAGE_PRESENT); |
432 | } | |
a61bb29a JF |
433 | |
434 | static inline int pud_bad(pud_t pud) | |
435 | { | |
18a7a199 | 436 | return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0; |
a61bb29a | 437 | } |
e2f5bda9 JF |
438 | #else |
439 | static inline int pud_large(pud_t pud) | |
440 | { | |
441 | return 0; | |
442 | } | |
5ba7c913 JF |
443 | #endif /* PAGETABLE_LEVELS > 2 */ |
444 | ||
#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
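
/*
 * Putting the pieces together, a software walk of a kernel virtual
 * address looks roughly like this (a sketch; each step should be guarded
 * by the matching pgd_none()/pud_none()/pmd_none() or _bad() check, plus
 * pud_large()/pmd_large() for huge mappings):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */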
504 | ||
505 | ||
68db065c JF |
506 | #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) |
507 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) | |
508 | ||
195466dc JF |
509 | #ifndef __ASSEMBLY__ |
510 | ||
2c1b284e JSR |
511 | extern int direct_gbpages; |
512 | ||
4891645e JF |
513 | /* local pte updates need not use xchg for locking */ |
514 | static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep) | |
515 | { | |
516 | pte_t res = *ptep; | |
517 | ||
518 | /* Pure native function needs no input for mm, addr */ | |
519 | native_pte_clear(NULL, 0, ptep); | |
520 | return res; | |
521 | } | |
522 | ||
523 | static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr, | |
524 | pte_t *ptep , pte_t pte) | |
525 | { | |
526 | native_set_pte(ptep, pte); | |
527 | } | |
528 | ||
195466dc JF |
529 | #ifndef CONFIG_PARAVIRT |
530 | /* | |
531 | * Rules for using pte_update - it must be called after any PTE update which | |
532 | * has not been done using the set_pte / clear_pte interfaces. It is used by | |
533 | * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE | |
534 | * updates should either be sets, clears, or set_pte_atomic for P->P | |
535 | * transitions, which means this hook should only be called for user PTEs. | |
536 | * This hook implies a P->P protection or access change has taken place, which | |
537 | * requires a subsequent TLB flush. The notification can optionally be delayed | |
538 | * until the TLB flush event by using the pte_update_defer form of the | |
539 | * interface, but care must be taken to assure that the flush happens while | |
540 | * still holding the same page table lock so that the shadow and primary pages | |
541 | * do not become out of sync on SMP. | |
542 | */ | |
543 | #define pte_update(mm, addr, ptep) do { } while (0) | |
544 | #define pte_update_defer(mm, addr, ptep) do { } while (0) | |
545 | #endif | |
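
/*
 * Sketch of the pattern described above, for a raw PTE modification that
 * bypasses set_pte(): the notification and the TLB flush both happen
 * under the same page table lock,
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 *	flush_tlb_page(vma, addr);
 */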
546 | ||
195466dc JF |
547 | /* |
548 | * We only update the dirty/accessed state if we set | |
549 | * the dirty bit by hand in the kernel, since the hardware | |
550 | * will do the accessed bit for us, and we don't want to | |
551 | * race with other CPU's that might be updating the dirty | |
552 | * bit at the same time. | |
553 | */ | |
bea41808 JF |
554 | struct vm_area_struct; |
555 | ||
195466dc | 556 | #define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
ee5aa8d3 JF |
557 | extern int ptep_set_access_flags(struct vm_area_struct *vma, |
558 | unsigned long address, pte_t *ptep, | |
559 | pte_t entry, int dirty); | |
195466dc JF |
560 | |
561 | #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG | |
f9fbf1a3 JF |
562 | extern int ptep_test_and_clear_young(struct vm_area_struct *vma, |
563 | unsigned long addr, pte_t *ptep); | |
195466dc JF |
564 | |
565 | #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH | |
c20311e1 JF |
566 | extern int ptep_clear_flush_young(struct vm_area_struct *vma, |
567 | unsigned long address, pte_t *ptep); | |
195466dc JF |
568 | |
569 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR | |
3cbaeafe JP |
570 | static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, |
571 | pte_t *ptep) | |
195466dc JF |
572 | { |
573 | pte_t pte = native_ptep_get_and_clear(ptep); | |
574 | pte_update(mm, addr, ptep); | |
575 | return pte; | |
576 | } | |
577 | ||
578 | #define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL | |
3cbaeafe JP |
579 | static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, |
580 | unsigned long addr, pte_t *ptep, | |
581 | int full) | |
195466dc JF |
582 | { |
583 | pte_t pte; | |
584 | if (full) { | |
585 | /* | |
586 | * Full address destruction in progress; paravirt does not | |
587 | * care about updates and native needs no locking | |
588 | */ | |
589 | pte = native_local_ptep_get_and_clear(ptep); | |
590 | } else { | |
591 | pte = ptep_get_and_clear(mm, addr, ptep); | |
592 | } | |
593 | return pte; | |
594 | } | |
595 | ||
596 | #define __HAVE_ARCH_PTEP_SET_WRPROTECT | |
3cbaeafe JP |
597 | static inline void ptep_set_wrprotect(struct mm_struct *mm, |
598 | unsigned long addr, pte_t *ptep) | |
195466dc | 599 | { |
d8d89827 | 600 | clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte); |
195466dc JF |
601 | pte_update(mm, addr, ptep); |
602 | } | |
603 | ||
85958b46 JF |
604 | /* |
605 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | |
606 | * | |
607 | * dst - pointer to pgd range anwhere on a pgd page | |
608 | * src - "" | |
609 | * count - the number of pgds to copy. | |
610 | * | |
611 | * dst and src can be on the same page, but the range must not overlap, | |
612 | * and must not cross a page boundary. | |
613 | */ | |
614 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |
615 | { | |
616 | memcpy(dst, src, count * sizeof(pgd_t)); | |
617 | } | |
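
/*
 * Typical use (a sketch): when a new pgd is set up, the kernel half of
 * its entries is copied from the reference page tables, e.g.
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */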
618 | ||
619 | ||
195466dc JF |
620 | #include <asm-generic/pgtable.h> |
621 | #endif /* __ASSEMBLY__ */ | |
622 | ||
1965aae3 | 623 | #endif /* _ASM_X86_PGTABLE_H */ |