#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) |					\
		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
	 : (prot))

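/*
 * Typical (illustrative) use, e.g. in a driver's mmap handler that exposes
 * MMIO to userspace; remap_pfn_range() then installs the uncached mapping:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */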
#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
void ptdump_walk_pgd_level_checkwx(void);

#ifdef CONFIG_DEBUG_WX
#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
#else
#define debug_checkwx()		do { } while (0)
#endif

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
#define set_pud_at(mm, addr, pudp, pud)	native_set_pud_at(mm, addr, pudp, pud)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_P4D_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_p4d
# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define p4d_clear(p4d)			native_p4d_clear(p4d)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_P4D_FOLDED
#define p4d_val(x)	native_p4d_val(x)
#define __p4d(x)	native_make_p4d(x)
#endif

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do {} while(0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}


static inline u32 read_pkru(void)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		return __read_pkru();
	return 0;
}

static inline void write_pkru(u32 pkru)
{
	if (boot_cpu_has(X86_FEATURE_OSPKE))
		__write_pkru(pkru);
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_DIRTY;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pud_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_DIRTY;
}

static inline int pud_young(pud_t pud)
{
	return pud_flags(pud) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int p4d_large(p4d_t p4d)
{
	/* No 512 GiB pages yet */
	return 0;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
}
#endif

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pmd_devmap(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static inline int pud_devmap(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_DEVMAP);
}
#else
static inline int pud_devmap(pud_t pud)
{
	return 0;
}
#endif

static inline int pgd_devmap(pgd_t pgd)
{
	return 0;
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pte_t pte_mkdevmap(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_DIRTY);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkdevmap(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DEVMAP);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v | set);
}

static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
{
	pudval_t v = native_pud_val(pud);

	return __pud(v & ~clear);
}

static inline pud_t pud_mkold(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkclean(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_DIRTY);
}

static inline pud_t pud_wrprotect(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mkdirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mkdevmap(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_DEVMAP);
}

static inline pud_t pud_mkhuge(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_PSE);
}

static inline pud_t pud_mkyoung(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_ACCESSED);
}

static inline pud_t pud_mkwrite(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_RW);
}

static inline pud_t pud_mknotpresent(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline int pud_soft_dirty(pud_t pud)
{
	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_mksoft_dirty(pud_t pud)
{
	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pud_t pud_clear_soft_dirty(pud_t pud)
{
	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
{
	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

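/*
 * Illustrative sketch of what pte_modify() preserves vs. replaces: the bits
 * in _PAGE_CHG_MASK (the page frame number plus attributes such as the
 * accessed/dirty bits) are kept from the old PTE, while everything else is
 * taken from the new protection, so e.g.
 *
 *	pte = pte_modify(pte, vma->vm_page_prot);
 *
 * keeps the mapping's target page but applies the VMA's current protection.
 */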
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x))
#define pmd_pgprot(x) __pgprot(pmd_flags(x))
#define pud_pgprot(x) __pgprot(pud_flags(x))
#define p4d_pgprot(x) __pgprot(p4d_flags(x))

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 enum page_cache_mode pcm,
					 enum page_cache_mode new_pcm)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 * - request is write-through, return cannot be write-back
	 * - request is write-through, return cannot be write-combine
	 */
	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WC &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WB) ||
	    (pcm == _PAGE_CACHE_MODE_WT &&
	     new_pcm == _PAGE_CACHE_MODE_WC)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/mmdebug.h>
#include <linux/log2.h>
#include <asm/fixmap.h>

static inline int pte_none(pte_t pte)
{
	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline int pte_devmap(pte_t a)
{
	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
}
#endif

#define pte_accessible pte_accessible
static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
	if (pte_flags(a) & _PAGE_PRESENT)
		return true;

	if ((pte_flags(a) & _PAGE_PROTNONE) &&
			mm_tlb_flush_pending(mm))
		return true;

	return false;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * These work without NUMA balancing but the kernel does not care. See the
 * comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#endif /* CONFIG_NUMA_BALANCING */

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	unsigned long val = native_pmd_val(pmd);
	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

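/*
 * Worked example (illustrative, assuming 4 KiB pages and 512-entry tables,
 * i.e. PAGE_SHIFT == 12, PMD_SHIFT == 21, PTRS_PER_PTE == PTRS_PER_PMD == 512):
 * an address whose low bits are 0x00201000 yields
 *
 *	pte_index(addr) == (0x00201000 >> 12) & 511 == 1
 *	pmd_index(addr) == (0x00201000 >> 21) & 511 == 1
 *
 * so it is covered by entry 1 of its pte page, which in turn hangs off
 * entry 1 of its pmd page.
 */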
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

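/*
 * Arithmetic note (illustrative): with 4 KiB pages (PAGE_SHIFT == 12) this
 * is a right shift by 8, i.e. 256 pages per MiB, so pages_to_mb(262144)
 * == 1024, one GiB worth of pages.
 */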
#if CONFIG_PGTABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_pfn(pud))

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

#if CONFIG_PGTABLE_LEVELS > 3
static inline int p4d_none(p4d_t p4d)
{
	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
}

static inline int p4d_present(p4d_t p4d)
{
	return p4d_flags(p4d) & _PAGE_PRESENT;
}

static inline unsigned long p4d_page_vaddr(p4d_t p4d)
{
	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))

/* Find an entry in the third-level page table.. */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
{
	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
}

static inline int p4d_bad(p4d_t p4d)
{
	return (p4d_flags(p4d) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */

static inline unsigned long p4d_index(unsigned long address)
{
	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
}

#if CONFIG_PGTABLE_LEVELS > 4
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))

/* to find an entry in a page-table-directory. */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	/*
	 * There is no need to do a workaround for the KNL stray
	 * A/D bit erratum here.  PGDs only point to page tables
	 * except on 32-bit non-PAE which is not supported on
	 * KNL.
	 */
	return !native_pgd_val(pgd);
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


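/*
 * Worked example (illustrative, assuming 4-level paging with
 * PGDIR_SHIFT == 39 and PTRS_PER_PGD == 512): each pgd entry covers a
 * 512 GiB slice of the virtual address space, and a lookup of the kernel's
 * own tables would start from
 *
 *	pgd_t *pgd = pgd_offset_k(address);
 *
 * which is simply init_mm.pgd + pgd_index(address).
 */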
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);
extern void memblock_find_dma_reserve(void);

#ifdef CONFIG_X86_64
/* Realmode trampoline initialization. */
extern pgd_t trampoline_pgd_entry;
static inline void __meminit init_trampoline_default(void)
{
	/* Default trampoline pgd value */
	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
}
# ifdef CONFIG_RANDOMIZE_MEMORY
void __meminit init_trampoline(void);
# else
# define init_trampoline init_trampoline_default
# endif
#else
static inline void init_trampoline(void) { }
#endif

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
{
	pud_t res = *pudp;

	native_pud_clear(pudp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

static inline void native_set_pud_at(struct mm_struct *mm, unsigned long addr,
				     pud_t *pudp, pud_t pud)
{
	native_set_pud(pudp, pud);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);
extern int pudp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pud_t *pudp,
				 pud_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);
extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pud_t *pudp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);


#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
					    pmd_t *pmdp)
{
	return native_pmdp_get_and_clear(pmdp);
}

#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pud_t *pudp)
{
	return native_pudp_get_and_clear(pudp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}

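/*
 * Worked example (illustrative, assuming 4 KiB pages and 512-entry tables,
 * so PTE_SHIFT == 9): page_level_shift(PG_LEVEL_4K) == 12,
 * page_level_shift(PG_LEVEL_2M) == 21 and page_level_shift(PG_LEVEL_1G) == 30,
 * giving page_level_size() values of 4 KiB, 2 MiB and 1 GiB respectively.
 */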
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}
static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
		unsigned long addr, pud_t *pud)
{
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
#endif

#define PKRU_AD_BIT 0x1
#define PKRU_WD_BIT 0x2
#define PKRU_BITS_PER_PKEY 2

static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
}

static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
{
	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
	/*
	 * Access-disable disables writes too so we need to check
	 * both bits here.
	 */
	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
}

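/*
 * Layout note (illustrative): PKRU holds two bits per protection key, so
 * key N occupies bits 2*N and 2*N+1.  For pkey 1, for example,
 * __pkru_allows_read() tests PKRU bit 2 (access-disable) and
 * __pkru_allows_write() additionally tests bit 3 (write-disable).
 */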
static inline u16 pte_flags_pkey(unsigned long pte_flags)
{
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
	/* ifdef to avoid doing 59-bit shift on 32-bit values */
	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
#else
	return 0;
#endif
}

static inline bool __pkru_allows_pkey(u16 pkey, bool write)
{
	u32 pkru = read_pkru();

	if (!__pkru_allows_read(pkru, pkey))
		return false;
	if (write && !__pkru_allows_write(pkru, pkey))
		return false;

	return true;
}

/*
 * 'pteval' can come from a PTE, PMD or PUD.  We only check
 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
 * same value on all 3 types.
 */
static inline bool __pte_access_permitted(unsigned long pteval, bool write)
{
	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;

	if (write)
		need_pte_bits |= _PAGE_RW;

	if ((pteval & need_pte_bits) != need_pte_bits)
		return 0;

	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
}

#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	return __pte_access_permitted(pte_val(pte), write);
}

#define pmd_access_permitted pmd_access_permitted
static inline bool pmd_access_permitted(pmd_t pmd, bool write)
{
	return __pte_access_permitted(pmd_val(pmd), write);
}

#define pud_access_permitted pud_access_permitted
static inline bool pud_access_permitted(pud_t pud, bool write)
{
	return __pte_access_permitted(pud_val(pud), write);
}

#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */