#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)
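
/*
 * Worked example (illustrative note, not from the original header): with
 * the bit numbers above, _PAGE_TABLE evaluates to
 * 0x1 | 0x2 | 0x4 | 0x20 | 0x40 = 0x067, and _KERNPG_TABLE, which drops
 * _PAGE_USER, to 0x063 -- the same raw value 0x067 reappears below as
 * PDE_IDENT_ATTR (PRESENT+RW+USER+DIRTY+ACCESSED).
 */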

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC

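/*
 * Illustrative note (an assumption about how these tables are consumed,
 * not text from the original header): generic mm code indexes
 * protection_map[] with the shared bit plus the xwr bits of a mapping,
 * so e.g. a private PROT_READ|PROT_EXEC mmap() selects __P101, i.e.
 * PAGE_READONLY_EXEC above.
 */
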
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR, include the USER bit.  Since the PDE and PTE
 * protection bits are combined, this allows the user to access the high
 * address mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
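
/*
 * Typical (illustrative) use in a driver's mmap handler, before remapping
 * MMIO into a userspace vma -- a sketch, not code from this header:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	remap_pfn_range(vma, vma->vm_start, pfn,
 *			vma->vm_end - vma->vm_start, vma->vm_page_prot);
 */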

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

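/*
 * Illustrative sketch (not part of the original header): building and
 * installing a kernel mapping for page frame 'pfn' at 'vaddr' could look
 * like this, with the __supported_pte_mask filter dropping e.g. _PAGE_NX
 * on CPUs without NX support:
 *
 *	set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
 */
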
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Keep the bits that pte_modify must preserve (_PAGE_CHG_MASK,
	 * which includes the pfn), and add the protection bits from
	 * newprot, filtered by __supported_pte_mask:
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}

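/*
 * For illustration (a sketch, not code from this header): mprotect-style
 * code changes a pte's protections while keeping its pfn and its
 * dirty/accessed state via
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */
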
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define track_pfn_vma_new	track_pfn_vma_new
#define track_pfn_vma_copy	track_pfn_vma_copy
#define untrack_pfn_vma		untrack_pfn_vma

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

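/*
 * Worked example (illustrative, assuming the x86_64 layout of this era):
 * with PGDIR_SHIFT = 39 and PTRS_PER_PGD = 512, an address such as
 * 0xffff880000000000 indexes pgd entry (addr >> 39) & 511 = 272, and the
 * kernel half of the pgd occupies entries 256..511.
 */
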
#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'.  NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);

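/*
 * Illustrative use (a sketch, not code from this header):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(address, &level);
 *
 *	if (pte && level == PG_LEVEL_4K)
 *		... inspect pte_flags(*pte) ...
 */
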
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to ensure that the flush happens while
 * still holding the same page table lock, so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}

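/*
 * Illustrative use (a sketch, not code from this header): a freshly
 * allocated pgd inherits the kernel mappings by copying the kernel half
 * of the reference page table, e.g.
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */
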
#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */