#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE	_PAGE_BIT_GLOBAL
/* - set: nonlinear file mapping, saved PTE; unset: swap */
#define _PAGE_BIT_FILE		_PAGE_BIT_DIRTY
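
/*
 * Illustrative sketch (editor's example, not part of the original
 * header): once _PAGE_PRESENT is clear the hardware ignores the other
 * bits, so software reuses them.  A PROT_NONE pte keeps _PAGE_PROTNONE
 * set, which is how it is told apart from a swap or file entry:
 *
 *	if (!(pte_flags(pte) & _PAGE_PRESENT) &&
 *	     (pte_flags(pte) & _PAGE_PROTNONE))
 *		;	// an inaccessible PROT_NONE page, not swap/file
 */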

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

#define _PAGE_FILE	(_AT(pteval_t, 1) << _PAGE_BIT_FILE)
#define _PAGE_PROTNONE	(_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
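
/*
 * Illustrative summary (editor's sketch): how the PWT/PCD pair selects
 * a memory type, assuming the kernel's usual PAT programming (with the
 * BIOS-default PAT, PWT alone would mean write-through instead):
 *
 *	PCD PWT		memory type
 *	 0   0		write-back		(_PAGE_CACHE_WB)
 *	 0   1		write-combining		(_PAGE_CACHE_WC)
 *	 1   0		uncached minus		(_PAGE_CACHE_UC_MINUS)
 *	 1   1		strong uncacheable	(_PAGE_CACHE_UC)
 */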

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/* xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
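
/*
 * Illustrative sketch (editor's example): mm code indexes these tables
 * with the read/write/exec bits of the mapping's flags.  For instance,
 * a MAP_PRIVATE mapping requested with PROT_READ|PROT_WRITE resolves
 * to __P011 == PAGE_COPY, a non-writable, non-executable protection,
 * so the first write faults and is satisfied by copy-on-write.
 */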

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high
 * address mapped VDSO in the presence of CONFIG_COMPAT_VDSO
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))
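
/*
 * Illustrative usage (editor's sketch; 'vma' is a hypothetical
 * vm_area_struct): a driver's mmap handler would typically mark its
 * register window uncacheable before remapping it:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	// followed by remap_pfn_range(vma, ...)
 */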

#ifndef __ASSEMBLY__

#define pgprot_writecombine	pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
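
/*
 * Illustrative usage (editor's sketch): a read fault on an anonymous
 * mapping can be satisfied with the shared zero page instead of a
 * freshly allocated one:
 *
 *	struct page *page = ZERO_PAGE(address);	// vaddr is ignored on x86
 */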

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
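
/*
 * Illustrative usage (editor's sketch): the accessors above must be
 * guarded by pte_present(), e.g. when scanning for dirty pages:
 *
 *	if (pte_present(pte) && pte_dirty(pte))
 *		page = pte_page(pte);	// 'page' is a hypothetical local
 */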

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
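
/*
 * Illustrative usage (editor's sketch; 'vaddr' and 'pfn' are
 * hypothetical): build a kernel pte for a page frame and install it at
 * a fixed virtual address:
 *
 *	set_pte_vaddr(vaddr, pfn_pte(pfn, PAGE_KERNEL));
 */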

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
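
/*
 * Illustrative sketch (editor's note): pte_modify() keeps the pfn and
 * the _PAGE_CHG_MASK bits (caching, accessed, dirty, special) and
 * takes everything else from the new protection.  So changing a dirty
 * page to read-only drops _PAGE_RW but keeps _PAGE_DIRTY:
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 */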

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

static inline int is_new_memtype_allowed(unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
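
/*
 * Illustrative behaviour (editor's sketch), restating the checks above:
 *
 *	is_new_memtype_allowed(_PAGE_CACHE_UC_MINUS, _PAGE_CACHE_WB); // 0
 *	is_new_memtype_allowed(_PAGE_CACHE_WC, _PAGE_CACHE_UC_MINUS); // 1
 */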

#ifndef __ASSEMBLY__
/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
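
/*
 * Illustrative arithmetic (editor's sketch): on x86_64 with 4-level
 * paging, PGDIR_SHIFT is 39 and PTRS_PER_PGD is 512, so the start of
 * the direct mapping lands in pgd slot
 *
 *	pgd_index(0xffff880000000000UL) == 272
 */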

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
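
/*
 * Illustrative usage (editor's sketch; 'vaddr' is hypothetical): start
 * a software walk of the kernel page tables, continuing with
 * pud_offset(), pmd_offset() and pte_offset_kernel():
 *
 *	pgd_t *pgd = pgd_offset_k(vaddr);
 */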


#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
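
/*
 * Illustrative usage (editor's sketch): find the pte mapping a kernel
 * address and check whether it lives in a 2MB mapping:
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *	if (kpte && level == PG_LEVEL_2M)
 *		;	// kpte really points at a PSE pmd entry
 */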

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
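
/*
 * Illustrative pattern (editor's sketch), mirroring
 * ptep_set_wrprotect() below: a raw bit update on a user PTE must be
 * followed by pte_update():
 *
 *	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
 *	pte_update(mm, addr, ptep);
 */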

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
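
/*
 * Illustrative usage (editor's sketch), modelled on x86 pgd
 * construction: copy the kernel half of the reference page tables into
 * a newly allocated pgd:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */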


#include <asm-generic/pgtable.h>
#endif	/* __ASSEMBLY__ */

#endif	/* _ASM_X86_PGTABLE_H */