/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Derived from include/asm-i386/pgtable.h
 * Licensed under the GPL
 */
#ifndef __UM_PGTABLE_H
#define __UM_PGTABLE_H

#include "linux/sched.h"
#include <asm/fixmap.h>
#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	0x008	/* nonlinear file mapping, saved PTE; unset:swap */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
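
/*
 * Example (editor's note): a PROT_NONE mapping gets PAGE_NONE below, i.e.
 * _PAGE_PROTNONE | _PAGE_ACCESSED with _PAGE_PRESENT clear.  Because
 * pte_present() tests (_PAGE_PRESENT | _PAGE_PROTNONE), such a pte still
 * counts as present, while pte_read()/pte_write() reject it explicitly.
 */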
#ifdef CONFIG_3_LEVEL_PGTABLES
#include "asm/pgtable-3level.h"
#else
#include "asm/pgtable-2level.h"
#endif
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void *um_virt_to_phys(struct task_struct *task, unsigned long virt,
			     pte_t *pte_out);
/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
#define pgtable_cache_init() do ; while (0)
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
extern unsigned long end_iomem;

#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START	((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
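
/*
 * Worked example (editor's sketch): VMALLOC_START rounds end_iomem up to
 * the next VMALLOC_OFFSET boundary, creating the "hole" described above.
 * With a hypothetical __va_space of 0x800000 (8MB) and end_iomem of
 * 0x20123000:
 *
 *	(0x20123000 + 0x800000) & ~0x7fffff == 0x20800000
 *
 * i.e. the vmalloc area starts at the first 8MB-aligned address strictly
 * above end_iomem.
 */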
#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)

#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/*
 * The i386 can't do page protection for execute, and considers that
 * the same as a read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
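
/*
 * Example (editor's note): __P and __S decode the low three mmap prot bits
 * (read, write, execute) for private and shared mappings respectively.
 * A private PROT_READ|PROT_WRITE mapping (__P011) gets PAGE_COPY, which
 * lacks _PAGE_RW, so the first write faults and triggers copy-on-write;
 * the shared equivalent (__S011) gets PAGE_SHARED with _PAGE_RW set.
 */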
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)
#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

#define pmd_newpage(x)	(pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)	(pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd)	phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x)	pfn_to_page(pte_pfn(x))

#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
/*
 * =================================
 * Flags checking section.
 * =================================
 */

static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_read(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_exec(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_USER)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

static inline int pte_write(pte_t pte)
{
	return((pte_get_bits(pte, _PAGE_RW)) &&
	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
}

/*
 * The following only works if pte_present() is not true.
 */
static inline int pte_file(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_FILE);
}

static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}

static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}

static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}

static inline int pte_newprot(pte_t pte)
{
	return(pte_present(pte) && (pte_get_bits(pte, _PAGE_NEWPROT)));
}
/*
 * =================================
 * Flags setting section.
 * =================================
 */

static inline pte_t pte_mknewprot(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkread(pte_t pte)
{
	pte_set_bits(pte, _PAGE_USER);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return(pte);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return(pte);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_set_bits(pte, _PAGE_RW);
	return(pte_mknewprot(pte));
}

static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if(pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return(pte);
}

static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return(pte);
}

static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_copy(*pteptr, pteval);

	/* If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */

	*pteptr = pte_mknewpage(*pteptr);
	if(pte_present(*pteptr)) *pteptr = pte_mknewprot(*pteptr);
}
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte)); \
	pte;})
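
/*
 * Usage sketch (editor's note, hypothetical values): installing a freshly
 * allocated page into an address space goes through mk_pte() and
 * set_pte_at(), e.g.
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, address, ptep, entry);
 *
 * set_pte() then marks the entry _PAGE_NEWPAGE (and _PAGE_NEWPROT when
 * present) so the host-side mappings are known to be stale, per the
 * comment in set_pte() above.
 */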
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
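
/*
 * Worked example (editor's sketch, assuming 2-level tables with
 * PGDIR_SHIFT == 22 and PTRS_PER_PGD == 1024): for address 0x40123456,
 *
 *	pgd_index(0x40123456) == (0x40123456 >> 22) & 1023 == 0x100
 *
 * so pgd_offset(mm, 0x40123456) is &mm->pgd[0x100].
 */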
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
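
/*
 * Putting the pieces together (editor's sketch, hypothetical helper): a
 * software walk from an mm and a virtual address down to the pte that
 * controls it, using only the index/offset macros above.  pud_offset()
 * and pmd_offset() come from the 2-level/3-level headers included earlier.
 *
 *	static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
 *	{
 *		pgd_t *pgd = pgd_offset(mm, addr);
 *		pud_t *pud;
 *		pmd_t *pmd;
 *
 *		if (pgd_none(*pgd))
 *			return NULL;
 *		pud = pud_offset(pgd, addr);
 *		if (pud_none(*pud))
 *			return NULL;
 *		pmd = pmd_offset(pud, addr);
 *		if (pmd_none(*pmd) || pmd_bad(*pmd))
 *			return NULL;
 *		return pte_offset_kernel(pmd, addr);
 *	}
 */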
#define update_mmu_cache(vma,address,pte) do ; while (0)
/* Encode and de-code a swap entry */
#define __swp_type(x)			(((x).val >> 4) & 0x3f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 4) | ((offset) << 11) })
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
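
/*
 * Worked example (editor's note): the swap type lives in bits 4..9 and the
 * swap offset in bits 11 and up, so the low bits that alias _PAGE_PRESENT,
 * _PAGE_NEWPAGE and _PAGE_FILE stay clear in a swap pte.  For type 3,
 * offset 0x42:
 *
 *	__swp_entry(3, 0x42).val == (3 << 4) | (0x42 << 11) == 0x21030
 *
 * and __swp_type()/__swp_offset() recover 3 and 0x42 from that value.
 */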
#define kern_addr_valid(addr) (1)
#include <asm-generic/pgtable.h>

#endif