]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _I386_PGTABLE_H |
2 | #define _I386_PGTABLE_H | |
3 | ||
1da177e4 LT |
4 | |
5 | /* | |
6 | * The Linux memory management assumes a three-level page table setup. On | |
7 | * the i386, we use that, but "fold" the mid level into the top-level page | |
8 | * table, so that we physically have the same two-level page table as the | |
9 | * i386 mmu expects. | |
10 | * | |
11 | * This file contains the functions and defines necessary to modify and use | |
12 | * the i386 page table tree. | |
13 | */ | |
14 | #ifndef __ASSEMBLY__ | |
15 | #include <asm/processor.h> | |
16 | #include <asm/fixmap.h> | |
17 | #include <linux/threads.h> | |
da181a8b | 18 | #include <asm/paravirt.h> |
1da177e4 | 19 | |
1977f032 | 20 | #include <linux/bitops.h> |
1da177e4 LT |
21 | #include <linux/slab.h> |
22 | #include <linux/list.h> | |
23 | #include <linux/spinlock.h> | |
24 | ||
8c65b4a6 TS |
25 | struct mm_struct; |
26 | struct vm_area_struct; | |
27 | ||
1da177e4 | 28 | extern pgd_t swapper_pg_dir[1024]; |
1da177e4 | 29 | |
/* i386 keeps no per-arch page-table caches: both hooks are no-ops. */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);

1da177e4 LT |
35 | /* |
36 | * The Linux x86 paging architecture is 'compile-time dual-mode', it | |
37 | * implements both the traditional 2-level x86 page tables and the | |
38 | * newer 3-level PAE-mode page tables. | |
39 | */ | |
40 | #ifdef CONFIG_X86_PAE | |
41 | # include <asm/pgtable-3level-defs.h> | |
42 | # define PMD_SIZE (1UL << PMD_SHIFT) | |
43 | # define PMD_MASK (~(PMD_SIZE-1)) | |
44 | #else | |
45 | # include <asm/pgtable-2level-defs.h> | |
46 | #endif | |
47 | ||
48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
50 | ||
1da177e4 LT |
51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) |
52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | |
53 | ||
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts. That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
/* Fewer persistent-kmap slots under PAE (larger pte entries). */
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
1da177e4 LT |
78 | /* |
79 | * Define this if things work differently on an i386 and an i486: | |
80 | * it will (on an i486) warn about kernel memory accesses that are | |
e49332bd | 81 | * done without a 'access_ok(VERIFY_WRITE,..)' |
1da177e4 | 82 | */ |
e49332bd | 83 | #undef TEST_ACCESS_OK |
1da177e4 LT |
84 | |
85 | /* The boot page tables (all created as a single array) */ | |
86 | extern unsigned long pg0[]; | |
87 | ||
88 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | |
1da177e4 | 89 | |
705e87c0 HD |
90 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ |
91 | #define pmd_none(x) (!(unsigned long)pmd_val(x)) | |
1da177e4 | 92 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
a345b4ba | 93 | #define pmd_bad(x) ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE) |
1da177e4 LT |
94 | |
95 | ||
96 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | |
97 | ||
1da177e4 LT |
98 | #ifdef CONFIG_X86_PAE |
99 | # include <asm/pgtable-3level.h> | |
100 | #else | |
101 | # include <asm/pgtable-2level.h> | |
102 | #endif | |
103 | ||
d7271b14 ZA |
104 | /* |
105 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | |
106 | * | |
107 | * dst - pointer to pgd range anwhere on a pgd page | |
108 | * src - "" | |
109 | * count - the number of pgds to copy. | |
110 | * | |
111 | * dst and src can be on the same page, but the range must not overlap, | |
112 | * and must not cross a page boundary. | |
113 | */ | |
114 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |
115 | { | |
116 | memcpy(dst, src, count * sizeof(pgd_t)); | |
117 | } | |
118 | ||
/*
 * Macro to mark a page protection value as "uncacheable". On processors
 * which do not support it (386/486-class without PCD/PWT), this is a no-op.
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)		      \
		 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_index_k(addr) pgd_index(addr)

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
61e19a34 AK |
154 | static inline int pud_large(pud_t pud) { return 0; } |
155 | ||
/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * With HIGHPTE the pte pages may live in highmem and must be mapped via
 * kmap_atomic_pte before they can be dereferenced; without it they are
 * permanently mapped and pte_unmap() is a no-op.
 */
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)
1da177e4 LT |
203 | /* |
204 | * The i386 doesn't have any external MMU info: the kernel page | |
205 | * tables contain all the necessary information. | |
1da177e4 LT |
206 | */ |
207 | #define update_mmu_cache(vma,address,pte) do { } while (0) | |
b239fb25 JF |
208 | |
209 | void native_pagetable_setup_start(pgd_t *base); | |
210 | void native_pagetable_setup_done(pgd_t *base); | |
211 | ||
212 | #ifndef CONFIG_PARAVIRT | |
213 | static inline void paravirt_pagetable_setup_start(pgd_t *base) | |
214 | { | |
215 | native_pagetable_setup_start(base); | |
216 | } | |
217 | ||
218 | static inline void paravirt_pagetable_setup_done(pgd_t *base) | |
219 | { | |
220 | native_pagetable_setup_done(base); | |
221 | } | |
222 | #endif /* !CONFIG_PARAVIRT */ | |
223 | ||
1da177e4 LT |
224 | #endif /* !__ASSEMBLY__ */ |
225 | ||
4757d7d8 TG |
226 | /* |
227 | * kern_addr_valid() is (1) for FLATMEM and (0) for | |
228 | * SPARSEMEM and DISCONTIGMEM | |
229 | */ | |
05b79bdc | 230 | #ifdef CONFIG_FLATMEM |
1da177e4 | 231 | #define kern_addr_valid(addr) (1) |
4757d7d8 TG |
232 | #else |
233 | #define kern_addr_valid(kaddr) (0) | |
234 | #endif | |
1da177e4 | 235 | |
1da177e4 LT |
236 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
237 | remap_pfn_range(vma, vaddr, pfn, size, prot) | |
238 | ||
1da177e4 | 239 | #endif /* _I386_PGTABLE_H */ |