]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | #ifndef _I386_PGTABLE_H |
2 | #define _I386_PGTABLE_H | |
3 | ||
1da177e4 LT |
4 | |
5 | /* | |
6 | * The Linux memory management assumes a three-level page table setup. On | |
7 | * the i386, we use that, but "fold" the mid level into the top-level page | |
8 | * table, so that we physically have the same two-level page table as the | |
9 | * i386 mmu expects. | |
10 | * | |
11 | * This file contains the functions and defines necessary to modify and use | |
12 | * the i386 page table tree. | |
13 | */ | |
14 | #ifndef __ASSEMBLY__ | |
15 | #include <asm/processor.h> | |
16 | #include <asm/fixmap.h> | |
17 | #include <linux/threads.h> | |
da181a8b | 18 | #include <asm/paravirt.h> |
1da177e4 | 19 | |
1977f032 | 20 | #include <linux/bitops.h> |
1da177e4 LT |
21 | #include <linux/slab.h> |
22 | #include <linux/list.h> | |
23 | #include <linux/spinlock.h> | |
24 | ||
8c65b4a6 TS |
25 | struct mm_struct; |
26 | struct vm_area_struct; | |
27 | ||
1da177e4 | 28 | extern pgd_t swapper_pg_dir[1024]; |
1da177e4 | 29 | |
/*
 * i386 keeps no per-arch pagetable caches, so these hooks required by the
 * generic mm code are no-ops here.
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }
void paging_init(void);
33 | ||
f1d1a842 | 34 | |
1da177e4 LT |
35 | /* |
36 | * The Linux x86 paging architecture is 'compile-time dual-mode', it | |
37 | * implements both the traditional 2-level x86 page tables and the | |
38 | * newer 3-level PAE-mode page tables. | |
39 | */ | |
40 | #ifdef CONFIG_X86_PAE | |
41 | # include <asm/pgtable-3level-defs.h> | |
42 | # define PMD_SIZE (1UL << PMD_SHIFT) | |
43 | # define PMD_MASK (~(PMD_SIZE-1)) | |
44 | #else | |
45 | # include <asm/pgtable-2level-defs.h> | |
46 | #endif | |
47 | ||
48 | #define PGDIR_SIZE (1UL << PGDIR_SHIFT) | |
49 | #define PGDIR_MASK (~(PGDIR_SIZE-1)) | |
50 | ||
1da177e4 LT |
51 | #define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT) |
52 | #define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS) | |
53 | ||
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be a 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long) high_memory + \
			2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
#ifdef CONFIG_X86_PAE
#define LAST_PKMAP 512
#else
#define LAST_PKMAP 1024
#endif

#define PKMAP_BASE ((FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK)

#ifdef CONFIG_HIGHMEM
# define VMALLOC_END	(PKMAP_BASE-2*PAGE_SIZE)
#else
# define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
#endif
77 | ||
1da177e4 LT |
78 | /* |
79 | * Define this if things work differently on an i386 and an i486: | |
80 | * it will (on an i486) warn about kernel memory accesses that are | |
e49332bd | 81 | * done without a 'access_ok(VERIFY_WRITE,..)' |
1da177e4 | 82 | */ |
e49332bd | 83 | #undef TEST_ACCESS_OK |
1da177e4 LT |
84 | |
85 | /* The boot page tables (all created as a single array) */ | |
86 | extern unsigned long pg0[]; | |
87 | ||
88 | #define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE)) | |
1da177e4 | 89 | |
705e87c0 HD |
90 | /* To avoid harmful races, pmd_none(x) should check only the lower when PAE */ |
91 | #define pmd_none(x) (!(unsigned long)pmd_val(x)) | |
1da177e4 | 92 | #define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT) |
40869cd0 IM |
93 | #define pmd_bad(x) ((pmd_val(x) \ |
94 | & ~(PAGE_MASK | _PAGE_USER | _PAGE_PSE | _PAGE_NX)) \ | |
95 | != _KERNPG_TABLE) | |
1da177e4 LT |
96 | |
97 | ||
98 | #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT)) | |
99 | ||
1da177e4 LT |
100 | #ifdef CONFIG_X86_PAE |
101 | # include <asm/pgtable-3level.h> | |
102 | #else | |
103 | # include <asm/pgtable-2level.h> | |
104 | #endif | |
105 | ||
d7271b14 ZA |
106 | /* |
107 | * clone_pgd_range(pgd_t *dst, pgd_t *src, int count); | |
108 | * | |
109 | * dst - pointer to pgd range anwhere on a pgd page | |
110 | * src - "" | |
111 | * count - the number of pgds to copy. | |
112 | * | |
113 | * dst and src can be on the same page, but the range must not overlap, | |
114 | * and must not cross a page boundary. | |
115 | */ | |
116 | static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) | |
117 | { | |
118 | memcpy(dst, src, count * sizeof(pgd_t)); | |
119 | } | |
120 | ||
/*
 * Macro to mark a page protection value as "uncacheable".  On processors which do not support
 * it, this is a no-op.  (The 386 has no PCD/PWT cache-control bits.)
 */
#define pgprot_noncached(prot)	((boot_cpu_data.x86 > 3)					  \
				 ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */

#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))
1da177e4 | 134 | |
1da177e4 LT |
135 | /* |
136 | * the pgd page can be thought of an array like this: pgd_t[PTRS_PER_PGD] | |
137 | * | |
138 | * this macro returns the index of the entry in the pgd page which would | |
139 | * control the given virtual address | |
140 | */ | |
141 | #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) | |
142 | #define pgd_index_k(addr) pgd_index(addr) | |
143 | ||
144 | /* | |
145 | * pgd_offset() returns a (pgd_t *) | |
146 | * pgd_index() is used get the offset into the pgd page's array of pgd_t's; | |
147 | */ | |
148 | #define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address)) | |
149 | ||
150 | /* | |
151 | * a shortcut which implies the use of the kernel's pgd, instead | |
152 | * of a process's | |
153 | */ | |
154 | #define pgd_offset_k(address) pgd_offset(&init_mm, address) | |
155 | ||
61e19a34 AK |
156 | static inline int pud_large(pud_t pud) { return 0; } |
157 | ||
/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
#define pmd_index(address) \
		(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

/*
 * the pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
 *
 * this macro returns the index of the entry in the pte page which would
 * control the given virtual address
 */
#define pte_index(address) \
		(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir, address) \
	((pte_t *) pmd_page_vaddr(*(dir)) +  pte_index(address))

#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))

#define pmd_page_vaddr(pmd) \
		((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

/*
 * With HIGHPTE, pte pages may live in highmem and must be temporarily
 * kmapped; the KM_PTE0/KM_PTE1 slots allow two concurrent mappings.
 */
#if defined(CONFIG_HIGHPTE)
#define pte_offset_map(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + pte_index(address))
#define pte_offset_map_nested(dir, address) \
	((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + pte_index(address))
#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
#else
#define pte_offset_map(dir, address) \
	((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#endif
197 | ||
/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)					\
do {									\
	pte_clear(&init_mm, vaddr, ptep);				\
	__flush_tlb_one(vaddr);						\
} while (0)
204 | ||
1da177e4 LT |
205 | /* |
206 | * The i386 doesn't have any external MMU info: the kernel page | |
207 | * tables contain all the necessary information. | |
1da177e4 LT |
208 | */ |
209 | #define update_mmu_cache(vma,address,pte) do { } while (0) | |
b239fb25 JF |
210 | |
211 | void native_pagetable_setup_start(pgd_t *base); | |
212 | void native_pagetable_setup_done(pgd_t *base); | |
213 | ||
214 | #ifndef CONFIG_PARAVIRT | |
215 | static inline void paravirt_pagetable_setup_start(pgd_t *base) | |
216 | { | |
217 | native_pagetable_setup_start(base); | |
218 | } | |
219 | ||
220 | static inline void paravirt_pagetable_setup_done(pgd_t *base) | |
221 | { | |
222 | native_pagetable_setup_done(base); | |
223 | } | |
224 | #endif /* !CONFIG_PARAVIRT */ | |
225 | ||
1da177e4 LT |
226 | #endif /* !__ASSEMBLY__ */ |
227 | ||
4757d7d8 TG |
228 | /* |
229 | * kern_addr_valid() is (1) for FLATMEM and (0) for | |
230 | * SPARSEMEM and DISCONTIGMEM | |
231 | */ | |
05b79bdc | 232 | #ifdef CONFIG_FLATMEM |
1da177e4 | 233 | #define kern_addr_valid(addr) (1) |
4757d7d8 TG |
234 | #else |
235 | #define kern_addr_valid(kaddr) (0) | |
236 | #endif | |
1da177e4 | 237 | |
1da177e4 LT |
238 | #define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ |
239 | remap_pfn_range(vma, vaddr, pfn, size, prot) | |
240 | ||
1da177e4 | 241 | #endif /* _I386_PGTABLE_H */ |