/*
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>

unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

static unsigned long __initdata table_start;
static unsigned long __meminitdata table_end;
static unsigned long __meminitdata table_top;

static int __initdata after_init_bootmem;

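/*
 * Hand out zeroed pages one at a time from the early page-table
 * reservation window [table_start, table_top), set up below by
 * find_early_table_space(), for use before the bootmem allocator
 * is available:
 */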
static __init void *alloc_low_page(void)
{
        unsigned long pfn = table_end++;
        void *adr;

        if (pfn >= table_top)
                panic("alloc_low_page: ran out of memory");

        adr = __va(pfn * PAGE_SIZE);
        memset(adr, 0, PAGE_SIZE);
        return adr;
}

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                if (after_init_bootmem)
                        pmd_table = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                else
                        pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = NULL;

                if (after_init_bootmem) {
#ifdef CONFIG_DEBUG_PAGEALLOC
                        page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
#endif
                        if (!page_table)
                                page_table =
                                (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
                } else
                        page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end
            && ((__pa(pte) >> PAGE_SHIFT) < table_start
                || (__pa(pte) >> PAGE_SHIFT) >= table_end)) {
                pte_t *newpte;
                int i;

                BUG_ON(after_init_bootmem);
                newpte = alloc_low_page();
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                     pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}

static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
                                                unsigned long start_pfn,
                                                unsigned long end_pfn,
                                                int use_pse)
{
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        /*
         * The first iteration sets up the identity mapping using
         * large/small pages based on use_pse, with the other attributes
         * the same as set by the early code in head_32.S.
         *
         * The second iteration sets up the desired attributes
         * (NX, GLOBAL, ...) for the kernel identity mapping.
         *
         * This two-pass mechanism conforms to the TLB app note, which says:
         *
         *     "Software should not write to a paging-structure entry in a
         *      way that would change, for any linear address, both the page
         *      size and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * The first pass uses the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1)
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * Update the direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * Do a local global TLB flush, which flushes the previous
                 * mappings from both the small and large page TLBs.
                 */
                __flush_tlb_all();

                /*
                 * The second iteration sets the actual desired PTE
                 * attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
}

pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                                                       vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

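/*
 * Permanent kmaps: pre-instantiate the page tables for the PKMAP
 * window and cache its pte page in pkmap_page_table, so that kmap()
 * only ever has to install ptes, never allocate page-table pages:
 */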
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}

static void __init add_one_highpage_init(struct page *page, int pfn)
{
        ClearPageReserved(page);
        init_page_count(page);
        __free_page(page);
        totalhigh_pages++;
}

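/*
 * Highmem pages are handed to the buddy allocator per e820 active
 * region: the worker below clips each region against the requested
 * [start_pfn, end_pfn) window and frees every valid page inside
 * the intersection.
 */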
struct add_highpages_data {
        unsigned long start_pfn;
        unsigned long end_pfn;
};

static int __init add_highpages_work_fn(unsigned long start_pfn,
                                        unsigned long end_pfn, void *datax)
{
        int node_pfn;
        struct page *page;
        unsigned long final_start_pfn, final_end_pfn;
        struct add_highpages_data *data;

        data = (struct add_highpages_data *)datax;

        final_start_pfn = max(start_pfn, data->start_pfn);
        final_end_pfn = min(end_pfn, data->end_pfn);
        if (final_start_pfn >= final_end_pfn)
                return 0;

        for (node_pfn = final_start_pfn; node_pfn < final_end_pfn;
             node_pfn++) {
                if (!pfn_valid(node_pfn))
                        continue;
                page = pfn_to_page(node_pfn);
                add_one_highpage_init(page, node_pfn);
        }

        return 0;
}

void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        struct add_highpages_data data;

        data.start_pfn = start_pfn;
        data.end_pfn = end_pfn;

        work_with_active_regions(nid, add_highpages_work_fn, &data);
}

#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_setup_start(pgd_t *base)
{
        unsigned long pfn, va;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table:
         */
        for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
}

void __init native_pagetable_setup_done(pgd_t *base)
{
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_setup_start() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
static void __init early_ioremap_page_table_range_init(pgd_t *pgd_base)
{
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}

#ifdef CONFIG_ACPI_SLEEP
/*
 * ACPI suspend needs this for resume, because things like the intel-agp
 * driver might have split up a kernel 4MB mapping.
 */
char swsusp_pg_dir[PAGE_SIZE]
        __attribute__ ((aligned(PAGE_SIZE)));

static inline void save_pg_dir(void)
{
        memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
}
#else /* !CONFIG_ACPI_SLEEP */
static inline void save_pg_dir(void)
{
}
#endif /* !CONFIG_ACPI_SLEEP */

void zap_low_mappings(void)
{
        int i;

        /*
         * Zap initial low-memory mappings.
         *
         * Note that "pgd_clear()" doesn't do it for
         * us, because pgd_clear() is a no-op on i386.
         */
        for (i = 0; i < KERNEL_PGD_BOUNDARY; i++) {
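                /*
                 * Under PAE the entry is kept present but pointed at the
                 * all-zero empty_zero_page (the "+ 1" sets _PAGE_PRESENT);
                 * since that page contains no valid pmds, nothing is
                 * actually mapped.  Non-PAE can simply clear the entry.
                 */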
#ifdef CONFIG_X86_PAE
                set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
#else
                set_pgd(swapper_pg_dir+i, __pgd(0));
#endif
        }
        flush_tlb_all();
}

int nx_enabled;

pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

#ifdef CONFIG_X86_PAE

static int disable_nx __initdata;

/*
 * noexec = on|off
 *
 * Control non-executable mappings.
 *
 * on      Enable
 * off     Disable
 */
static int __init noexec_setup(char *str)
{
        if (!str || !strcmp(str, "on")) {
                if (cpu_has_nx) {
                        __supported_pte_mask |= _PAGE_NX;
                        disable_nx = 0;
                }
        } else {
                if (!strcmp(str, "off")) {
                        disable_nx = 1;
                        __supported_pte_mask &= ~_PAGE_NX;
                } else {
                        return -EINVAL;
                }
        }

        return 0;
}
early_param("noexec", noexec_setup);

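/*
 * Enable EFER.NX when CPUID 0x80000001 EDX bit 20 reports NX support
 * and "noexec=off" was not given on the command line:
 */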
static void __init set_nx(void)
{
        unsigned int v[4], l, h;

        if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
                cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

                if ((v[3] & (1 << 20)) && !disable_nx) {
                        rdmsr(MSR_EFER, l, h);
                        l |= EFER_NX;
                        wrmsr(MSR_EFER, l, h);
                        nx_enabled = 1;
                        __supported_pte_mask |= _PAGE_NX;
                }
        }
}
#endif

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */

        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init initmem_init(unsigned long start_pfn,
                         unsigned long end_pfn)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        memory_present(0, 0, highend_pfn);
        e820_register_active_regions(0, 0, highend_pfn);
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        num_physpages = highend_pfn;
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        memory_present(0, 0, max_low_pfn);
        e820_register_active_regions(0, 0, max_low_pfn);
        num_physpages = max_low_pfn;
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
#ifdef CONFIG_FLATMEM
        max_mapnr = num_physpages;
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

static void __init zone_sizes_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] =
                virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
        max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

        free_area_init_nodes(max_zone_pfns);
}

static unsigned long __init setup_node_bootmem(int nodeid,
                                 unsigned long start_pfn,
                                 unsigned long end_pfn,
                                 unsigned long bootmap)
{
        unsigned long bootmap_size;

        if (start_pfn > max_low_pfn)
                return bootmap;
        if (end_pfn > max_low_pfn)
                end_pfn = max_low_pfn;

        /* don't touch min_low_pfn */
        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap >> PAGE_SHIFT,
                                         start_pfn, end_pfn);
        printk(KERN_INFO "  node %d low ram: %08lx - %08lx\n",
                nodeid, start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);
        printk(KERN_INFO "  node %d bootmap %08lx - %08lx\n",
                nodeid, bootmap, bootmap + bootmap_size);
        free_bootmem_with_active_regions(nodeid, end_pfn);
        early_res_to_bootmem(start_pfn<<PAGE_SHIFT, end_pfn<<PAGE_SHIFT);

        return bootmap + bootmap_size;
}

void __init setup_bootmem_allocator(void)
{
        int nodeid;
        unsigned long bootmap_size, bootmap;
        /*
         * Initialize the boot-time allocator (with low memory only):
         */
        bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
                                 PAGE_SIZE);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        reserve_early(bootmap, bootmap + bootmap_size, "BOOTMAP");

        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);

#ifdef CONFIG_NEED_MULTIPLE_NODES
        for_each_online_node(nodeid)
                bootmap = setup_node_bootmem(nodeid, node_start_pfn[nodeid],
                                        node_end_pfn[nodeid], bootmap);
#else
        bootmap = setup_node_bootmem(0, 0, max_low_pfn, bootmap);
#endif

        after_init_bootmem = 1;
}

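/*
 * Size the early page-table reservation pessimistically: one pud per
 * PUD_SIZE and one pmd per PMD_SIZE of the range to be mapped, plus
 * ptes for whatever large pages cannot cover (the whole range when
 * PSE is off), plus the fixmap ptes - each rounded up to full pages.
 */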
static void __init find_early_table_space(unsigned long end, int use_pse)
{
        unsigned long puds, pmds, ptes, tables, start;

        puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
        tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);

        pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
        tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);

        if (use_pse) {
                unsigned long extra;

                extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
                extra += PMD_SIZE;
                ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
        } else
                ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;

        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);

        /* for fixmap */
        tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

        /*
         * RED-PEN putting page tables only on node 0 could
         * cause a hotspot and fill up ZONE_DMA. The page tables
         * need roughly 0.5KB per GB.
         */
        start = 0x7000;
        table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
                                        tables, PAGE_SIZE);
        if (table_start == -1UL)
                panic("Cannot find space for the kernel page tables");

        table_start >>= PAGE_SHIFT;
        table_end = table_start;
        table_top = table_start + (tables>>PAGE_SHIFT);

        printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n",
                end, table_start << PAGE_SHIFT,
                (table_start << PAGE_SHIFT) + tables);
}

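/*
 * A map_range describes one chunk of the physical range to be mapped,
 * together with the page sizes it is allowed to use;
 * init_memory_mapping() splits [start, end) into at most NR_RANGE_MR
 * such ranges (unaligned head, large-page body, unaligned tail):
 */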
struct map_range {
        unsigned long start;
        unsigned long end;
        unsigned page_size_mask;
};

#define NR_RANGE_MR 3

static int save_mr(struct map_range *mr, int nr_range,
                   unsigned long start_pfn, unsigned long end_pfn,
                   unsigned long page_size_mask)
{
        if (start_pfn < end_pfn) {
                if (nr_range >= NR_RANGE_MR)
                        panic("run out of range for init_memory_mapping\n");
                mr[nr_range].start = start_pfn<<PAGE_SHIFT;
                mr[nr_range].end = end_pfn<<PAGE_SHIFT;
                mr[nr_range].page_size_mask = page_size_mask;
                nr_range++;
        }

        return nr_range;
}

unsigned long __init_refok init_memory_mapping(unsigned long start,
                                               unsigned long end)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long page_size_mask = 0;
        unsigned long start_pfn, end_pfn;
        unsigned long pos;

        struct map_range mr[NR_RANGE_MR];
        int nr_range, i;
        int use_pse;

        printk(KERN_INFO "init_memory_mapping: %08lx-%08lx\n", start, end);

#ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
         * This will simplify cpa(), which otherwise needs to support
         * splitting large pages into small ones in interrupt context, etc.
         */
        use_pse = 0;
#else
        use_pse = cpu_has_pse;
#endif

#ifdef CONFIG_X86_PAE
        set_nx();
        if (nx_enabled)
                printk(KERN_INFO "NX (Execute Disable) protection: active\n");
#endif

        /* Enable PSE if available */
        if (cpu_has_pse)
                set_in_cr4(X86_CR4_PSE);

        /* Enable PGE if available */
        if (cpu_has_pge) {
                set_in_cr4(X86_CR4_PGE);
                __supported_pte_mask |= _PAGE_GLOBAL;
        }

        memset(mr, 0, sizeof(mr));
        nr_range = 0;

        if (use_pse)
                page_size_mask |= 1 << PG_LEVEL_2M;

        /*
         * Don't use a large page for the first 2/4MB of memory
         * because there are often fixed size MTRRs in there
         * and overlapping MTRRs into large pages can cause
         * slowdowns.
         */
        /* head: the start may not be big-page aligned */
        start_pfn = start >> PAGE_SHIFT;
        pos = start_pfn << PAGE_SHIFT;
        if (pos == 0)
                end_pfn = 1<<(PMD_SHIFT - PAGE_SHIFT);
        else
                end_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                                 << (PMD_SHIFT - PAGE_SHIFT);
        if (end_pfn > (end>>PAGE_SHIFT))
                end_pfn = end>>PAGE_SHIFT;
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
                pos = end_pfn << PAGE_SHIFT;
        }

        /* big page range */
        start_pfn = ((pos + (PMD_SIZE - 1))>>PMD_SHIFT)
                         << (PMD_SHIFT - PAGE_SHIFT);
        end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
        if (start_pfn < end_pfn) {
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
                                   page_size_mask & (1<<PG_LEVEL_2M));
                pos = end_pfn << PAGE_SHIFT;
        }

        /* tail: whatever is left is not big-page aligned */
        start_pfn = pos>>PAGE_SHIFT;
        end_pfn = end>>PAGE_SHIFT;
        if (start_pfn < end_pfn)
                nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);

        /* try to merge ranges that are contiguous and share a page size */
        for (i = 0; nr_range > 1 && i < nr_range - 1; i++) {
                unsigned long old_start;
                if (mr[i].end != mr[i+1].start ||
                    mr[i].page_size_mask != mr[i+1].page_size_mask)
                        continue;
                /* move it */
                old_start = mr[i].start;
                memmove(&mr[i], &mr[i+1],
                        (nr_range - 1 - i) * sizeof(struct map_range));
                mr[i--].start = old_start;
                nr_range--;
        }

        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " %08lx - %08lx page %s\n",
                        mr[i].start, mr[i].end,
                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M)) ?
                         "big page" : "4k");

        /*
         * Find space for the kernel direct mapping tables.
         */
        if (!after_init_bootmem)
                find_early_table_space(end, use_pse);

        for (i = 0; i < nr_range; i++)
                kernel_physical_mapping_init(pgd_base,
                                mr[i].start >> PAGE_SHIFT,
                                mr[i].end >> PAGE_SHIFT,
                                mr[i].page_size_mask == (1<<PG_LEVEL_2M));

        early_ioremap_page_table_range_init(pgd_base);

        load_cr3(swapper_pg_dir);

        __flush_tlb_all();

        if (!after_init_bootmem)
                reserve_early(table_start << PAGE_SHIFT,
                              table_end << PAGE_SHIFT, "PGTABLE");

        if (!after_init_bootmem)
                early_memtest(start, end);

        return end >> PAGE_SHIFT;
}

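/*
 * The pfn returned above is the last one mapped; callers (typically
 * setup_arch() when establishing the lowmem mapping) use it to
 * maintain max_low_pfn_mapped and max_pfn_mapped.
 */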

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
  "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
#ifdef CONFIG_X86_WP_WORKS_OK
                panic(
  "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
#endif
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}

static struct kcore_list kcore_mem, kcore_vmalloc;

void __init mem_init(void)
{
        int codesize, reservedpages, datasize, initsize;
        int tmp;

        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /* this will put all low memory onto the freelists */
        totalram_pages += free_all_bootmem();

        reservedpages = 0;
        for (tmp = 0; tmp < max_low_pfn; tmp++)
                /*
                 * Only count reserved RAM pages:
                 */
                if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
                        reservedpages++;

        set_highmem_pages_init();

        codesize = (unsigned long) &_etext - (unsigned long) &_text;
        datasize = (unsigned long) &_edata - (unsigned long) &_etext;
        initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

        kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
        kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
                   VMALLOC_END-VMALLOC_START);

        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                        "%dk reserved, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
               );

        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();

        save_pg_dir();
        zap_low_mappings();
}

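/*
 * Memory hotplug: hot-added memory is onlined into ZONE_HIGHMEM of the
 * target node; lowmem is fully populated at boot.
 */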
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

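        /*
         * Read a byte from the write-protected FIX_WP_TEST page and try
         * to write it back.  If WP is honoured in supervisor mode, the
         * write faults and the exception fixup resumes at label 2,
         * leaving 'flag' at its initial value of 1; otherwise the xorl
         * clears it to 0.
         */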
        __asm__ __volatile__(
                "       movb %0, %1     \n"
                "1:     movb %1, %0     \n"
                "       xorl %2, %2     \n"
                "2:                     \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

#ifndef CONFIG_DYNAMIC_FTRACE
        /* Dynamic tracing modifies the kernel text section */
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
}
#endif

int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
                                   int flags)
{
        return reserve_bootmem(phys, len, flags);
}